2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
42 * Johann Baudy : Added TX RING.
43 * Chetan Loke : Implemented TPACKET_V3 block abstraction
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
48 * This program is free software; you can redistribute it and/or
49 * modify it under the terms of the GNU General Public License
50 * as published by the Free Software Foundation; either version
51 * 2 of the License, or (at your option) any later version.
55 #include <linux/types.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <asm/uaccess.h>
77 #include <asm/ioctls.h>
79 #include <asm/cacheflush.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/poll.h>
84 #include <linux/module.h>
85 #include <linux/init.h>
86 #include <linux/mutex.h>
87 #include <linux/if_vlan.h>
88 #include <linux/virtio_net.h>
89 #include <linux/errqueue.h>
90 #include <linux/net_tstamp.h>
93 #include <net/inet_common.h>
98 - if a device has no dev->hard_header routine, it adds and removes the ll header
99 itself. In this case the ll header is invisible outside the device,
100 but higher levels should still reserve dev->hard_header_len.
101 Some devices are clever enough to reallocate the skb when the header
102 will not fit into the reserved space (tunnels); others are not.
104 - a packet socket receives packets with the ll header already pulled,
105 so SOCK_RAW should push it back.
110 Incoming, dev->hard_header!=NULL
111 mac_header -> ll header
114 Outgoing, dev->hard_header!=NULL
115 mac_header -> ll header
118 Incoming, dev->hard_header==NULL
119 mac_header -> UNKNOWN position. It very likely points to the ll
120 header. PPP does this, which is wrong because it introduces
121 asymmetry between the rx and tx paths.
124 Outgoing, dev->hard_header==NULL
125 mac_header -> data. ll header is still not built!
129 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
135 dev->hard_header != NULL
136 mac_header -> ll header
139 dev->hard_header == NULL (ll header is added by device, we cannot control it)
143 We should set nh.raw on output to the correct position;
144 the packet classifier depends on it.
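/* Illustrative user-space sketch (not part of this file): the notes above are
 * what a SOCK_RAW vs. SOCK_DGRAM packet socket observes. With SOCK_RAW the ll
 * header is part of the buffer; with SOCK_DGRAM it is stripped on receive and
 * rebuilt by the kernel on send from the sockaddr_ll address. A minimal,
 * hedged example (device names and buffer sizes are assumptions):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll from;
 *	socklen_t fromlen = sizeof(from);
 *	unsigned char buf[2048];
 *	// buf[0] starts at the link-layer (e.g. Ethernet) header
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *
 *	int fd2 = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *	// here buf[0] would start at the network (IP) header instead
 */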
147 /* Private packet socket structures. */
149 struct packet_mclist {
150 struct packet_mclist *next;
155 unsigned char addr[MAX_ADDR_LEN];
157 /* identical to struct packet_mreq except it has
158 * a longer address field.
160 struct packet_mreq_max {
162 unsigned short mr_type;
163 unsigned short mr_alen;
164 unsigned char mr_address[MAX_ADDR_LEN];
167 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
168 int closing, int tx_ring);
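/* packet_set_ring() is reached from setsockopt(PACKET_RX_RING/PACKET_TX_RING).
 * A minimal, hedged user-space sketch of driving it for TPACKET_V3 (the sizes
 * below are arbitrary example values, not recommendations):
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 20,		// 1 MiB per block
 *		.tp_block_nr	   = 8,
 *		.tp_frame_size	   = 2048,		// largely ignored for V3 rx sizing
 *		.tp_frame_nr	   = (1 << 20) / 2048 * 8,
 *		.tp_retire_blk_tov = 60,		// ms; 0 lets the kernel derive it
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */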
171 #define V3_ALIGNMENT (8)
173 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
175 #define BLK_PLUS_PRIV(sz_of_priv) \
176 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
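/* Example of the alignment above: with sz_of_priv = 13, ALIGN(13, 8) = 16,
 * so BLK_PLUS_PRIV(13) = BLK_HDR_LEN + 16 and the first packet in a block
 * starts 16 bytes after the block header.
 */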
178 /* kbdq - kernel block descriptor queue */
179 struct tpacket_kbdq_core {
181 unsigned int feature_req_word;
183 unsigned char reset_pending_on_curr_blk;
184 unsigned char delete_blk_timer;
185 unsigned short kactive_blk_num;
186 unsigned short blk_sizeof_priv;
188 /* last_kactive_blk_num:
189 * trick to see if user-space has caught up,
190 * so we avoid refreshing the timer on every single packet arrival.
192 unsigned short last_kactive_blk_num;
197 unsigned int knum_blocks;
198 uint64_t knxt_seq_num;
203 atomic_t blk_fill_in_prog;
205 /* Default is set to 8ms */
206 #define DEFAULT_PRB_RETIRE_TOV (8)
208 unsigned short retire_blk_tov;
209 unsigned short version;
210 unsigned long tov_in_jiffies;
212 /* timer to retire an outstanding block */
213 struct timer_list retire_blk_timer;
216 #define PGV_FROM_VMALLOC 1
221 struct packet_ring_buffer {
224 unsigned int frames_per_block;
225 unsigned int frame_size;
226 unsigned int frame_max;
228 unsigned int pg_vec_order;
229 unsigned int pg_vec_pages;
230 unsigned int pg_vec_len;
232 struct tpacket_kbdq_core prb_bdqc;
236 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
237 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
238 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
239 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
240 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
241 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
242 #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
245 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
247 static void *packet_previous_frame(struct packet_sock *po,
248 struct packet_ring_buffer *rb,
250 static void packet_increment_head(struct packet_ring_buffer *buff);
251 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
252 struct tpacket_block_desc *);
253 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
254 struct packet_sock *);
255 static void prb_retire_current_block(struct tpacket_kbdq_core *,
256 struct packet_sock *, unsigned int status);
257 static int prb_queue_frozen(struct tpacket_kbdq_core *);
258 static void prb_open_block(struct tpacket_kbdq_core *,
259 struct tpacket_block_desc *);
260 static void prb_retire_rx_blk_timer_expired(unsigned long);
261 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
262 static void prb_init_blk_timer(struct packet_sock *,
263 struct tpacket_kbdq_core *,
264 void (*func) (unsigned long));
265 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
266 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
267 struct tpacket3_hdr *);
268 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
269 struct tpacket3_hdr *);
270 static void packet_flush_mclist(struct sock *sk);
272 struct packet_fanout;
274 /* struct sock has to be the first member of packet_sock */
276 struct packet_fanout *fanout;
277 struct tpacket_stats stats;
278 union tpacket_stats_u stats_u;
279 struct packet_ring_buffer rx_ring;
280 struct packet_ring_buffer tx_ring;
282 spinlock_t bind_lock;
283 struct mutex pg_vec_lock;
284 unsigned int running:1, /* prot_hook is attached*/
288 int ifindex; /* bound device */
290 struct packet_mclist *mclist;
292 enum tpacket_versions tp_version;
293 unsigned int tp_hdrlen;
294 unsigned int tp_reserve;
295 unsigned int tp_loss:1;
296 unsigned int tp_tstamp;
297 struct packet_type prot_hook ____cacheline_aligned_in_smp;
300 #define PACKET_FANOUT_MAX 256
302 struct packet_fanout {
306 unsigned int num_members;
311 struct list_head list;
312 struct sock *arr[PACKET_FANOUT_MAX];
315 struct packet_type prot_hook ____cacheline_aligned_in_smp;
318 struct packet_skb_cb {
319 unsigned int origlen;
321 struct sockaddr_pkt pkt;
322 struct sockaddr_ll ll;
326 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
328 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
329 #define GET_PBLOCK_DESC(x, bid) \
330 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
331 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
332 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
333 #define GET_NEXT_PRB_BLK_NUM(x) \
334 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
335 ((x)->kactive_blk_num+1) : 0)
337 static struct packet_sock *pkt_sk(struct sock *sk)
339 return (struct packet_sock *)sk;
342 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
343 static void __fanout_link(struct sock *sk, struct packet_sock *po);
345 /* register_prot_hook must be invoked with the po->bind_lock held,
346 * or from a context in which asynchronous access to the packet
347 * socket is not possible (packet_create()).
349 static void register_prot_hook(struct sock *sk)
351 struct packet_sock *po = pkt_sk(sk);
354 __fanout_link(sk, po);
356 dev_add_pack(&po->prot_hook);
362 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
363 * held. If the sync parameter is true, we will temporarily drop
364 * the po->bind_lock and do a synchronize_net to make sure no
365 * asynchronous packet processing paths still refer to the elements
366 * of po->prot_hook. If the sync parameter is false, it is the
367 * caller's responsibility to take care of this.
369 static void __unregister_prot_hook(struct sock *sk, bool sync)
371 struct packet_sock *po = pkt_sk(sk);
375 __fanout_unlink(sk, po);
377 __dev_remove_pack(&po->prot_hook);
381 spin_unlock(&po->bind_lock);
383 spin_lock(&po->bind_lock);
387 static void unregister_prot_hook(struct sock *sk, bool sync)
389 struct packet_sock *po = pkt_sk(sk);
392 __unregister_prot_hook(sk, sync);
395 static inline __pure struct page *pgv_to_page(void *addr)
397 if (is_vmalloc_addr(addr))
398 return vmalloc_to_page(addr);
399 return virt_to_page(addr);
402 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
405 struct tpacket_hdr *h1;
406 struct tpacket2_hdr *h2;
411 switch (po->tp_version) {
413 h.h1->tp_status = status;
414 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
417 h.h2->tp_status = status;
418 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
422 WARN(1, "TPACKET version not supported.\n");
429 static int __packet_get_status(struct packet_sock *po, void *frame)
432 struct tpacket_hdr *h1;
433 struct tpacket2_hdr *h2;
440 switch (po->tp_version) {
442 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
443 return h.h1->tp_status;
445 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
446 return h.h2->tp_status;
449 WARN(1, "TPACKET version not supported.\n");
455 static void *packet_lookup_frame(struct packet_sock *po,
456 struct packet_ring_buffer *rb,
457 unsigned int position,
460 unsigned int pg_vec_pos, frame_offset;
462 struct tpacket_hdr *h1;
463 struct tpacket2_hdr *h2;
467 pg_vec_pos = position / rb->frames_per_block;
468 frame_offset = position % rb->frames_per_block;
470 h.raw = rb->pg_vec[pg_vec_pos].buffer +
471 (frame_offset * rb->frame_size);
473 if (status != __packet_get_status(po, h.raw))
479 static void *packet_current_frame(struct packet_sock *po,
480 struct packet_ring_buffer *rb,
483 return packet_lookup_frame(po, rb, rb->head, status);
486 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
488 del_timer_sync(&pkc->retire_blk_timer);
491 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
493 struct sk_buff_head *rb_queue)
495 struct tpacket_kbdq_core *pkc;
497 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
499 spin_lock(&rb_queue->lock);
500 pkc->delete_blk_timer = 1;
501 spin_unlock(&rb_queue->lock);
503 prb_del_retire_blk_timer(pkc);
506 static void prb_init_blk_timer(struct packet_sock *po,
507 struct tpacket_kbdq_core *pkc,
508 void (*func) (unsigned long))
510 init_timer(&pkc->retire_blk_timer);
511 pkc->retire_blk_timer.data = (long)po;
512 pkc->retire_blk_timer.function = func;
513 pkc->retire_blk_timer.expires = jiffies;
516 static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
518 struct tpacket_kbdq_core *pkc;
523 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
524 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
527 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
528 int blk_size_in_bytes)
530 struct net_device *dev;
531 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
532 struct ethtool_cmd ecmd;
537 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
538 if (unlikely(!dev)) {
540 return DEFAULT_PRB_RETIRE_TOV;
542 err = __ethtool_get_settings(dev, &ecmd);
543 speed = ethtool_cmd_speed(&ecmd);
547 * If the link speed is that slow, you don't really
548 * need to worry about performance anyway.
550 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
551 return DEFAULT_PRB_RETIRE_TOV;
558 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
570 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
571 union tpacket_req_u *req_u)
573 p1->feature_req_word = req_u->req3.tp_feature_req_word;
576 static void init_prb_bdqc(struct packet_sock *po,
577 struct packet_ring_buffer *rb,
579 union tpacket_req_u *req_u, int tx_ring)
581 struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
582 struct tpacket_block_desc *pbd;
584 memset(p1, 0x0, sizeof(*p1));
586 p1->knxt_seq_num = 1;
588 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
589 p1->pkblk_start = pg_vec[0].buffer;
590 p1->kblk_size = req_u->req3.tp_block_size;
591 p1->knum_blocks = req_u->req3.tp_block_nr;
592 p1->hdrlen = po->tp_hdrlen;
593 p1->version = po->tp_version;
594 p1->last_kactive_blk_num = 0;
595 po->stats_u.stats3.tp_freeze_q_cnt = 0;
596 if (req_u->req3.tp_retire_blk_tov)
597 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
599 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
600 req_u->req3.tp_block_size);
601 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
602 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
604 prb_init_ft_ops(p1, req_u);
605 prb_setup_retire_blk_timer(po, tx_ring);
606 prb_open_block(p1, pbd);
609 /* Do NOT update the last_blk_num first.
610 * Assumes sk_buff_head lock is held.
612 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
614 mod_timer(&pkc->retire_blk_timer,
615 jiffies + pkc->tov_in_jiffies);
616 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
621 * 1) We refresh the timer only when we open a block.
622 * By doing this we don't waste cycles refreshing the timer
623 * on a packet-by-packet basis.
625 * With a 1MB block-size, on a 1Gbps line, it will take
626 * i) ~8 ms to fill a block + ii) memcpy etc.
627 * In this cut we are not accounting for the memcpy time.
629 * So, if the user sets the 'tmo' to 10ms then the timer
630 * will never fire while the block is still getting filled
631 * (which is what we want). However, the user could choose
632 * to close a block early and that's fine.
634 * But when the timer does fire, we check whether or not to refresh it.
635 * Since the tmo granularity is in msecs, it is not too expensive
636 * to refresh the timer, let's say every '8' msecs.
637 * Either the user can set the 'tmo' or we can derive it based on
638 * a) line-speed and b) block-size.
639 * prb_calc_retire_blk_tmo() calculates the tmo.
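/* For reference, the arithmetic behind the ~8 ms figure above: a 1 MiB block
 * is 8 * 2^20 bits (~8.4 Mbit); at 1 Gbit/s that is roughly 8.4 ms per block,
 * which is where the 8 ms default timeout (DEFAULT_PRB_RETIRE_TOV) comes from.
 */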
642 static void prb_retire_rx_blk_timer_expired(unsigned long data)
644 struct packet_sock *po = (struct packet_sock *)data;
645 struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
647 struct tpacket_block_desc *pbd;
649 spin_lock(&po->sk.sk_receive_queue.lock);
651 frozen = prb_queue_frozen(pkc);
652 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
654 if (unlikely(pkc->delete_blk_timer))
657 /* We only need to plug the race when the block is partially filled.
659 * lock(); increment BLOCK_NUM_PKTS; unlock()
660 * copy_bits() is in progress ...
661 * timer fires on other cpu:
662 * we can't retire the current block because copy_bits is still in progress.
666 if (BLOCK_NUM_PKTS(pbd)) {
667 while (atomic_read(&pkc->blk_fill_in_prog)) {
668 /* Waiting for skb_copy_bits to finish... */
673 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
675 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
676 if (!prb_dispatch_next_block(pkc, po))
681 /* Case 1. Queue was frozen because user-space was lagging behind.
684 if (prb_curr_blk_in_use(pkc, pbd)) {
686 * Ok, user-space is still behind.
687 * So just refresh the timer.
691 /* Case 2. Queue was frozen, user-space caught up,
692 * now the link went idle && the timer fired.
693 * We don't have a block to close. So we open this
694 * block and restart the timer.
695 * Opening a block thaws the queue and restarts the timer.
696 * Thawing/timer-refresh is a side effect.
698 prb_open_block(pkc, pbd);
705 _prb_refresh_rx_retire_blk_timer(pkc);
708 spin_unlock(&po->sk.sk_receive_queue.lock);
711 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
712 struct tpacket_block_desc *pbd1, __u32 status)
714 /* Flush everything minus the block header */
716 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
721 /* Skip the block header (we know the header WILL fit in 4K) */
724 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
725 for (; start < end; start += PAGE_SIZE)
726 flush_dcache_page(pgv_to_page(start));
731 /* Now update the block status. */
733 BLOCK_STATUS(pbd1) = status;
735 /* Flush the block header */
737 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
739 flush_dcache_page(pgv_to_page(start));
749 * 2) Increment active_blk_num
751 * Note: We DON'T refresh the timer on purpose,
752 * because almost always the next block will be opened.
754 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
755 struct tpacket_block_desc *pbd1,
756 struct packet_sock *po, unsigned int stat)
758 __u32 status = TP_STATUS_USER | stat;
760 struct tpacket3_hdr *last_pkt;
761 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
763 if (po->stats.tp_drops)
764 status |= TP_STATUS_LOSING;
766 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
767 last_pkt->tp_next_offset = 0;
769 /* Get the ts of the last pkt */
770 if (BLOCK_NUM_PKTS(pbd1)) {
771 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
772 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
774 /* Ok, we tmo'd - so get the current time */
777 h1->ts_last_pkt.ts_sec = ts.tv_sec;
778 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
783 /* Flush the block */
784 prb_flush_block(pkc1, pbd1, status);
786 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
789 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
791 pkc->reset_pending_on_curr_blk = 0;
795 * Side effect of opening a block:
797 * 1) prb_queue is thawed.
798 * 2) retire_blk_timer is refreshed.
801 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
802 struct tpacket_block_desc *pbd1)
805 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
809 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
811 /* We could have just memset this but we would lose the
812 * flexibility of making the priv area sticky
814 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
815 BLOCK_NUM_PKTS(pbd1) = 0;
816 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
818 h1->ts_first_pkt.ts_sec = ts.tv_sec;
819 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
820 pkc1->pkblk_start = (char *)pbd1;
821 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
822 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
823 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
824 pbd1->version = pkc1->version;
825 pkc1->prev = pkc1->nxt_offset;
826 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
827 prb_thaw_queue(pkc1);
828 _prb_refresh_rx_retire_blk_timer(pkc1);
835 WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
836 pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
842 * Queue freeze logic:
843 * 1) Assume tp_block_nr = 8 blocks.
844 * 2) At time 't0', user opens Rx ring.
845 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
846 * 4) user-space is either sleeping or processing block '0'.
847 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
848 * it will close block-7, loop around and try to fill block '0'.
850 * __packet_lookup_frame_in_block
851 * prb_retire_current_block()
852 * prb_dispatch_next_block()
853 * |->(BLOCK_STATUS == USER) evaluates to true
854 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
855 * 6) Now there are two cases:
856 * 6.1) Link goes idle right after the queue is frozen.
857 * But remember, the last open_block() refreshed the timer.
858 * When this timer expires, it will refresh itself so that we can
859 * re-open block-0 in the near future.
860 * 6.2) Link is busy and keeps on receiving packets. This is a simple
861 * case and __packet_lookup_frame_in_block will check if block-0
862 * is free and can now be re-used.
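/* A hedged user-space sketch of the consumer side described above (block and
 * packet layout per the tpacket3 ABI; error handling omitted, 'ring', 'req'
 * and 'i' are assumed from the ring-setup sketch earlier in this file):
 *
 *	struct tpacket_block_desc *bd =
 *		(struct tpacket_block_desc *)((char *)ring + (size_t)i * req.tp_block_size);
 *	if (bd->hdr.bh1.block_status & TP_STATUS_USER) {
 *		struct tpacket3_hdr *p3 =
 *			(void *)((char *)bd + bd->hdr.bh1.offset_to_first_pkt);
 *		for (unsigned int n = 0; n < bd->hdr.bh1.num_pkts; n++) {
 *			// frame data starts at (char *)p3 + p3->tp_mac
 *			p3 = (void *)((char *)p3 + p3->tp_next_offset);
 *		}
 *		bd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// hand block back
 *		i = (i + 1) % req.tp_block_nr;
 *	}
 */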
864 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
865 struct packet_sock *po)
867 pkc->reset_pending_on_curr_blk = 1;
868 po->stats_u.stats3.tp_freeze_q_cnt++;
871 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
874 * If the next block is free then we will dispatch it
875 * and return a good offset.
876 * Else, we will freeze the queue.
877 * So, caller must check the return value.
879 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
880 struct packet_sock *po)
882 struct tpacket_block_desc *pbd;
886 /* 1. Get current block num */
887 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
889 /* 2. If this block is currently in_use then freeze the queue */
890 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
891 prb_freeze_queue(pkc, po);
897 * open this block and return the offset where the first packet
898 * needs to get stored.
900 prb_open_block(pkc, pbd);
901 return (void *)pkc->nxt_offset;
904 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
905 struct packet_sock *po, unsigned int status)
907 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
909 /* retire/close the current block */
910 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
912 * Plug the case where copy_bits() is in progress on
913 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
914 * have space to copy the pkt in the current block and
915 * called prb_retire_current_block()
917 * We don't need to worry about the TMO case because
918 * the timer-handler already handled this case.
920 if (!(status & TP_STATUS_BLK_TMO)) {
921 while (atomic_read(&pkc->blk_fill_in_prog)) {
922 /* Waiting for skb_copy_bits to finish... */
926 prb_close_block(pkc, pbd, po, status);
930 WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
935 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
936 struct tpacket_block_desc *pbd)
938 return TP_STATUS_USER & BLOCK_STATUS(pbd);
941 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
943 return pkc->reset_pending_on_curr_blk;
946 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
948 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
949 atomic_dec(&pkc->blk_fill_in_prog);
952 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
953 struct tpacket3_hdr *ppd)
955 ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
958 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
959 struct tpacket3_hdr *ppd)
961 ppd->hv1.tp_rxhash = 0;
964 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
965 struct tpacket3_hdr *ppd)
967 if (vlan_tx_tag_present(pkc->skb)) {
968 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
969 ppd->tp_status = TP_STATUS_VLAN_VALID;
971 ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
975 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
976 struct tpacket3_hdr *ppd)
978 prb_fill_vlan_info(pkc, ppd);
980 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
981 prb_fill_rxhash(pkc, ppd);
983 prb_clear_rxhash(pkc, ppd);
986 static void prb_fill_curr_block(char *curr,
987 struct tpacket_kbdq_core *pkc,
988 struct tpacket_block_desc *pbd,
991 struct tpacket3_hdr *ppd;
993 ppd = (struct tpacket3_hdr *)curr;
994 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
996 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
997 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
998 BLOCK_NUM_PKTS(pbd) += 1;
999 atomic_inc(&pkc->blk_fill_in_prog);
1000 prb_run_all_ft_ops(pkc, ppd);
1003 /* Assumes caller has the sk->rx_queue.lock */
1004 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1005 struct sk_buff *skb,
1010 struct tpacket_kbdq_core *pkc;
1011 struct tpacket_block_desc *pbd;
1014 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1015 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1017 /* Queue is frozen when user space is lagging behind */
1018 if (prb_queue_frozen(pkc)) {
1020 * Check if the last block, which caused the queue to freeze,
1021 * is still in use by user-space.
1023 if (prb_curr_blk_in_use(pkc, pbd)) {
1024 /* Can't record this packet */
1028 * Ok, the block was released by user-space.
1029 * Now let's open that block.
1030 * Opening a block also thaws the queue;
1031 * thawing is a side effect.
1033 prb_open_block(pkc, pbd);
1038 curr = pkc->nxt_offset;
1040 end = (char *)pbd + pkc->kblk_size;
1042 /* first try the current block */
1043 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1044 prb_fill_curr_block(curr, pkc, pbd, len);
1045 return (void *)curr;
1048 /* Ok, close the current block */
1049 prb_retire_current_block(pkc, po, 0);
1051 /* Now, try to dispatch the next block */
1052 curr = (char *)prb_dispatch_next_block(pkc, po);
1054 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1055 prb_fill_curr_block(curr, pkc, pbd, len);
1056 return (void *)curr;
1060 * No free blocks are available. User-space hasn't caught up yet.
1061 * The queue was just frozen and now this packet will get dropped.
1066 static void *packet_current_rx_frame(struct packet_sock *po,
1067 struct sk_buff *skb,
1068 int status, unsigned int len)
1071 switch (po->tp_version) {
1074 curr = packet_lookup_frame(po, &po->rx_ring,
1075 po->rx_ring.head, status);
1078 return __packet_lookup_frame_in_block(po, skb, status, len);
1080 WARN(1, "TPACKET version not supported\n");
1086 static void *prb_lookup_block(struct packet_sock *po,
1087 struct packet_ring_buffer *rb,
1088 unsigned int previous,
1091 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1092 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
1094 if (status != BLOCK_STATUS(pbd))
1099 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1102 if (rb->prb_bdqc.kactive_blk_num)
1103 prev = rb->prb_bdqc.kactive_blk_num-1;
1105 prev = rb->prb_bdqc.knum_blocks-1;
1109 /* Assumes caller has held the rx_queue.lock */
1110 static void *__prb_previous_block(struct packet_sock *po,
1111 struct packet_ring_buffer *rb,
1114 unsigned int previous = prb_previous_blk_num(rb);
1115 return prb_lookup_block(po, rb, previous, status);
1118 static void *packet_previous_rx_frame(struct packet_sock *po,
1119 struct packet_ring_buffer *rb,
1122 if (po->tp_version <= TPACKET_V2)
1123 return packet_previous_frame(po, rb, status);
1125 return __prb_previous_block(po, rb, status);
1128 static void packet_increment_rx_head(struct packet_sock *po,
1129 struct packet_ring_buffer *rb)
1131 switch (po->tp_version) {
1134 return packet_increment_head(rb);
1137 WARN(1, "TPACKET version not supported.\n");
1143 static void *packet_previous_frame(struct packet_sock *po,
1144 struct packet_ring_buffer *rb,
1147 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1148 return packet_lookup_frame(po, rb, previous, status);
1151 static void packet_increment_head(struct packet_ring_buffer *buff)
1153 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1156 static void packet_sock_destruct(struct sock *sk)
1158 skb_queue_purge(&sk->sk_error_queue);
1160 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1161 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1163 if (!sock_flag(sk, SOCK_DEAD)) {
1164 pr_err("Attempt to release alive packet socket: %p\n", sk);
1168 sk_refcnt_debug_dec(sk);
1171 static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1173 int x = atomic_read(&f->rr_cur) + 1;
1181 static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1183 u32 idx, hash = skb->rxhash;
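	/* Scale the 32-bit hash into [0, num) without a modulo:
	 * ((u64)hash * num) >> 32 is the usual reciprocal-scaling trick.
	 */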
1185 idx = ((u64)hash * num) >> 32;
1190 static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1194 cur = atomic_read(&f->rr_cur);
1195 while ((old = atomic_cmpxchg(&f->rr_cur, cur,
1196 fanout_rr_next(f, num))) != cur)
1201 static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1203 unsigned int cpu = smp_processor_id();
1205 return f->arr[cpu % num];
1208 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1209 struct packet_type *pt, struct net_device *orig_dev)
1211 struct packet_fanout *f = pt->af_packet_priv;
1212 unsigned int num = f->num_members;
1213 struct packet_sock *po;
1216 if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1223 case PACKET_FANOUT_HASH:
1226 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1230 skb_get_rxhash(skb);
1231 sk = fanout_demux_hash(f, skb, num);
1233 case PACKET_FANOUT_LB:
1234 sk = fanout_demux_lb(f, skb, num);
1236 case PACKET_FANOUT_CPU:
1237 sk = fanout_demux_cpu(f, skb, num);
1243 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
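/* A hedged user-space sketch of joining a fanout group (the demux modes above
 * are selected by the high 16 bits of the setsockopt argument):
 *
 *	int id = 1234;				// group id, arbitrary example
 *	int arg = id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *
 * Every socket that joins with the same id and type receives a share of the
 * traffic according to the chosen mode (hash, load-balance, or cpu).
 */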
1246 static DEFINE_MUTEX(fanout_mutex);
1247 static LIST_HEAD(fanout_list);
1249 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1251 struct packet_fanout *f = po->fanout;
1253 spin_lock(&f->lock);
1254 f->arr[f->num_members] = sk;
1257 spin_unlock(&f->lock);
1260 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1262 struct packet_fanout *f = po->fanout;
1265 spin_lock(&f->lock);
1266 for (i = 0; i < f->num_members; i++) {
1267 if (f->arr[i] == sk)
1270 BUG_ON(i >= f->num_members);
1271 f->arr[i] = f->arr[f->num_members - 1];
1273 spin_unlock(&f->lock);
1276 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1278 struct packet_sock *po = pkt_sk(sk);
1279 struct packet_fanout *f, *match;
1280 u8 type = type_flags & 0xff;
1281 u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
1285 case PACKET_FANOUT_HASH:
1286 case PACKET_FANOUT_LB:
1287 case PACKET_FANOUT_CPU:
1299 mutex_lock(&fanout_mutex);
1301 list_for_each_entry(f, &fanout_list, list) {
1303 read_pnet(&f->net) == sock_net(sk)) {
1309 if (match && match->defrag != defrag)
1313 match = kzalloc(sizeof(*match), GFP_KERNEL);
1316 write_pnet(&match->net, sock_net(sk));
1319 match->defrag = defrag;
1320 atomic_set(&match->rr_cur, 0);
1321 INIT_LIST_HEAD(&match->list);
1322 spin_lock_init(&match->lock);
1323 atomic_set(&match->sk_ref, 0);
1324 match->prot_hook.type = po->prot_hook.type;
1325 match->prot_hook.dev = po->prot_hook.dev;
1326 match->prot_hook.func = packet_rcv_fanout;
1327 match->prot_hook.af_packet_priv = match;
1328 dev_add_pack(&match->prot_hook);
1329 list_add(&match->list, &fanout_list);
1332 if (match->type == type &&
1333 match->prot_hook.type == po->prot_hook.type &&
1334 match->prot_hook.dev == po->prot_hook.dev) {
1336 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1337 __dev_remove_pack(&po->prot_hook);
1339 atomic_inc(&match->sk_ref);
1340 __fanout_link(sk, po);
1345 mutex_unlock(&fanout_mutex);
1349 static void fanout_release(struct sock *sk)
1351 struct packet_sock *po = pkt_sk(sk);
1352 struct packet_fanout *f;
1360 mutex_lock(&fanout_mutex);
1361 if (atomic_dec_and_test(&f->sk_ref)) {
1363 dev_remove_pack(&f->prot_hook);
1366 mutex_unlock(&fanout_mutex);
1369 static const struct proto_ops packet_ops;
1371 static const struct proto_ops packet_ops_spkt;
1373 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1374 struct packet_type *pt, struct net_device *orig_dev)
1377 struct sockaddr_pkt *spkt;
1380 * When we registered the protocol we saved the socket in the data
1381 * field for just this event.
1384 sk = pt->af_packet_priv;
1387 * Yank back the headers [hope the device set this
1388 * right or kerboom...]
1390 * Incoming packets have the ll header already pulled; push it back.
1393 * For outgoing ones skb->data == skb_mac_header(skb),
1394 * so this procedure is a no-op.
1397 if (skb->pkt_type == PACKET_LOOPBACK)
1400 if (!net_eq(dev_net(dev), sock_net(sk)))
1403 skb = skb_share_check(skb, GFP_ATOMIC);
1407 /* drop any routing info */
1410 /* drop conntrack reference */
1413 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1415 skb_push(skb, skb->data - skb_mac_header(skb));
1418 * The SOCK_PACKET socket receives _all_ frames.
1421 spkt->spkt_family = dev->type;
1422 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1423 spkt->spkt_protocol = skb->protocol;
1426 * Charge the memory to the socket. This is done specifically
1427 * to prevent sockets from using up all the memory.
1430 if (sock_queue_rcv_skb(sk, skb) == 0)
1441 * Output a raw packet to a device layer. This bypasses all the other
1442 * protocol layers and you must therefore supply it with a complete frame
1445 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1446 struct msghdr *msg, size_t len)
1448 struct sock *sk = sock->sk;
1449 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
1450 struct sk_buff *skb = NULL;
1451 struct net_device *dev;
1457 * Get and verify the address.
1461 if (msg->msg_namelen < sizeof(struct sockaddr))
1463 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1464 proto = saddr->spkt_protocol;
1466 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1469 * Find the device first to size check it
1472 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1475 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1481 if (!(dev->flags & IFF_UP))
1485 * You may not queue a frame bigger than the mtu. This is the lowest level
1486 * raw protocol and you must do your own fragmentation at this level.
1489 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1490 if (!netif_supports_nofcs(dev)) {
1491 err = -EPROTONOSUPPORT;
1494 extra_len = 4; /* We're doing our own CRC */
1498 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1502 size_t reserved = LL_RESERVED_SPACE(dev);
1503 int tlen = dev->needed_tailroom;
1504 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1507 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1510 /* FIXME: Save some space for broken drivers that write a hard
1511 * header at transmission time by themselves. PPP is the notable
1512 * one here. This should really be fixed at the driver level.
1514 skb_reserve(skb, reserved);
1515 skb_reset_network_header(skb);
1517 /* Try to align data part correctly */
1522 skb_reset_network_header(skb);
1524 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1530 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1531 /* Earlier code assumed this would be a VLAN pkt,
1532 * double-check this now that we have the actual packet in hand.
1535 struct ethhdr *ehdr;
1536 skb_reset_mac_header(skb);
1537 ehdr = eth_hdr(skb);
1538 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1544 skb->protocol = proto;
1546 skb->priority = sk->sk_priority;
1547 skb->mark = sk->sk_mark;
1548 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1552 if (unlikely(extra_len == 4))
1555 dev_queue_xmit(skb);
1566 static unsigned int run_filter(const struct sk_buff *skb,
1567 const struct sock *sk,
1570 struct sk_filter *filter;
1573 filter = rcu_dereference(sk->sk_filter);
1575 res = SK_RUN_FILTER(filter, skb);
1582 * This function does lazy skb cloning in the hope that most packets
1583 * are discarded by BPF.
1585 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
1586 * and skb->cb are mangled. It works because (and until) packets
1587 * falling here are owned by the current CPU. Output packets are cloned
1588 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1589 * sequentially, so if we return the skb to its original state on exit,
1590 * we will not harm anyone.
1593 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1594 struct packet_type *pt, struct net_device *orig_dev)
1597 struct sockaddr_ll *sll;
1598 struct packet_sock *po;
1599 u8 *skb_head = skb->data;
1600 int skb_len = skb->len;
1601 unsigned int snaplen, res;
1603 if (skb->pkt_type == PACKET_LOOPBACK)
1606 sk = pt->af_packet_priv;
1609 if (!net_eq(dev_net(dev), sock_net(sk)))
1614 if (dev->header_ops) {
1615 /* The device has an explicit notion of ll header,
1616 * exported to higher levels.
1618 * Otherwise, the device hides details of its frame
1619 * structure, so the corresponding packet head is
1620 * never delivered to the user.
1622 if (sk->sk_type != SOCK_DGRAM)
1623 skb_push(skb, skb->data - skb_mac_header(skb));
1624 else if (skb->pkt_type == PACKET_OUTGOING) {
1625 /* Special case: outgoing packets have ll header at head */
1626 skb_pull(skb, skb_network_offset(skb));
1632 res = run_filter(skb, sk, snaplen);
1634 goto drop_n_restore;
1638 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1641 if (skb_shared(skb)) {
1642 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1646 if (skb_head != skb->data) {
1647 skb->data = skb_head;
1654 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1657 sll = &PACKET_SKB_CB(skb)->sa.ll;
1658 sll->sll_family = AF_PACKET;
1659 sll->sll_hatype = dev->type;
1660 sll->sll_protocol = skb->protocol;
1661 sll->sll_pkttype = skb->pkt_type;
1662 if (unlikely(po->origdev))
1663 sll->sll_ifindex = orig_dev->ifindex;
1665 sll->sll_ifindex = dev->ifindex;
1667 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1669 PACKET_SKB_CB(skb)->origlen = skb->len;
1671 if (pskb_trim(skb, snaplen))
1674 skb_set_owner_r(skb, sk);
1678 /* drop conntrack reference */
1681 spin_lock(&sk->sk_receive_queue.lock);
1682 po->stats.tp_packets++;
1683 skb->dropcount = atomic_read(&sk->sk_drops);
1684 __skb_queue_tail(&sk->sk_receive_queue, skb);
1685 spin_unlock(&sk->sk_receive_queue.lock);
1686 sk->sk_data_ready(sk, skb->len);
1690 spin_lock(&sk->sk_receive_queue.lock);
1691 po->stats.tp_drops++;
1692 atomic_inc(&sk->sk_drops);
1693 spin_unlock(&sk->sk_receive_queue.lock);
1696 if (skb_head != skb->data && skb_shared(skb)) {
1697 skb->data = skb_head;
1705 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1706 struct packet_type *pt, struct net_device *orig_dev)
1709 struct packet_sock *po;
1710 struct sockaddr_ll *sll;
1712 struct tpacket_hdr *h1;
1713 struct tpacket2_hdr *h2;
1714 struct tpacket3_hdr *h3;
1717 u8 *skb_head = skb->data;
1718 int skb_len = skb->len;
1719 unsigned int snaplen, res;
1720 unsigned long status = TP_STATUS_USER;
1721 unsigned short macoff, netoff, hdrlen;
1722 struct sk_buff *copy_skb = NULL;
1725 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1727 if (skb->pkt_type == PACKET_LOOPBACK)
1730 sk = pt->af_packet_priv;
1733 if (!net_eq(dev_net(dev), sock_net(sk)))
1736 if (dev->header_ops) {
1737 if (sk->sk_type != SOCK_DGRAM)
1738 skb_push(skb, skb->data - skb_mac_header(skb));
1739 else if (skb->pkt_type == PACKET_OUTGOING) {
1740 /* Special case: outgoing packets have ll header at head */
1741 skb_pull(skb, skb_network_offset(skb));
1745 if (skb->ip_summed == CHECKSUM_PARTIAL)
1746 status |= TP_STATUS_CSUMNOTREADY;
1750 res = run_filter(skb, sk, snaplen);
1752 goto drop_n_restore;
1756 if (sk->sk_type == SOCK_DGRAM) {
1757 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1760 unsigned int maclen = skb_network_offset(skb);
1761 netoff = TPACKET_ALIGN(po->tp_hdrlen +
1762 (maclen < 16 ? 16 : maclen)) +
1764 macoff = netoff - maclen;
1766 if (po->tp_version <= TPACKET_V2) {
1767 if (macoff + snaplen > po->rx_ring.frame_size) {
1768 if (po->copy_thresh &&
1769 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1770 if (skb_shared(skb)) {
1771 copy_skb = skb_clone(skb, GFP_ATOMIC);
1773 copy_skb = skb_get(skb);
1774 skb_head = skb->data;
1777 skb_set_owner_r(copy_skb, sk);
1779 snaplen = po->rx_ring.frame_size - macoff;
1780 if ((int)snaplen < 0)
1784 spin_lock(&sk->sk_receive_queue.lock);
1785 h.raw = packet_current_rx_frame(po, skb,
1786 TP_STATUS_KERNEL, (macoff+snaplen));
1789 if (po->tp_version <= TPACKET_V2) {
1790 packet_increment_rx_head(po, &po->rx_ring);
1792 * LOSING will be reported until you read the stats,
1793 * because it's COR - Clear On Read.
1794 * Anyway, this is done for V1/V2 only, as V3 doesn't need it.
1797 if (po->stats.tp_drops)
1798 status |= TP_STATUS_LOSING;
1800 po->stats.tp_packets++;
1802 status |= TP_STATUS_COPY;
1803 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1805 spin_unlock(&sk->sk_receive_queue.lock);
1807 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1809 switch (po->tp_version) {
1811 h.h1->tp_len = skb->len;
1812 h.h1->tp_snaplen = snaplen;
1813 h.h1->tp_mac = macoff;
1814 h.h1->tp_net = netoff;
1815 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1816 && shhwtstamps->syststamp.tv64)
1817 tv = ktime_to_timeval(shhwtstamps->syststamp);
1818 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1819 && shhwtstamps->hwtstamp.tv64)
1820 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1821 else if (skb->tstamp.tv64)
1822 tv = ktime_to_timeval(skb->tstamp);
1824 do_gettimeofday(&tv);
1825 h.h1->tp_sec = tv.tv_sec;
1826 h.h1->tp_usec = tv.tv_usec;
1827 hdrlen = sizeof(*h.h1);
1830 h.h2->tp_len = skb->len;
1831 h.h2->tp_snaplen = snaplen;
1832 h.h2->tp_mac = macoff;
1833 h.h2->tp_net = netoff;
1834 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1835 && shhwtstamps->syststamp.tv64)
1836 ts = ktime_to_timespec(shhwtstamps->syststamp);
1837 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1838 && shhwtstamps->hwtstamp.tv64)
1839 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1840 else if (skb->tstamp.tv64)
1841 ts = ktime_to_timespec(skb->tstamp);
1843 getnstimeofday(&ts);
1844 h.h2->tp_sec = ts.tv_sec;
1845 h.h2->tp_nsec = ts.tv_nsec;
1846 if (vlan_tx_tag_present(skb)) {
1847 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1848 status |= TP_STATUS_VLAN_VALID;
1850 h.h2->tp_vlan_tci = 0;
1852 h.h2->tp_padding = 0;
1853 hdrlen = sizeof(*h.h2);
1856 /* tp_next_offset and vlan are already populated above,
1857 * so DON'T clear those fields here.
1859 h.h3->tp_status |= status;
1860 h.h3->tp_len = skb->len;
1861 h.h3->tp_snaplen = snaplen;
1862 h.h3->tp_mac = macoff;
1863 h.h3->tp_net = netoff;
1864 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1865 && shhwtstamps->syststamp.tv64)
1866 ts = ktime_to_timespec(shhwtstamps->syststamp);
1867 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1868 && shhwtstamps->hwtstamp.tv64)
1869 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1870 else if (skb->tstamp.tv64)
1871 ts = ktime_to_timespec(skb->tstamp);
1873 getnstimeofday(&ts);
1874 h.h3->tp_sec = ts.tv_sec;
1875 h.h3->tp_nsec = ts.tv_nsec;
1876 hdrlen = sizeof(*h.h3);
1882 sll = h.raw + TPACKET_ALIGN(hdrlen);
1883 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1884 sll->sll_family = AF_PACKET;
1885 sll->sll_hatype = dev->type;
1886 sll->sll_protocol = skb->protocol;
1887 sll->sll_pkttype = skb->pkt_type;
1888 if (unlikely(po->origdev))
1889 sll->sll_ifindex = orig_dev->ifindex;
1891 sll->sll_ifindex = dev->ifindex;
1894 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
1898 if (po->tp_version <= TPACKET_V2) {
1899 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1900 + macoff + snaplen);
1901 for (start = h.raw; start < end; start += PAGE_SIZE)
1902 flush_dcache_page(pgv_to_page(start));
1907 if (po->tp_version <= TPACKET_V2)
1908 __packet_set_status(po, h.raw, status);
1910 prb_clear_blk_fill_status(&po->rx_ring);
1912 sk->sk_data_ready(sk, 0);
1915 if (skb_head != skb->data && skb_shared(skb)) {
1916 skb->data = skb_head;
1924 po->stats.tp_drops++;
1925 spin_unlock(&sk->sk_receive_queue.lock);
1927 sk->sk_data_ready(sk, 0);
1928 kfree_skb(copy_skb);
1929 goto drop_n_restore;
1932 static void tpacket_destruct_skb(struct sk_buff *skb)
1934 struct packet_sock *po = pkt_sk(skb->sk);
1937 if (likely(po->tx_ring.pg_vec)) {
1938 ph = skb_shinfo(skb)->destructor_arg;
1939 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
1940 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1941 atomic_dec(&po->tx_ring.pending);
1942 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1948 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1949 void *frame, struct net_device *dev, int size_max,
1950 __be16 proto, unsigned char *addr, int hlen)
1953 struct tpacket_hdr *h1;
1954 struct tpacket2_hdr *h2;
1957 int to_write, offset, len, tp_len, nr_frags, len_max;
1958 struct socket *sock = po->sk.sk_socket;
1965 skb->protocol = proto;
1967 skb->priority = po->sk.sk_priority;
1968 skb->mark = po->sk.sk_mark;
1969 skb_shinfo(skb)->destructor_arg = ph.raw;
1971 switch (po->tp_version) {
1973 tp_len = ph.h2->tp_len;
1976 tp_len = ph.h1->tp_len;
1979 if (unlikely(tp_len > size_max)) {
1980 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
1984 skb_reserve(skb, hlen);
1985 skb_reset_network_header(skb);
1987 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1990 if (sock->type == SOCK_DGRAM) {
1991 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1993 if (unlikely(err < 0))
1995 } else if (dev->hard_header_len) {
1996 /* net device doesn't like empty head */
1997 if (unlikely(tp_len <= dev->hard_header_len)) {
1998 pr_err("packet size is too short (%d < %d)\n",
1999 tp_len, dev->hard_header_len);
2003 skb_push(skb, dev->hard_header_len);
2004 err = skb_store_bits(skb, 0, data,
2005 dev->hard_header_len);
2009 data += dev->hard_header_len;
2010 to_write -= dev->hard_header_len;
2014 offset = offset_in_page(data);
2015 len_max = PAGE_SIZE - offset;
2016 len = ((to_write > len_max) ? len_max : to_write);
2018 skb->data_len = to_write;
2019 skb->len += to_write;
2020 skb->truesize += to_write;
2021 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2023 while (likely(to_write)) {
2024 nr_frags = skb_shinfo(skb)->nr_frags;
2026 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2027 pr_err("Packet exceed the number of skb frags(%lu)\n",
2032 page = pgv_to_page(data);
2034 flush_dcache_page(page);
2036 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2039 len_max = PAGE_SIZE;
2040 len = ((to_write > len_max) ? len_max : to_write);
2046 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2048 struct sk_buff *skb;
2049 struct net_device *dev;
2051 bool need_rls_dev = false;
2052 int err, reserve = 0;
2054 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2055 int tp_len, size_max;
2056 unsigned char *addr;
2061 mutex_lock(&po->pg_vec_lock);
2064 if (saddr == NULL) {
2065 dev = po->prot_hook.dev;
2070 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2072 if (msg->msg_namelen < (saddr->sll_halen
2073 + offsetof(struct sockaddr_ll,
2076 proto = saddr->sll_protocol;
2077 addr = saddr->sll_addr;
2078 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2079 need_rls_dev = true;
2083 if (unlikely(dev == NULL))
2086 reserve = dev->hard_header_len;
2089 if (unlikely(!(dev->flags & IFF_UP)))
2092 size_max = po->tx_ring.frame_size
2093 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2095 if (size_max > dev->mtu + reserve)
2096 size_max = dev->mtu + reserve;
2099 ph = packet_current_frame(po, &po->tx_ring,
2100 TP_STATUS_SEND_REQUEST);
2102 if (unlikely(ph == NULL)) {
2107 status = TP_STATUS_SEND_REQUEST;
2108 hlen = LL_RESERVED_SPACE(dev);
2109 tlen = dev->needed_tailroom;
2110 skb = sock_alloc_send_skb(&po->sk,
2111 hlen + tlen + sizeof(struct sockaddr_ll),
2114 if (unlikely(skb == NULL))
2117 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2120 if (unlikely(tp_len < 0)) {
2122 __packet_set_status(po, ph,
2123 TP_STATUS_AVAILABLE);
2124 packet_increment_head(&po->tx_ring);
2128 status = TP_STATUS_WRONG_FORMAT;
2134 skb->destructor = tpacket_destruct_skb;
2135 __packet_set_status(po, ph, TP_STATUS_SENDING);
2136 atomic_inc(&po->tx_ring.pending);
2138 status = TP_STATUS_SEND_REQUEST;
2139 err = dev_queue_xmit(skb);
2140 if (unlikely(err > 0)) {
2141 err = net_xmit_errno(err);
2142 if (err && __packet_get_status(po, ph) ==
2143 TP_STATUS_AVAILABLE) {
2144 /* skb was destructed already */
2149 * skb was dropped but not destructed yet;
2150 * let's treat it like congestion or err < 0
2154 packet_increment_head(&po->tx_ring);
2156 } while (likely((ph != NULL) ||
2157 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2158 (atomic_read(&po->tx_ring.pending))))
2165 __packet_set_status(po, ph, status);
2171 mutex_unlock(&po->pg_vec_lock);
2175 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2176 size_t reserve, size_t len,
2177 size_t linear, int noblock,
2180 struct sk_buff *skb;
2182 /* Under a page? Don't bother with paged skb. */
2183 if (prepad + len < PAGE_SIZE || !linear)
2186 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2191 skb_reserve(skb, reserve);
2192 skb_put(skb, linear);
2193 skb->data_len = len - linear;
2194 skb->len += len - linear;
2199 static int packet_snd(struct socket *sock,
2200 struct msghdr *msg, size_t len)
2202 struct sock *sk = sock->sk;
2203 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2204 struct sk_buff *skb;
2205 struct net_device *dev;
2207 bool need_rls_dev = false;
2208 unsigned char *addr;
2209 int err, reserve = 0;
2210 struct virtio_net_hdr vnet_hdr = { 0 };
2213 struct packet_sock *po = pkt_sk(sk);
2214 unsigned short gso_type = 0;
2219 * Get and verify the address.
2222 if (saddr == NULL) {
2223 dev = po->prot_hook.dev;
2228 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2230 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2232 proto = saddr->sll_protocol;
2233 addr = saddr->sll_addr;
2234 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2235 need_rls_dev = true;
2241 if (sock->type == SOCK_RAW)
2242 reserve = dev->hard_header_len;
2245 if (!(dev->flags & IFF_UP))
2248 if (po->has_vnet_hdr) {
2249 vnet_hdr_len = sizeof(vnet_hdr);
2252 if (len < vnet_hdr_len)
2255 len -= vnet_hdr_len;
2257 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2262 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2263 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2265 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2266 vnet_hdr.csum_offset + 2;
2269 if (vnet_hdr.hdr_len > len)
2272 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2273 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2274 case VIRTIO_NET_HDR_GSO_TCPV4:
2275 gso_type = SKB_GSO_TCPV4;
2277 case VIRTIO_NET_HDR_GSO_TCPV6:
2278 gso_type = SKB_GSO_TCPV6;
2280 case VIRTIO_NET_HDR_GSO_UDP:
2281 gso_type = SKB_GSO_UDP;
2287 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2288 gso_type |= SKB_GSO_TCP_ECN;
2290 if (vnet_hdr.gso_size == 0)
2296 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2297 if (!netif_supports_nofcs(dev)) {
2298 err = -EPROTONOSUPPORT;
2301 extra_len = 4; /* We're doing our own CRC */
2305 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2309 hlen = LL_RESERVED_SPACE(dev);
2310 tlen = dev->needed_tailroom;
2311 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
2312 msg->msg_flags & MSG_DONTWAIT, &err);
2316 skb_set_network_header(skb, reserve);
2319 if (sock->type == SOCK_DGRAM &&
2320 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
2323 /* Returns -EFAULT on error */
2324 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
2327 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2331 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2332 /* Earlier code assumed this would be a VLAN pkt,
2333 * double-check this now that we have the actual
2336 struct ethhdr *ehdr;
2337 skb_reset_mac_header(skb);
2338 ehdr = eth_hdr(skb);
2339 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2345 skb->protocol = proto;
2347 skb->priority = sk->sk_priority;
2348 skb->mark = sk->sk_mark;
2350 if (po->has_vnet_hdr) {
2351 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2352 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2353 vnet_hdr.csum_offset)) {
2359 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2360 skb_shinfo(skb)->gso_type = gso_type;
2362 /* Header must be checked, and gso_segs computed. */
2363 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2364 skb_shinfo(skb)->gso_segs = 0;
2366 len += vnet_hdr_len;
2369 if (unlikely(extra_len == 4))
2376 err = dev_queue_xmit(skb);
2377 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2388 if (dev && need_rls_dev)
2394 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2395 struct msghdr *msg, size_t len)
2397 struct sock *sk = sock->sk;
2398 struct packet_sock *po = pkt_sk(sk);
2399 if (po->tx_ring.pg_vec)
2400 return tpacket_snd(po, msg);
2402 return packet_snd(sock, msg, len);
2406 * Close a PACKET socket. This is fairly simple. We immediately go
2407 * to 'closed' state and remove our protocol entry in the device list.
2410 static int packet_release(struct socket *sock)
2412 struct sock *sk = sock->sk;
2413 struct packet_sock *po;
2415 union tpacket_req_u req_u;
2423 spin_lock_bh(&net->packet.sklist_lock);
2424 sk_del_node_init_rcu(sk);
2425 sock_prot_inuse_add(net, sk->sk_prot, -1);
2426 spin_unlock_bh(&net->packet.sklist_lock);
2428 spin_lock(&po->bind_lock);
2429 unregister_prot_hook(sk, false);
2430 if (po->prot_hook.dev) {
2431 dev_put(po->prot_hook.dev);
2432 po->prot_hook.dev = NULL;
2434 spin_unlock(&po->bind_lock);
2436 packet_flush_mclist(sk);
2438 memset(&req_u, 0, sizeof(req_u));
2440 if (po->rx_ring.pg_vec)
2441 packet_set_ring(sk, &req_u, 1, 0);
2443 if (po->tx_ring.pg_vec)
2444 packet_set_ring(sk, &req_u, 1, 1);
2450 * Now the socket is dead. No more input will appear.
2457 skb_queue_purge(&sk->sk_receive_queue);
2458 sk_refcnt_debug_release(sk);
2465 * Attach a packet hook.
2468 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
2470 struct packet_sock *po = pkt_sk(sk);
2481 spin_lock(&po->bind_lock);
2482 unregister_prot_hook(sk, true);
2484 po->prot_hook.type = protocol;
2485 if (po->prot_hook.dev)
2486 dev_put(po->prot_hook.dev);
2487 po->prot_hook.dev = dev;
2489 po->ifindex = dev ? dev->ifindex : 0;
2494 if (!dev || (dev->flags & IFF_UP)) {
2495 register_prot_hook(sk);
2497 sk->sk_err = ENETDOWN;
2498 if (!sock_flag(sk, SOCK_DEAD))
2499 sk->sk_error_report(sk);
2503 spin_unlock(&po->bind_lock);
2509 * Bind a packet socket to a device
2512 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2515 struct sock *sk = sock->sk;
2517 struct net_device *dev;
2524 if (addr_len != sizeof(struct sockaddr))
2526 strlcpy(name, uaddr->sa_data, sizeof(name));
2528 dev = dev_get_by_name(sock_net(sk), name);
2530 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
2534 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2536 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2537 struct sock *sk = sock->sk;
2538 struct net_device *dev = NULL;
2546 if (addr_len < sizeof(struct sockaddr_ll))
2548 if (sll->sll_family != AF_PACKET)
2551 if (sll->sll_ifindex) {
2553 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
2557 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
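/* A hedged user-space sketch of the bind path above (the interface name is an
 * assumption for illustration):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */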
2563 static struct proto packet_proto = {
2565 .owner = THIS_MODULE,
2566 .obj_size = sizeof(struct packet_sock),
2570 * Create a packet of type SOCK_PACKET.
2573 static int packet_create(struct net *net, struct socket *sock, int protocol,
2577 struct packet_sock *po;
2578 __be16 proto = (__force __be16)protocol; /* weird, but documented */
2581 if (!capable(CAP_NET_RAW))
2583 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2584 sock->type != SOCK_PACKET)
2585 return -ESOCKTNOSUPPORT;
2587 sock->state = SS_UNCONNECTED;
2590 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2594 sock->ops = &packet_ops;
2595 if (sock->type == SOCK_PACKET)
2596 sock->ops = &packet_ops_spkt;
2598 sock_init_data(sock, sk);
2601 sk->sk_family = PF_PACKET;
2604 sk->sk_destruct = packet_sock_destruct;
2605 sk_refcnt_debug_inc(sk);
2608 * Attach a protocol block
2611 spin_lock_init(&po->bind_lock);
2612 mutex_init(&po->pg_vec_lock);
2613 po->prot_hook.func = packet_rcv;
2615 if (sock->type == SOCK_PACKET)
2616 po->prot_hook.func = packet_rcv_spkt;
2618 po->prot_hook.af_packet_priv = sk;
2621 po->prot_hook.type = proto;
2622 register_prot_hook(sk);
2625 spin_lock_bh(&net->packet.sklist_lock);
2626 sk_add_node_rcu(sk, &net->packet.sklist);
2627 sock_prot_inuse_add(net, &packet_proto, 1);
2628 spin_unlock_bh(&net->packet.sklist_lock);
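/*
 * Illustrative user-space sketch (assumption): creating the socket
 * requires CAP_NET_RAW.  SOCK_RAW delivers frames with the link-layer
 * header in place, SOCK_DGRAM with it removed; a protocol of 0 means
 * nothing is received until the socket is bound to one.
 *
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	if (fd < 0)
 *		perror("socket");
 */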
2635 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2637 struct sock_exterr_skb *serr;
2638 struct sk_buff *skb, *skb2;
2642 skb = skb_dequeue(&sk->sk_error_queue);
2648 msg->msg_flags |= MSG_TRUNC;
2651 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2655 sock_recv_timestamp(msg, sk, skb);
2657 serr = SKB_EXT_ERR(skb);
2658 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2659 sizeof(serr->ee), &serr->ee);
2661 msg->msg_flags |= MSG_ERRQUEUE;
2664 /* Reset and regenerate socket error */
2665 spin_lock_bh(&sk->sk_error_queue.lock);
2667 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2668 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2669 spin_unlock_bh(&sk->sk_error_queue.lock);
2670 sk->sk_error_report(sk);
2672 spin_unlock_bh(&sk->sk_error_queue.lock);
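/*
 * Illustrative user-space sketch (assumption): transmit timestamps that
 * were queued on the error queue (the socket needs SO_TIMESTAMPING with
 * a TX flag enabled) are read back with MSG_ERRQUEUE.  The
 * PACKET_TX_TIMESTAMP control message put above carries the
 * sock_extended_err; the timestamp itself arrives in the usual
 * SO_TIMESTAMPING control message.
 *
 *	char ctrl[256];
 *	struct msghdr msg = { .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };
 *	struct cmsghdr *cm;
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0)
 *		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *			if (cm->cmsg_level == SOL_PACKET &&
 *			    cm->cmsg_type == PACKET_TX_TIMESTAMP)
 *				;  // CMSG_DATA(cm) -> struct sock_extended_err
 */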
2681 * Pull a packet from our receive queue and hand it to the user.
2682 * If necessary we block.
2685 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2686 struct msghdr *msg, size_t len, int flags)
2688 struct sock *sk = sock->sk;
2689 struct sk_buff *skb;
2691 struct sockaddr_ll *sll;
2692 int vnet_hdr_len = 0;
2695 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2699 /* What error should we return now? EUNATTACH? */
2700 if (pkt_sk(sk)->ifindex < 0)
2704 if (flags & MSG_ERRQUEUE) {
2705 err = packet_recv_error(sk, msg, len);
2710 * Call the generic datagram receiver. This handles all sorts
2711 * of horrible races and re-entrancy so we can forget about it
2712 * in the protocol layers.
2714 	 *	Now it will return ENETDOWN if the device has just gone down,
2715 * but then it will block.
2718 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2721 * An error occurred so return it. Because skb_recv_datagram()
2722 	 *	handles the blocking, we need not see or worry about blocking retries.
2729 if (pkt_sk(sk)->has_vnet_hdr) {
2730 struct virtio_net_hdr vnet_hdr = { 0 };
2733 vnet_hdr_len = sizeof(vnet_hdr);
2734 if (len < vnet_hdr_len)
2737 len -= vnet_hdr_len;
2739 if (skb_is_gso(skb)) {
2740 struct skb_shared_info *sinfo = skb_shinfo(skb);
2742 /* This is a hint as to how much should be linear. */
2743 vnet_hdr.hdr_len = skb_headlen(skb);
2744 vnet_hdr.gso_size = sinfo->gso_size;
2745 if (sinfo->gso_type & SKB_GSO_TCPV4)
2746 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2747 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2748 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2749 else if (sinfo->gso_type & SKB_GSO_UDP)
2750 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2751 else if (sinfo->gso_type & SKB_GSO_FCOE)
2755 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2756 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2758 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2760 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2761 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2762 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
2763 vnet_hdr.csum_offset = skb->csum_offset;
2764 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2765 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2766 } /* else everything is zero */
2768 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2775 * If the address length field is there to be filled in, we fill
2779 sll = &PACKET_SKB_CB(skb)->sa.ll;
2780 if (sock->type == SOCK_PACKET)
2781 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2783 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2786 * You lose any data beyond the buffer you gave. If it worries a
2787 	 *	user program, it can ask the device for its MTU anyway.
2793 msg->msg_flags |= MSG_TRUNC;
2796 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2800 sock_recv_ts_and_drops(msg, sk, skb);
2803 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2806 if (pkt_sk(sk)->auxdata) {
2807 struct tpacket_auxdata aux;
2809 aux.tp_status = TP_STATUS_USER;
2810 if (skb->ip_summed == CHECKSUM_PARTIAL)
2811 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2812 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2813 aux.tp_snaplen = skb->len;
2815 aux.tp_net = skb_network_offset(skb);
2816 if (vlan_tx_tag_present(skb)) {
2817 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2818 aux.tp_status |= TP_STATUS_VLAN_VALID;
2820 aux.tp_vlan_tci = 0;
2823 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
2827 * Free or return the buffer as appropriate. Again this
2828 * hides all the races and re-entrancy issues from us.
2830 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
2833 skb_free_datagram(sk, skb);
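/*
 * Illustrative user-space sketch (assumption): with PACKET_AUXDATA
 * enabled, each recvmsg() carries a struct tpacket_auxdata control
 * message holding the original length, snap length, VLAN tag and a
 * TP_STATUS_CSUMNOTREADY hint for partially checksummed frames.
 *
 *	int one = 1;
 *	char buf[2048], ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *	};
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_PACKET && cm->cmsg_type == PACKET_AUXDATA)
 *			;  // CMSG_DATA(cm) -> struct tpacket_auxdata
 */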
2838 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2839 int *uaddr_len, int peer)
2841 struct net_device *dev;
2842 struct sock *sk = sock->sk;
2847 uaddr->sa_family = AF_PACKET;
2849 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2851 strncpy(uaddr->sa_data, dev->name, 14);
2853 memset(uaddr->sa_data, 0, 14);
2855 *uaddr_len = sizeof(*uaddr);
2860 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2861 int *uaddr_len, int peer)
2863 struct net_device *dev;
2864 struct sock *sk = sock->sk;
2865 struct packet_sock *po = pkt_sk(sk);
2866 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2871 sll->sll_family = AF_PACKET;
2872 sll->sll_ifindex = po->ifindex;
2873 sll->sll_protocol = po->num;
2874 sll->sll_pkttype = 0;
2876 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
2878 sll->sll_hatype = dev->type;
2879 sll->sll_halen = dev->addr_len;
2880 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
2882 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2886 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
2891 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2895 case PACKET_MR_MULTICAST:
2896 if (i->alen != dev->addr_len)
2899 return dev_mc_add(dev, i->addr);
2901 return dev_mc_del(dev, i->addr);
2903 case PACKET_MR_PROMISC:
2904 return dev_set_promiscuity(dev, what);
2906 case PACKET_MR_ALLMULTI:
2907 return dev_set_allmulti(dev, what);
2909 case PACKET_MR_UNICAST:
2910 if (i->alen != dev->addr_len)
2913 return dev_uc_add(dev, i->addr);
2915 return dev_uc_del(dev, i->addr);
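/*
 * Illustrative user-space sketch (assumption): the membership options
 * handled above are how a packet socket puts a device into promiscuous
 * or all-multicast mode, or subscribes it to an extra unicast/multicast
 * address; the setting is dropped again on PACKET_DROP_MEMBERSHIP or
 * when the socket is released.  "eth0" is an assumed interface name.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */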
2923 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2925 for ( ; i; i = i->next) {
2926 if (i->ifindex == dev->ifindex)
2927 packet_dev_mc(dev, i, what);
2931 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
2933 struct packet_sock *po = pkt_sk(sk);
2934 struct packet_mclist *ml, *i;
2935 struct net_device *dev;
2941 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
2946 if (mreq->mr_alen > dev->addr_len)
2950 i = kmalloc(sizeof(*i), GFP_KERNEL);
2955 for (ml = po->mclist; ml; ml = ml->next) {
2956 if (ml->ifindex == mreq->mr_ifindex &&
2957 ml->type == mreq->mr_type &&
2958 ml->alen == mreq->mr_alen &&
2959 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2961 /* Free the new element ... */
2967 i->type = mreq->mr_type;
2968 i->ifindex = mreq->mr_ifindex;
2969 i->alen = mreq->mr_alen;
2970 memcpy(i->addr, mreq->mr_address, i->alen);
2972 i->next = po->mclist;
2974 err = packet_dev_mc(dev, i, 1);
2976 po->mclist = i->next;
2985 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
2987 struct packet_mclist *ml, **mlp;
2991 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2992 if (ml->ifindex == mreq->mr_ifindex &&
2993 ml->type == mreq->mr_type &&
2994 ml->alen == mreq->mr_alen &&
2995 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2996 if (--ml->count == 0) {
2997 struct net_device *dev;
2999 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3001 packet_dev_mc(dev, ml, -1);
3009 return -EADDRNOTAVAIL;
3012 static void packet_flush_mclist(struct sock *sk)
3014 struct packet_sock *po = pkt_sk(sk);
3015 struct packet_mclist *ml;
3021 while ((ml = po->mclist) != NULL) {
3022 struct net_device *dev;
3024 po->mclist = ml->next;
3025 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3027 packet_dev_mc(dev, ml, -1);
3034 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3036 struct sock *sk = sock->sk;
3037 struct packet_sock *po = pkt_sk(sk);
3040 if (level != SOL_PACKET)
3041 return -ENOPROTOOPT;
3044 case PACKET_ADD_MEMBERSHIP:
3045 case PACKET_DROP_MEMBERSHIP:
3047 struct packet_mreq_max mreq;
3049 memset(&mreq, 0, sizeof(mreq));
3050 if (len < sizeof(struct packet_mreq))
3052 if (len > sizeof(mreq))
3054 if (copy_from_user(&mreq, optval, len))
3056 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3058 if (optname == PACKET_ADD_MEMBERSHIP)
3059 ret = packet_mc_add(sk, &mreq);
3061 ret = packet_mc_drop(sk, &mreq);
3065 case PACKET_RX_RING:
3066 case PACKET_TX_RING:
3068 union tpacket_req_u req_u;
3071 switch (po->tp_version) {
3074 len = sizeof(req_u.req);
3078 len = sizeof(req_u.req3);
3083 if (pkt_sk(sk)->has_vnet_hdr)
3085 if (copy_from_user(&req_u.req, optval, len))
3087 return packet_set_ring(sk, &req_u, 0,
3088 optname == PACKET_TX_RING);
3090 case PACKET_COPY_THRESH:
3094 if (optlen != sizeof(val))
3096 if (copy_from_user(&val, optval, sizeof(val)))
3099 pkt_sk(sk)->copy_thresh = val;
3102 case PACKET_VERSION:
3106 if (optlen != sizeof(val))
3108 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3110 if (copy_from_user(&val, optval, sizeof(val)))
3116 po->tp_version = val;
3122 case PACKET_RESERVE:
3126 if (optlen != sizeof(val))
3128 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3130 if (copy_from_user(&val, optval, sizeof(val)))
3132 po->tp_reserve = val;
3139 if (optlen != sizeof(val))
3141 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3143 if (copy_from_user(&val, optval, sizeof(val)))
3145 po->tp_loss = !!val;
3148 case PACKET_AUXDATA:
3152 if (optlen < sizeof(val))
3154 if (copy_from_user(&val, optval, sizeof(val)))
3157 po->auxdata = !!val;
3160 case PACKET_ORIGDEV:
3164 if (optlen < sizeof(val))
3166 if (copy_from_user(&val, optval, sizeof(val)))
3169 po->origdev = !!val;
3172 case PACKET_VNET_HDR:
3176 if (sock->type != SOCK_RAW)
3178 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3180 if (optlen < sizeof(val))
3182 if (copy_from_user(&val, optval, sizeof(val)))
3185 po->has_vnet_hdr = !!val;
3188 case PACKET_TIMESTAMP:
3192 if (optlen != sizeof(val))
3194 if (copy_from_user(&val, optval, sizeof(val)))
3197 po->tp_tstamp = val;
3204 if (optlen != sizeof(val))
3206 if (copy_from_user(&val, optval, sizeof(val)))
3209 return fanout_add(sk, val & 0xffff, val >> 16);
3212 return -ENOPROTOOPT;
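/*
 * Illustrative user-space sketch (assumption): ring setup order matters
 * above -- PACKET_VERSION (and PACKET_RESERVE) must be chosen while no
 * ring exists, then PACKET_RX_RING/PACKET_TX_RING create the ring.
 * The geometry values are assumptions for the example.
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,		// multiple of the page size
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,		// multiple of TPACKET_ALIGNMENT
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */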
3216 static int packet_getsockopt(struct socket *sock, int level, int optname,
3217 char __user *optval, int __user *optlen)
3220 int val, lv = sizeof(val);
3221 struct sock *sk = sock->sk;
3222 struct packet_sock *po = pkt_sk(sk);
3224 struct tpacket_stats st;
3225 union tpacket_stats_u st_u;
3227 if (level != SOL_PACKET)
3228 return -ENOPROTOOPT;
3230 if (get_user(len, optlen))
3237 case PACKET_STATISTICS:
3238 spin_lock_bh(&sk->sk_receive_queue.lock);
3239 if (po->tp_version == TPACKET_V3) {
3240 lv = sizeof(struct tpacket_stats_v3);
3241 memcpy(&st_u.stats3, &po->stats,
3242 sizeof(struct tpacket_stats));
3243 st_u.stats3.tp_freeze_q_cnt =
3244 po->stats_u.stats3.tp_freeze_q_cnt;
3245 st_u.stats3.tp_packets += po->stats.tp_drops;
3246 data = &st_u.stats3;
3248 lv = sizeof(struct tpacket_stats);
3250 st.tp_packets += st.tp_drops;
3253 memset(&po->stats, 0, sizeof(st));
3254 spin_unlock_bh(&sk->sk_receive_queue.lock);
3256 case PACKET_AUXDATA:
3259 case PACKET_ORIGDEV:
3262 case PACKET_VNET_HDR:
3263 val = po->has_vnet_hdr;
3265 case PACKET_VERSION:
3266 val = po->tp_version;
3269 if (len > sizeof(int))
3271 if (copy_from_user(&val, optval, len))
3275 val = sizeof(struct tpacket_hdr);
3278 val = sizeof(struct tpacket2_hdr);
3281 val = sizeof(struct tpacket3_hdr);
3287 case PACKET_RESERVE:
3288 val = po->tp_reserve;
3293 case PACKET_TIMESTAMP:
3294 val = po->tp_tstamp;
3298 ((u32)po->fanout->id |
3299 ((u32)po->fanout->type << 16)) :
3303 return -ENOPROTOOPT;
3308 if (put_user(len, optlen))
3310 if (copy_to_user(optval, data, len))
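/*
 * Illustrative user-space sketch (assumption): PACKET_STATISTICS
 * returns the counters accumulated since the previous query and then
 * clears them; tp_packets includes the dropped frames.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("seen %u, dropped %u\n", st.tp_packets, st.tp_drops);
 */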
3316 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3319 struct hlist_node *node;
3320 struct net_device *dev = data;
3321 struct net *net = dev_net(dev);
3324 sk_for_each_rcu(sk, node, &net->packet.sklist) {
3325 struct packet_sock *po = pkt_sk(sk);
3328 case NETDEV_UNREGISTER:
3330 packet_dev_mclist(dev, po->mclist, -1);
3334 if (dev->ifindex == po->ifindex) {
3335 spin_lock(&po->bind_lock);
3337 __unregister_prot_hook(sk, false);
3338 sk->sk_err = ENETDOWN;
3339 if (!sock_flag(sk, SOCK_DEAD))
3340 sk->sk_error_report(sk);
3342 if (msg == NETDEV_UNREGISTER) {
3344 if (po->prot_hook.dev)
3345 dev_put(po->prot_hook.dev);
3346 po->prot_hook.dev = NULL;
3348 spin_unlock(&po->bind_lock);
3352 if (dev->ifindex == po->ifindex) {
3353 spin_lock(&po->bind_lock);
3355 register_prot_hook(sk);
3356 spin_unlock(&po->bind_lock);
3366 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3369 struct sock *sk = sock->sk;
3374 int amount = sk_wmem_alloc_get(sk);
3376 return put_user(amount, (int __user *)arg);
3380 struct sk_buff *skb;
3383 spin_lock_bh(&sk->sk_receive_queue.lock);
3384 skb = skb_peek(&sk->sk_receive_queue);
3387 spin_unlock_bh(&sk->sk_receive_queue.lock);
3388 return put_user(amount, (int __user *)arg);
3391 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3393 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3403 case SIOCGIFBRDADDR:
3404 case SIOCSIFBRDADDR:
3405 case SIOCGIFNETMASK:
3406 case SIOCSIFNETMASK:
3407 case SIOCGIFDSTADDR:
3408 case SIOCSIFDSTADDR:
3410 return inet_dgram_ops.ioctl(sock, cmd, arg);
3414 return -ENOIOCTLCMD;
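/*
 * Illustrative user-space sketch (assumption): SIOCOUTQ reports the
 * bytes still queued for transmit, SIOCINQ the size of the next frame
 * waiting in the receive queue (0 if none).
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int pending;
 *	ioctl(fd, SIOCINQ, &pending);
 */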
3419 static unsigned int packet_poll(struct file *file, struct socket *sock,
3422 struct sock *sk = sock->sk;
3423 struct packet_sock *po = pkt_sk(sk);
3424 unsigned int mask = datagram_poll(file, sock, wait);
3426 spin_lock_bh(&sk->sk_receive_queue.lock);
3427 if (po->rx_ring.pg_vec) {
3428 if (!packet_previous_rx_frame(po, &po->rx_ring,
3430 mask |= POLLIN | POLLRDNORM;
3432 spin_unlock_bh(&sk->sk_receive_queue.lock);
3433 spin_lock_bh(&sk->sk_write_queue.lock);
3434 if (po->tx_ring.pg_vec) {
3435 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3436 mask |= POLLOUT | POLLWRNORM;
3438 spin_unlock_bh(&sk->sk_write_queue.lock);
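/*
 * Illustrative user-space sketch (assumption): with a mapped RX ring,
 * poll() only wakes the reader; the frame itself is claimed by testing
 * TP_STATUS_USER in the ring slot and handed back by resetting the
 * status.  "frame" is a hypothetical pointer into a TPACKET_V2 map.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr = frame;     // current slot in the mapped ring
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	// ... consume the frame ...
 *	hdr->tp_status = TP_STATUS_KERNEL;    // return the slot to the kernel
 */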
3443 /* Dirty? Well, I still have not learned a better way to account
3447 static void packet_mm_open(struct vm_area_struct *vma)
3449 struct file *file = vma->vm_file;
3450 struct socket *sock = file->private_data;
3451 struct sock *sk = sock->sk;
3454 atomic_inc(&pkt_sk(sk)->mapped);
3457 static void packet_mm_close(struct vm_area_struct *vma)
3459 struct file *file = vma->vm_file;
3460 struct socket *sock = file->private_data;
3461 struct sock *sk = sock->sk;
3464 atomic_dec(&pkt_sk(sk)->mapped);
3467 static const struct vm_operations_struct packet_mmap_ops = {
3468 .open = packet_mm_open,
3469 .close = packet_mm_close,
3472 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3477 for (i = 0; i < len; i++) {
3478 if (likely(pg_vec[i].buffer)) {
3479 if (is_vmalloc_addr(pg_vec[i].buffer))
3480 vfree(pg_vec[i].buffer);
3482 free_pages((unsigned long)pg_vec[i].buffer,
3484 pg_vec[i].buffer = NULL;
3490 static char *alloc_one_pg_vec_page(unsigned long order)
3492 char *buffer = NULL;
3493 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3494 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3496 buffer = (char *) __get_free_pages(gfp_flags, order);
3502 * __get_free_pages failed, fall back to vmalloc
3504 buffer = vzalloc((1 << order) * PAGE_SIZE);
3510 	 * vmalloc failed, let's dig into swap here
3512 gfp_flags &= ~__GFP_NORETRY;
3513 buffer = (char *)__get_free_pages(gfp_flags, order);
3518 * complete and utter failure
3523 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3525 unsigned int block_nr = req->tp_block_nr;
3529 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3530 if (unlikely(!pg_vec))
3533 for (i = 0; i < block_nr; i++) {
3534 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3535 if (unlikely(!pg_vec[i].buffer))
3536 goto out_free_pgvec;
3543 free_pg_vec(pg_vec, order, block_nr);
3548 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3549 int closing, int tx_ring)
3551 struct pgv *pg_vec = NULL;
3552 struct packet_sock *po = pkt_sk(sk);
3553 int was_running, order = 0;
3554 struct packet_ring_buffer *rb;
3555 struct sk_buff_head *rb_queue;
3558 	/* Alias added to keep code churn minimal */
3559 struct tpacket_req *req = &req_u->req;
3561 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3562 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3563 WARN(1, "Tx-ring is not supported.\n");
3567 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3568 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3572 if (atomic_read(&po->mapped))
3574 if (atomic_read(&rb->pending))
3578 if (req->tp_block_nr) {
3579 /* Sanity tests and some calculations */
3581 if (unlikely(rb->pg_vec))
3584 switch (po->tp_version) {
3586 po->tp_hdrlen = TPACKET_HDRLEN;
3589 po->tp_hdrlen = TPACKET2_HDRLEN;
3592 po->tp_hdrlen = TPACKET3_HDRLEN;
3597 if (unlikely((int)req->tp_block_size <= 0))
3599 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3601 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3604 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3607 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3608 if (unlikely(rb->frames_per_block <= 0))
3610 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3615 order = get_order(req->tp_block_size);
3616 pg_vec = alloc_pg_vec(req, order);
3617 if (unlikely(!pg_vec))
3619 switch (po->tp_version) {
3621 /* Transmit path is not supported. We checked
3622 * it above but just being paranoid
3625 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3634 if (unlikely(req->tp_frame_nr))
3640 /* Detach socket from network */
3641 spin_lock(&po->bind_lock);
3642 was_running = po->running;
3646 __unregister_prot_hook(sk, false);
3648 spin_unlock(&po->bind_lock);
3653 mutex_lock(&po->pg_vec_lock);
3654 if (closing || atomic_read(&po->mapped) == 0) {
3656 spin_lock_bh(&rb_queue->lock);
3657 swap(rb->pg_vec, pg_vec);
3658 rb->frame_max = (req->tp_frame_nr - 1);
3660 rb->frame_size = req->tp_frame_size;
3661 spin_unlock_bh(&rb_queue->lock);
3663 swap(rb->pg_vec_order, order);
3664 swap(rb->pg_vec_len, req->tp_block_nr);
3666 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3667 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3668 tpacket_rcv : packet_rcv;
3669 skb_queue_purge(rb_queue);
3670 if (atomic_read(&po->mapped))
3671 pr_err("packet_mmap: vma is busy: %d\n",
3672 atomic_read(&po->mapped));
3674 mutex_unlock(&po->pg_vec_lock);
3676 spin_lock(&po->bind_lock);
3679 register_prot_hook(sk);
3681 spin_unlock(&po->bind_lock);
3682 if (closing && (po->tp_version > TPACKET_V2)) {
3683 /* Because we don't support block-based V3 on tx-ring */
3685 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3690 free_pg_vec(pg_vec, order, req->tp_block_nr);
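/*
 * Illustrative user-space sketch (assumption): packet_set_ring() above
 * insists that tp_block_size is page aligned, tp_frame_size is a
 * multiple of TPACKET_ALIGNMENT, and tp_frame_nr equals
 * frames-per-block times tp_block_nr.  After selecting TPACKET_V3 with
 * PACKET_VERSION, a request also carries the block-retire timeout; the
 * numbers here are assumptions for the example.
 *
 *	struct tpacket_req3 req3 = {
 *		.tp_block_size     = 1 << 22,          // 4 MiB blocks, page aligned
 *		.tp_block_nr       = 64,
 *		.tp_frame_size     = 2048,
 *		.tp_frame_nr       = ((1 << 22) / 2048) * 64,
 *		.tp_retire_blk_tov = 60,               // retire a block after 60 ms
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req3, sizeof(req3));
 */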
3695 static int packet_mmap(struct file *file, struct socket *sock,
3696 struct vm_area_struct *vma)
3698 struct sock *sk = sock->sk;
3699 struct packet_sock *po = pkt_sk(sk);
3700 unsigned long size, expected_size;
3701 struct packet_ring_buffer *rb;
3702 unsigned long start;
3709 mutex_lock(&po->pg_vec_lock);
3712 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3714 expected_size += rb->pg_vec_len
3720 if (expected_size == 0)
3723 size = vma->vm_end - vma->vm_start;
3724 if (size != expected_size)
3727 start = vma->vm_start;
3728 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3729 if (rb->pg_vec == NULL)
3732 for (i = 0; i < rb->pg_vec_len; i++) {
3734 void *kaddr = rb->pg_vec[i].buffer;
3737 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3738 page = pgv_to_page(kaddr);
3739 err = vm_insert_page(vma, start, page);
3748 atomic_inc(&po->mapped);
3749 vma->vm_ops = &packet_mmap_ops;
3753 mutex_unlock(&po->pg_vec_lock);
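/*
 * Illustrative user-space sketch (assumption): the mapping must cover
 * the configured rings exactly -- RX first, then TX if present -- in a
 * single mmap() at offset 0 whose length is the sum of
 * tp_block_size * tp_block_nr over both rings.  With only an RX ring:
 *
 *	size_t sz = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (ring == MAP_FAILED)
 *		perror("mmap");
 */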
3757 static const struct proto_ops packet_ops_spkt = {
3758 .family = PF_PACKET,
3759 .owner = THIS_MODULE,
3760 .release = packet_release,
3761 .bind = packet_bind_spkt,
3762 .connect = sock_no_connect,
3763 .socketpair = sock_no_socketpair,
3764 .accept = sock_no_accept,
3765 .getname = packet_getname_spkt,
3766 .poll = datagram_poll,
3767 .ioctl = packet_ioctl,
3768 .listen = sock_no_listen,
3769 .shutdown = sock_no_shutdown,
3770 .setsockopt = sock_no_setsockopt,
3771 .getsockopt = sock_no_getsockopt,
3772 .sendmsg = packet_sendmsg_spkt,
3773 .recvmsg = packet_recvmsg,
3774 .mmap = sock_no_mmap,
3775 .sendpage = sock_no_sendpage,
3778 static const struct proto_ops packet_ops = {
3779 .family = PF_PACKET,
3780 .owner = THIS_MODULE,
3781 .release = packet_release,
3782 .bind = packet_bind,
3783 .connect = sock_no_connect,
3784 .socketpair = sock_no_socketpair,
3785 .accept = sock_no_accept,
3786 .getname = packet_getname,
3787 .poll = packet_poll,
3788 .ioctl = packet_ioctl,
3789 .listen = sock_no_listen,
3790 .shutdown = sock_no_shutdown,
3791 .setsockopt = packet_setsockopt,
3792 .getsockopt = packet_getsockopt,
3793 .sendmsg = packet_sendmsg,
3794 .recvmsg = packet_recvmsg,
3795 .mmap = packet_mmap,
3796 .sendpage = sock_no_sendpage,
3799 static const struct net_proto_family packet_family_ops = {
3800 .family = PF_PACKET,
3801 .create = packet_create,
3802 .owner = THIS_MODULE,
3805 static struct notifier_block packet_netdev_notifier = {
3806 .notifier_call = packet_notifier,
3809 #ifdef CONFIG_PROC_FS
3811 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
3814 struct net *net = seq_file_net(seq);
3817 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
3820 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3822 struct net *net = seq_file_net(seq);
3823 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
3826 static void packet_seq_stop(struct seq_file *seq, void *v)
3832 static int packet_seq_show(struct seq_file *seq, void *v)
3834 if (v == SEQ_START_TOKEN)
3835 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3837 struct sock *s = sk_entry(v);
3838 const struct packet_sock *po = pkt_sk(s);
3841 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
3843 atomic_read(&s->sk_refcnt),
3848 atomic_read(&s->sk_rmem_alloc),
3856 static const struct seq_operations packet_seq_ops = {
3857 .start = packet_seq_start,
3858 .next = packet_seq_next,
3859 .stop = packet_seq_stop,
3860 .show = packet_seq_show,
3863 static int packet_seq_open(struct inode *inode, struct file *file)
3865 return seq_open_net(inode, file, &packet_seq_ops,
3866 sizeof(struct seq_net_private));
3869 static const struct file_operations packet_seq_fops = {
3870 .owner = THIS_MODULE,
3871 .open = packet_seq_open,
3873 .llseek = seq_lseek,
3874 .release = seq_release_net,
3879 static int __net_init packet_net_init(struct net *net)
3881 spin_lock_init(&net->packet.sklist_lock);
3882 INIT_HLIST_HEAD(&net->packet.sklist);
3884 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3890 static void __net_exit packet_net_exit(struct net *net)
3892 proc_net_remove(net, "packet");
3895 static struct pernet_operations packet_net_ops = {
3896 .init = packet_net_init,
3897 .exit = packet_net_exit,
3901 static void __exit packet_exit(void)
3903 unregister_netdevice_notifier(&packet_netdev_notifier);
3904 unregister_pernet_subsys(&packet_net_ops);
3905 sock_unregister(PF_PACKET);
3906 proto_unregister(&packet_proto);
3909 static int __init packet_init(void)
3911 int rc = proto_register(&packet_proto, 0);
3916 sock_register(&packet_family_ops);
3917 register_pernet_subsys(&packet_net_ops);
3918 register_netdevice_notifier(&packet_netdev_notifier);
3923 module_init(packet_init);
3924 module_exit(packet_exit);
3925 MODULE_LICENSE("GPL");
3926 MODULE_ALIAS_NETPROTO(PF_PACKET);