2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
42 * Johann Baudy : Added TX RING.
43 * Chetan Loke : Implemented TPACKET_V3 block abstraction
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
48 * This program is free software; you can redistribute it and/or
49 * modify it under the terms of the GNU General Public License
50 * as published by the Free Software Foundation; either version
51 * 2 of the License, or (at your option) any later version.
55 #include <linux/types.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <asm/uaccess.h>
77 #include <asm/ioctls.h>
79 #include <asm/cacheflush.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/poll.h>
84 #include <linux/module.h>
85 #include <linux/init.h>
86 #include <linux/mutex.h>
87 #include <linux/if_vlan.h>
88 #include <linux/virtio_net.h>
89 #include <linux/errqueue.h>
90 #include <linux/net_tstamp.h>
93 #include <net/inet_common.h>
100 - if device has no dev->hard_header routine, it adds and removes ll header
101 inside itself. In this case the ll header is invisible outside of the device,
102 but higher levels should still reserve dev->hard_header_len.
103 Some devices are clever enough to reallocate the skb when the header
104 will not fit into the reserved space (tunnel); other ones are silly
106 - packet socket receives packets with the ll header pulled,
107 so SOCK_RAW should push it back.
112 Incoming, dev->hard_header!=NULL
113 mac_header -> ll header
116 Outgoing, dev->hard_header!=NULL
117 mac_header -> ll header
120 Incoming, dev->hard_header==NULL
121 mac_header -> UNKNOWN position. It is very likely that it points to the ll
122 header. PPP does this, which is wrong, because it introduces
123 asymmetry between the rx and tx paths.
126 Outgoing, dev->hard_header==NULL
127 mac_header -> data. ll header is still not built!
131 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
137 dev->hard_header != NULL
138 mac_header -> ll header
141 dev->hard_header == NULL (ll header is added by device, we cannot control it)
145 We should set nh.raw on output to the correct position;
146 the packet classifier depends on it.
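/*
 * Illustration only (not part of the original code): a minimal user-space
 * sketch of the convention above. With SOCK_RAW the ll header is part of
 * the data the socket sees; with SOCK_DGRAM it is stripped and delivered
 * via sockaddr_ll instead. Error handling and CAP_NET_RAW checks omitted.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <net/ethernet.h>
 *	#include <arpa/inet.h>
 *
 *	// Frames start with the link-layer (e.g. Ethernet) header:
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	// Frames start at the network header; link-layer info arrives
 *	// in the sockaddr_ll filled in by recvfrom():
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */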
149 /* Private packet socket structures. */
151 /* identical to struct packet_mreq except it has
152 * a longer address field.
154 struct packet_mreq_max {
156 unsigned short mr_type;
157 unsigned short mr_alen;
158 unsigned char mr_address[MAX_ADDR_LEN];
161 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
162 int closing, int tx_ring);
165 #define V3_ALIGNMENT (8)
167 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
169 #define BLK_PLUS_PRIV(sz_of_priv) \
170 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
172 #define PGV_FROM_VMALLOC 1
174 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
175 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
176 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
177 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
178 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
179 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
180 #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
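/*
 * Illustration only: a sketch of how user-space typically consumes a retired
 * TPACKET_V3 block described by the macros above. Field names come from
 * <linux/if_packet.h>; ring setup and error handling are omitted.
 *
 *	struct tpacket_block_desc *pbd = block_addr;
 *	struct tpacket3_hdr *ppd;
 *	unsigned int i;
 *
 *	if (pbd->hdr.bh1.block_status & TP_STATUS_USER) {
 *		ppd = (struct tpacket3_hdr *)((char *)pbd +
 *					pbd->hdr.bh1.offset_to_first_pkt);
 *		for (i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
 *			// process ppd->tp_mac / ppd->tp_snaplen here
 *			ppd = (struct tpacket3_hdr *)((char *)ppd +
 *						ppd->tp_next_offset);
 *		}
 *		pbd->hdr.bh1.block_status = TP_STATUS_KERNEL; // hand back
 *	}
 */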
183 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
185 static void *packet_previous_frame(struct packet_sock *po,
186 struct packet_ring_buffer *rb,
188 static void packet_increment_head(struct packet_ring_buffer *buff);
189 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
190 struct tpacket_block_desc *);
191 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
192 struct packet_sock *);
193 static void prb_retire_current_block(struct tpacket_kbdq_core *,
194 struct packet_sock *, unsigned int status);
195 static int prb_queue_frozen(struct tpacket_kbdq_core *);
196 static void prb_open_block(struct tpacket_kbdq_core *,
197 struct tpacket_block_desc *);
198 static void prb_retire_rx_blk_timer_expired(unsigned long);
199 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
200 static void prb_init_blk_timer(struct packet_sock *,
201 struct tpacket_kbdq_core *,
202 void (*func) (unsigned long));
203 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
204 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
205 struct tpacket3_hdr *);
206 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
207 struct tpacket3_hdr *);
208 static void packet_flush_mclist(struct sock *sk);
210 struct packet_skb_cb {
211 unsigned int origlen;
213 struct sockaddr_pkt pkt;
214 struct sockaddr_ll ll;
218 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
220 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
221 #define GET_PBLOCK_DESC(x, bid) \
222 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
223 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
224 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
225 #define GET_NEXT_PRB_BLK_NUM(x) \
226 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
227 ((x)->kactive_blk_num+1) : 0)
229 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
230 static void __fanout_link(struct sock *sk, struct packet_sock *po);
232 /* register_prot_hook must be invoked with the po->bind_lock held,
233 * or from a context in which asynchronous accesses to the packet
234 * socket is not possible (packet_create()).
236 static void register_prot_hook(struct sock *sk)
238 struct packet_sock *po = pkt_sk(sk);
241 __fanout_link(sk, po);
243 dev_add_pack(&po->prot_hook);
249 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
250 * held. If the sync parameter is true, we will temporarily drop
251 * the po->bind_lock and do a synchronize_net to make sure no
252 * asynchronous packet processing paths still refer to the elements
253 * of po->prot_hook. If the sync parameter is false, it is the
254 * callers responsibility to take care of this.
256 static void __unregister_prot_hook(struct sock *sk, bool sync)
258 struct packet_sock *po = pkt_sk(sk);
262 __fanout_unlink(sk, po);
264 __dev_remove_pack(&po->prot_hook);
268 spin_unlock(&po->bind_lock);
270 spin_lock(&po->bind_lock);
274 static void unregister_prot_hook(struct sock *sk, bool sync)
276 struct packet_sock *po = pkt_sk(sk);
279 __unregister_prot_hook(sk, sync);
282 static inline __pure struct page *pgv_to_page(void *addr)
284 if (is_vmalloc_addr(addr))
285 return vmalloc_to_page(addr);
286 return virt_to_page(addr);
289 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
292 struct tpacket_hdr *h1;
293 struct tpacket2_hdr *h2;
298 switch (po->tp_version) {
300 h.h1->tp_status = status;
301 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
304 h.h2->tp_status = status;
305 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
309 WARN(1, "TPACKET version not supported.\n");
316 static int __packet_get_status(struct packet_sock *po, void *frame)
319 struct tpacket_hdr *h1;
320 struct tpacket2_hdr *h2;
327 switch (po->tp_version) {
329 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
330 return h.h1->tp_status;
332 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
333 return h.h2->tp_status;
336 WARN(1, "TPACKET version not supported.\n");
342 static void *packet_lookup_frame(struct packet_sock *po,
343 struct packet_ring_buffer *rb,
344 unsigned int position,
347 unsigned int pg_vec_pos, frame_offset;
349 struct tpacket_hdr *h1;
350 struct tpacket2_hdr *h2;
354 pg_vec_pos = position / rb->frames_per_block;
355 frame_offset = position % rb->frames_per_block;
357 h.raw = rb->pg_vec[pg_vec_pos].buffer +
358 (frame_offset * rb->frame_size);
360 if (status != __packet_get_status(po, h.raw))
366 static void *packet_current_frame(struct packet_sock *po,
367 struct packet_ring_buffer *rb,
370 return packet_lookup_frame(po, rb, rb->head, status);
373 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
375 del_timer_sync(&pkc->retire_blk_timer);
378 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
380 struct sk_buff_head *rb_queue)
382 struct tpacket_kbdq_core *pkc;
384 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
386 spin_lock(&rb_queue->lock);
387 pkc->delete_blk_timer = 1;
388 spin_unlock(&rb_queue->lock);
390 prb_del_retire_blk_timer(pkc);
393 static void prb_init_blk_timer(struct packet_sock *po,
394 struct tpacket_kbdq_core *pkc,
395 void (*func) (unsigned long))
397 init_timer(&pkc->retire_blk_timer);
398 pkc->retire_blk_timer.data = (long)po;
399 pkc->retire_blk_timer.function = func;
400 pkc->retire_blk_timer.expires = jiffies;
403 static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
405 struct tpacket_kbdq_core *pkc;
410 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
411 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
414 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
415 int blk_size_in_bytes)
417 struct net_device *dev;
418 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
419 struct ethtool_cmd ecmd;
424 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
425 if (unlikely(!dev)) {
427 return DEFAULT_PRB_RETIRE_TOV;
429 err = __ethtool_get_settings(dev, &ecmd);
430 speed = ethtool_cmd_speed(&ecmd);
434 * If the link speed is so slow, you don't really
435 * need to worry about perf anyway
437 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
438 return DEFAULT_PRB_RETIRE_TOV;
445 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
457 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
458 union tpacket_req_u *req_u)
460 p1->feature_req_word = req_u->req3.tp_feature_req_word;
463 static void init_prb_bdqc(struct packet_sock *po,
464 struct packet_ring_buffer *rb,
466 union tpacket_req_u *req_u, int tx_ring)
468 struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
469 struct tpacket_block_desc *pbd;
471 memset(p1, 0x0, sizeof(*p1));
473 p1->knxt_seq_num = 1;
475 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
476 p1->pkblk_start = pg_vec[0].buffer;
477 p1->kblk_size = req_u->req3.tp_block_size;
478 p1->knum_blocks = req_u->req3.tp_block_nr;
479 p1->hdrlen = po->tp_hdrlen;
480 p1->version = po->tp_version;
481 p1->last_kactive_blk_num = 0;
482 po->stats_u.stats3.tp_freeze_q_cnt = 0;
483 if (req_u->req3.tp_retire_blk_tov)
484 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
486 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
487 req_u->req3.tp_block_size);
488 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
489 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
491 prb_init_ft_ops(p1, req_u);
492 prb_setup_retire_blk_timer(po, tx_ring);
493 prb_open_block(p1, pbd);
496 /* Do NOT update the last_blk_num first.
497 * Assumes sk_buff_head lock is held.
499 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
501 mod_timer(&pkc->retire_blk_timer,
502 jiffies + pkc->tov_in_jiffies);
503 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
508 * 1) We refresh the timer only when we open a block.
509 * By doing this we don't waste cycles refreshing the timer
510 * on a packet-by-packet basis.
512 * With a 1MB block-size, on a 1Gbps line, it will take
513 * i) ~8 ms to fill a block + ii) memcpy etc.
514 * In this cut we are not accounting for the memcpy time.
516 * So, if the user sets the 'tmo' to 10ms then the timer
517 * will never fire while the block is still getting filled
518 * (which is what we want). However, the user could choose
519 * to close a block early and that's fine.
521 * But when the timer does fire, we check whether or not to refresh it.
522 * Since the tmo granularity is in msecs, it is not too expensive
523 * to refresh the timer, let's say every '8' msecs.
524 * Either the user can set the 'tmo' or we can derive it based on
525 * a) line-speed and b) block-size.
526 * prb_calc_retire_blk_tmo() calculates the tmo.
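/*
 * Worked example for the estimate above (illustration only):
 * a 1 MB block is 8 Mbit of payload, so on a 1 Gbit/s link
 *
 *	8 Mbit / 1000 Mbit/s = 8 ms
 *
 * which is why a user-supplied tmo of ~10 ms will normally never fire
 * while a block is still filling. When the user does not set
 * tp_retire_blk_tov, prb_calc_retire_blk_tmo() derives a comparable
 * value from the block size and the ethtool-reported link speed.
 */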
529 static void prb_retire_rx_blk_timer_expired(unsigned long data)
531 struct packet_sock *po = (struct packet_sock *)data;
532 struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
534 struct tpacket_block_desc *pbd;
536 spin_lock(&po->sk.sk_receive_queue.lock);
538 frozen = prb_queue_frozen(pkc);
539 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
541 if (unlikely(pkc->delete_blk_timer))
544 /* We only need to plug the race when the block is partially filled.
546 * lock(); increment BLOCK_NUM_PKTS; unlock()
547 * copy_bits() is in progress ...
548 * timer fires on other cpu:
549 * we can't retire the current block because copy_bits
553 if (BLOCK_NUM_PKTS(pbd)) {
554 while (atomic_read(&pkc->blk_fill_in_prog)) {
555 /* Waiting for skb_copy_bits to finish... */
560 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
562 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
563 if (!prb_dispatch_next_block(pkc, po))
568 /* Case 1. Queue was frozen because user-space was
571 if (prb_curr_blk_in_use(pkc, pbd)) {
573 * Ok, user-space is still behind.
574 * So just refresh the timer.
578 /* Case 2. Queue was frozen, user-space caught up,
579 * now the link went idle && the timer fired.
580 * We don't have a block to close. So we open this
581 * block and restart the timer.
582 * Opening a block thaws the queue and restarts the timer.
583 * Thawing/timer-refresh is a side effect.
585 prb_open_block(pkc, pbd);
592 _prb_refresh_rx_retire_blk_timer(pkc);
595 spin_unlock(&po->sk.sk_receive_queue.lock);
598 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
599 struct tpacket_block_desc *pbd1, __u32 status)
601 /* Flush everything minus the block header */
603 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
608 /* Skip the block header (we know the header WILL fit in 4K) */
611 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
612 for (; start < end; start += PAGE_SIZE)
613 flush_dcache_page(pgv_to_page(start));
618 /* Now update the block status. */
620 BLOCK_STATUS(pbd1) = status;
622 /* Flush the block header */
624 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
626 flush_dcache_page(pgv_to_page(start));
636 * 2) Increment active_blk_num
638 * Note: We DON'T refresh the timer on purpose,
639 * because almost always the next block will be opened.
641 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
642 struct tpacket_block_desc *pbd1,
643 struct packet_sock *po, unsigned int stat)
645 __u32 status = TP_STATUS_USER | stat;
647 struct tpacket3_hdr *last_pkt;
648 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
650 if (po->stats.tp_drops)
651 status |= TP_STATUS_LOSING;
653 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
654 last_pkt->tp_next_offset = 0;
656 /* Get the ts of the last pkt */
657 if (BLOCK_NUM_PKTS(pbd1)) {
658 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
659 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
661 /* Ok, we tmo'd - so get the current time */
664 h1->ts_last_pkt.ts_sec = ts.tv_sec;
665 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
670 /* Flush the block */
671 prb_flush_block(pkc1, pbd1, status);
673 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
676 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
678 pkc->reset_pending_on_curr_blk = 0;
682 * Side effect of opening a block:
684 * 1) prb_queue is thawed.
685 * 2) retire_blk_timer is refreshed.
688 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
689 struct tpacket_block_desc *pbd1)
692 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
696 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
698 /* We could have just memset this but we would lose the
699 * flexibility of making the priv area sticky
701 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
702 BLOCK_NUM_PKTS(pbd1) = 0;
703 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
705 h1->ts_first_pkt.ts_sec = ts.tv_sec;
706 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
707 pkc1->pkblk_start = (char *)pbd1;
708 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
709 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
710 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
711 pbd1->version = pkc1->version;
712 pkc1->prev = pkc1->nxt_offset;
713 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
714 prb_thaw_queue(pkc1);
715 _prb_refresh_rx_retire_blk_timer(pkc1);
722 WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
723 pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
729 * Queue freeze logic:
730 * 1) Assume tp_block_nr = 8 blocks.
731 * 2) At time 't0', user opens Rx ring.
732 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
733 * 4) user-space is either sleeping or processing block '0'.
734 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
735 * it will close block-7, loop around and try to fill block '0'.
737 * __packet_lookup_frame_in_block
738 * prb_retire_current_block()
739 * prb_dispatch_next_block()
740 * |->(BLOCK_STATUS == USER) evaluates to true
741 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
742 * 6) Now there are two cases:
743 * 6.1) Link goes idle right after the queue is frozen.
744 * But remember, the last open_block() refreshed the timer.
745 * When this timer expires, it will refresh itself so that we can
746 * re-open block-0 in the near future.
747 * 6.2) Link is busy and keeps on receiving packets. This is a simple
748 * case and __packet_lookup_frame_in_block will check if block-0
749 * is free and can now be re-used.
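/*
 * Illustration only: the freeze described above is visible to user-space
 * as tp_freeze_q_cnt in the TPACKET_V3 statistics. A sketch (error
 * handling omitted):
 *
 *	struct tpacket_stats_v3 st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("drops %u, queue freezes %u\n",
 *		       st.tp_drops, st.tp_freeze_q_cnt);
 *
 * Releasing the stalled block (writing TP_STATUS_KERNEL to its
 * block_status, as in the block-walking sketch earlier) is what allows
 * the queue to thaw again.
 */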
751 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
752 struct packet_sock *po)
754 pkc->reset_pending_on_curr_blk = 1;
755 po->stats_u.stats3.tp_freeze_q_cnt++;
758 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
761 * If the next block is free then we will dispatch it
762 * and return a good offset.
763 * Else, we will freeze the queue.
764 * So, caller must check the return value.
766 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
767 struct packet_sock *po)
769 struct tpacket_block_desc *pbd;
773 /* 1. Get current block num */
774 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
776 /* 2. If this block is currently in_use then freeze the queue */
777 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
778 prb_freeze_queue(pkc, po);
784 * open this block and return the offset where the first packet
785 * needs to get stored.
787 prb_open_block(pkc, pbd);
788 return (void *)pkc->nxt_offset;
791 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
792 struct packet_sock *po, unsigned int status)
794 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
796 /* retire/close the current block */
797 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
799 * Plug the case where copy_bits() is in progress on
800 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
801 * have space to copy the pkt in the current block and
802 * called prb_retire_current_block()
804 * We don't need to worry about the TMO case because
805 * the timer-handler already handled this case.
807 if (!(status & TP_STATUS_BLK_TMO)) {
808 while (atomic_read(&pkc->blk_fill_in_prog)) {
809 /* Waiting for skb_copy_bits to finish... */
813 prb_close_block(pkc, pbd, po, status);
817 WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
822 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
823 struct tpacket_block_desc *pbd)
825 return TP_STATUS_USER & BLOCK_STATUS(pbd);
828 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
830 return pkc->reset_pending_on_curr_blk;
833 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
835 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
836 atomic_dec(&pkc->blk_fill_in_prog);
839 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
840 struct tpacket3_hdr *ppd)
842 ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
845 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
846 struct tpacket3_hdr *ppd)
848 ppd->hv1.tp_rxhash = 0;
851 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
852 struct tpacket3_hdr *ppd)
854 if (vlan_tx_tag_present(pkc->skb)) {
855 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
856 ppd->tp_status = TP_STATUS_VLAN_VALID;
858 ppd->hv1.tp_vlan_tci = 0;
859 ppd->tp_status = TP_STATUS_AVAILABLE;
863 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
864 struct tpacket3_hdr *ppd)
866 prb_fill_vlan_info(pkc, ppd);
868 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
869 prb_fill_rxhash(pkc, ppd);
871 prb_clear_rxhash(pkc, ppd);
874 static void prb_fill_curr_block(char *curr,
875 struct tpacket_kbdq_core *pkc,
876 struct tpacket_block_desc *pbd,
879 struct tpacket3_hdr *ppd;
881 ppd = (struct tpacket3_hdr *)curr;
882 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
884 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
885 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
886 BLOCK_NUM_PKTS(pbd) += 1;
887 atomic_inc(&pkc->blk_fill_in_prog);
888 prb_run_all_ft_ops(pkc, ppd);
891 /* Assumes caller has the sk->rx_queue.lock */
892 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
898 struct tpacket_kbdq_core *pkc;
899 struct tpacket_block_desc *pbd;
902 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
903 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
905 /* Queue is frozen when user space is lagging behind */
906 if (prb_queue_frozen(pkc)) {
908 * Check if the last block, which caused the queue to freeze,
909 * is still in_use by user-space.
911 if (prb_curr_blk_in_use(pkc, pbd)) {
912 /* Can't record this packet */
916 * Ok, the block was released by user-space.
917 * Now let's open that block.
918 * opening a block also thaws the queue.
919 * Thawing is a side effect.
921 prb_open_block(pkc, pbd);
926 curr = pkc->nxt_offset;
928 end = (char *)pbd + pkc->kblk_size;
930 /* first try the current block */
931 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
932 prb_fill_curr_block(curr, pkc, pbd, len);
936 /* Ok, close the current block */
937 prb_retire_current_block(pkc, po, 0);
939 /* Now, try to dispatch the next block */
940 curr = (char *)prb_dispatch_next_block(pkc, po);
942 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
943 prb_fill_curr_block(curr, pkc, pbd, len);
948 * No free blocks are available. user-space hasn't caught up yet.
949 * The queue was just frozen and now this packet will get dropped.
954 static void *packet_current_rx_frame(struct packet_sock *po,
956 int status, unsigned int len)
959 switch (po->tp_version) {
962 curr = packet_lookup_frame(po, &po->rx_ring,
963 po->rx_ring.head, status);
966 return __packet_lookup_frame_in_block(po, skb, status, len);
968 WARN(1, "TPACKET version not supported\n");
974 static void *prb_lookup_block(struct packet_sock *po,
975 struct packet_ring_buffer *rb,
976 unsigned int previous,
979 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
980 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
982 if (status != BLOCK_STATUS(pbd))
987 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
990 if (rb->prb_bdqc.kactive_blk_num)
991 prev = rb->prb_bdqc.kactive_blk_num-1;
993 prev = rb->prb_bdqc.knum_blocks-1;
997 /* Assumes caller has held the rx_queue.lock */
998 static void *__prb_previous_block(struct packet_sock *po,
999 struct packet_ring_buffer *rb,
1002 unsigned int previous = prb_previous_blk_num(rb);
1003 return prb_lookup_block(po, rb, previous, status);
1006 static void *packet_previous_rx_frame(struct packet_sock *po,
1007 struct packet_ring_buffer *rb,
1010 if (po->tp_version <= TPACKET_V2)
1011 return packet_previous_frame(po, rb, status);
1013 return __prb_previous_block(po, rb, status);
1016 static void packet_increment_rx_head(struct packet_sock *po,
1017 struct packet_ring_buffer *rb)
1019 switch (po->tp_version) {
1022 return packet_increment_head(rb);
1025 WARN(1, "TPACKET version not supported.\n");
1031 static void *packet_previous_frame(struct packet_sock *po,
1032 struct packet_ring_buffer *rb,
1035 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1036 return packet_lookup_frame(po, rb, previous, status);
1039 static void packet_increment_head(struct packet_ring_buffer *buff)
1041 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1044 static void packet_sock_destruct(struct sock *sk)
1046 skb_queue_purge(&sk->sk_error_queue);
1048 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1049 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1051 if (!sock_flag(sk, SOCK_DEAD)) {
1052 pr_err("Attempt to release alive packet socket: %p\n", sk);
1056 sk_refcnt_debug_dec(sk);
1059 static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1061 int x = atomic_read(&f->rr_cur) + 1;
1069 static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1071 u32 idx, hash = skb->rxhash;
1073 idx = ((u64)hash * num) >> 32;
1078 static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1082 cur = atomic_read(&f->rr_cur);
1083 while ((old = atomic_cmpxchg(&f->rr_cur, cur,
1084 fanout_rr_next(f, num))) != cur)
1089 static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1091 unsigned int cpu = smp_processor_id();
1093 return f->arr[cpu % num];
1096 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1097 struct packet_type *pt, struct net_device *orig_dev)
1099 struct packet_fanout *f = pt->af_packet_priv;
1100 unsigned int num = f->num_members;
1101 struct packet_sock *po;
1104 if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1111 case PACKET_FANOUT_HASH:
1114 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1118 skb_get_rxhash(skb);
1119 sk = fanout_demux_hash(f, skb, num);
1121 case PACKET_FANOUT_LB:
1122 sk = fanout_demux_lb(f, skb, num);
1124 case PACKET_FANOUT_CPU:
1125 sk = fanout_demux_cpu(f, skb, num);
1131 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1134 DEFINE_MUTEX(fanout_mutex);
1135 EXPORT_SYMBOL_GPL(fanout_mutex);
1136 static LIST_HEAD(fanout_list);
1138 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1140 struct packet_fanout *f = po->fanout;
1142 spin_lock(&f->lock);
1143 f->arr[f->num_members] = sk;
1146 spin_unlock(&f->lock);
1149 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1151 struct packet_fanout *f = po->fanout;
1154 spin_lock(&f->lock);
1155 for (i = 0; i < f->num_members; i++) {
1156 if (f->arr[i] == sk)
1159 BUG_ON(i >= f->num_members);
1160 f->arr[i] = f->arr[f->num_members - 1];
1162 spin_unlock(&f->lock);
1165 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1167 if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
1173 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1175 struct packet_sock *po = pkt_sk(sk);
1176 struct packet_fanout *f, *match;
1177 u8 type = type_flags & 0xff;
1178 u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
1182 case PACKET_FANOUT_HASH:
1183 case PACKET_FANOUT_LB:
1184 case PACKET_FANOUT_CPU:
1196 mutex_lock(&fanout_mutex);
1198 list_for_each_entry(f, &fanout_list, list) {
1200 read_pnet(&f->net) == sock_net(sk)) {
1206 if (match && match->defrag != defrag)
1210 match = kzalloc(sizeof(*match), GFP_KERNEL);
1213 write_pnet(&match->net, sock_net(sk));
1216 match->defrag = defrag;
1217 atomic_set(&match->rr_cur, 0);
1218 INIT_LIST_HEAD(&match->list);
1219 spin_lock_init(&match->lock);
1220 atomic_set(&match->sk_ref, 0);
1221 match->prot_hook.type = po->prot_hook.type;
1222 match->prot_hook.dev = po->prot_hook.dev;
1223 match->prot_hook.func = packet_rcv_fanout;
1224 match->prot_hook.af_packet_priv = match;
1225 match->prot_hook.id_match = match_fanout_group;
1226 dev_add_pack(&match->prot_hook);
1227 list_add(&match->list, &fanout_list);
1230 if (match->type == type &&
1231 match->prot_hook.type == po->prot_hook.type &&
1232 match->prot_hook.dev == po->prot_hook.dev) {
1234 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1235 __dev_remove_pack(&po->prot_hook);
1237 atomic_inc(&match->sk_ref);
1238 __fanout_link(sk, po);
1243 mutex_unlock(&fanout_mutex);
1247 static void fanout_release(struct sock *sk)
1249 struct packet_sock *po = pkt_sk(sk);
1250 struct packet_fanout *f;
1256 mutex_lock(&fanout_mutex);
1259 if (atomic_dec_and_test(&f->sk_ref)) {
1261 dev_remove_pack(&f->prot_hook);
1264 mutex_unlock(&fanout_mutex);
1267 static const struct proto_ops packet_ops;
1269 static const struct proto_ops packet_ops_spkt;
1271 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1272 struct packet_type *pt, struct net_device *orig_dev)
1275 struct sockaddr_pkt *spkt;
1278 * When we registered the protocol we saved the socket in the data
1279 * field for just this event.
1282 sk = pt->af_packet_priv;
1285 * Yank back the headers [hope the device set this
1286 * right or kerboom...]
1288 * Incoming packets have ll header pulled,
1291 * For outgoing ones skb->data == skb_mac_header(skb),
1292 * so this procedure is a noop.
1295 if (skb->pkt_type == PACKET_LOOPBACK)
1298 if (!net_eq(dev_net(dev), sock_net(sk)))
1301 skb = skb_share_check(skb, GFP_ATOMIC);
1305 /* drop any routing info */
1308 /* drop conntrack reference */
1311 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1313 skb_push(skb, skb->data - skb_mac_header(skb));
1316 * The SOCK_PACKET socket receives _all_ frames.
1319 spkt->spkt_family = dev->type;
1320 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1321 spkt->spkt_protocol = skb->protocol;
1324 * Charge the memory to the socket. This is done specifically
1325 * to prevent sockets from using up all the memory.
1328 if (sock_queue_rcv_skb(sk, skb) == 0)
1339 * Output a raw packet to a device layer. This bypasses all the other
1340 * protocol layers and you must therefore supply it with a complete frame
1343 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1344 struct msghdr *msg, size_t len)
1346 struct sock *sk = sock->sk;
1347 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
1348 struct sk_buff *skb = NULL;
1349 struct net_device *dev;
1355 * Get and verify the address.
1359 if (msg->msg_namelen < sizeof(struct sockaddr))
1361 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1362 proto = saddr->spkt_protocol;
1364 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1367 * Find the device first to size check it
1370 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1373 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1379 if (!(dev->flags & IFF_UP))
1383 * You may not queue a frame bigger than the mtu. This is the lowest level
1384 * raw protocol and you must do your own fragmentation at this level.
1387 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1388 if (!netif_supports_nofcs(dev)) {
1389 err = -EPROTONOSUPPORT;
1392 extra_len = 4; /* We're doing our own CRC */
1396 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1400 size_t reserved = LL_RESERVED_SPACE(dev);
1401 int tlen = dev->needed_tailroom;
1402 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1405 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1408 /* FIXME: Save some space for broken drivers that write a hard
1409 * header at transmission time by themselves. PPP is the notable
1410 * one here. This should really be fixed at the driver level.
1412 skb_reserve(skb, reserved);
1413 skb_reset_network_header(skb);
1415 /* Try to align data part correctly */
1420 skb_reset_network_header(skb);
1422 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1428 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1429 /* Earlier code assumed this would be a VLAN pkt,
1430 * double-check this now that we have the actual
1433 struct ethhdr *ehdr;
1434 skb_reset_mac_header(skb);
1435 ehdr = eth_hdr(skb);
1436 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1442 skb->protocol = proto;
1444 skb->priority = sk->sk_priority;
1445 skb->mark = sk->sk_mark;
1446 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1450 if (unlikely(extra_len == 4))
1453 dev_queue_xmit(skb);
1464 static unsigned int run_filter(const struct sk_buff *skb,
1465 const struct sock *sk,
1468 struct sk_filter *filter;
1471 filter = rcu_dereference(sk->sk_filter);
1473 res = SK_RUN_FILTER(filter, skb);
1480 * This function does lazy skb cloning in the hope that most of the packets
1481 * are discarded by BPF.
1483 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
1484 * and skb->cb are mangled. It works because (and until) packets
1485 * falling here are owned by the current CPU. Output packets are cloned
1486 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1487 * sequentially, so if we return the skb to its original state on exit,
1488 * we will not harm anyone.
1491 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1492 struct packet_type *pt, struct net_device *orig_dev)
1495 struct sockaddr_ll *sll;
1496 struct packet_sock *po;
1497 u8 *skb_head = skb->data;
1498 int skb_len = skb->len;
1499 unsigned int snaplen, res;
1501 if (skb->pkt_type == PACKET_LOOPBACK)
1504 sk = pt->af_packet_priv;
1507 if (!net_eq(dev_net(dev), sock_net(sk)))
1512 if (dev->header_ops) {
1513 /* The device has an explicit notion of ll header,
1514 * exported to higher levels.
1516 * Otherwise, the device hides details of its frame
1517 * structure, so that the corresponding packet head is
1518 * never delivered to the user.
1520 if (sk->sk_type != SOCK_DGRAM)
1521 skb_push(skb, skb->data - skb_mac_header(skb));
1522 else if (skb->pkt_type == PACKET_OUTGOING) {
1523 /* Special case: outgoing packets have ll header at head */
1524 skb_pull(skb, skb_network_offset(skb));
1530 res = run_filter(skb, sk, snaplen);
1532 goto drop_n_restore;
1536 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1539 if (skb_shared(skb)) {
1540 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1544 if (skb_head != skb->data) {
1545 skb->data = skb_head;
1552 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1555 sll = &PACKET_SKB_CB(skb)->sa.ll;
1556 sll->sll_family = AF_PACKET;
1557 sll->sll_hatype = dev->type;
1558 sll->sll_protocol = skb->protocol;
1559 sll->sll_pkttype = skb->pkt_type;
1560 if (unlikely(po->origdev))
1561 sll->sll_ifindex = orig_dev->ifindex;
1563 sll->sll_ifindex = dev->ifindex;
1565 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1567 PACKET_SKB_CB(skb)->origlen = skb->len;
1569 if (pskb_trim(skb, snaplen))
1572 skb_set_owner_r(skb, sk);
1576 /* drop conntrack reference */
1579 spin_lock(&sk->sk_receive_queue.lock);
1580 po->stats.tp_packets++;
1581 skb->dropcount = atomic_read(&sk->sk_drops);
1582 __skb_queue_tail(&sk->sk_receive_queue, skb);
1583 spin_unlock(&sk->sk_receive_queue.lock);
1584 sk->sk_data_ready(sk, skb->len);
1588 spin_lock(&sk->sk_receive_queue.lock);
1589 po->stats.tp_drops++;
1590 atomic_inc(&sk->sk_drops);
1591 spin_unlock(&sk->sk_receive_queue.lock);
1594 if (skb_head != skb->data && skb_shared(skb)) {
1595 skb->data = skb_head;
1603 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1604 struct packet_type *pt, struct net_device *orig_dev)
1607 struct packet_sock *po;
1608 struct sockaddr_ll *sll;
1610 struct tpacket_hdr *h1;
1611 struct tpacket2_hdr *h2;
1612 struct tpacket3_hdr *h3;
1615 u8 *skb_head = skb->data;
1616 int skb_len = skb->len;
1617 unsigned int snaplen, res;
1618 unsigned long status = TP_STATUS_USER;
1619 unsigned short macoff, netoff, hdrlen;
1620 struct sk_buff *copy_skb = NULL;
1623 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1625 if (skb->pkt_type == PACKET_LOOPBACK)
1628 sk = pt->af_packet_priv;
1631 if (!net_eq(dev_net(dev), sock_net(sk)))
1634 if (dev->header_ops) {
1635 if (sk->sk_type != SOCK_DGRAM)
1636 skb_push(skb, skb->data - skb_mac_header(skb));
1637 else if (skb->pkt_type == PACKET_OUTGOING) {
1638 /* Special case: outgoing packets have ll header at head */
1639 skb_pull(skb, skb_network_offset(skb));
1643 if (skb->ip_summed == CHECKSUM_PARTIAL)
1644 status |= TP_STATUS_CSUMNOTREADY;
1648 res = run_filter(skb, sk, snaplen);
1650 goto drop_n_restore;
1654 if (sk->sk_type == SOCK_DGRAM) {
1655 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1658 unsigned int maclen = skb_network_offset(skb);
1659 netoff = TPACKET_ALIGN(po->tp_hdrlen +
1660 (maclen < 16 ? 16 : maclen)) +
1662 macoff = netoff - maclen;
1664 if (po->tp_version <= TPACKET_V2) {
1665 if (macoff + snaplen > po->rx_ring.frame_size) {
1666 if (po->copy_thresh &&
1667 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1668 if (skb_shared(skb)) {
1669 copy_skb = skb_clone(skb, GFP_ATOMIC);
1671 copy_skb = skb_get(skb);
1672 skb_head = skb->data;
1675 skb_set_owner_r(copy_skb, sk);
1677 snaplen = po->rx_ring.frame_size - macoff;
1678 if ((int)snaplen < 0)
1682 spin_lock(&sk->sk_receive_queue.lock);
1683 h.raw = packet_current_rx_frame(po, skb,
1684 TP_STATUS_KERNEL, (macoff+snaplen));
1687 if (po->tp_version <= TPACKET_V2) {
1688 packet_increment_rx_head(po, &po->rx_ring);
1690 * LOSING will be reported till you read the stats,
1691 * because it's COR - Clear On Read.
1692 * Anyway, moving it for V1/V2 only, as V3 doesn't need this
1695 if (po->stats.tp_drops)
1696 status |= TP_STATUS_LOSING;
1698 po->stats.tp_packets++;
1700 status |= TP_STATUS_COPY;
1701 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1703 spin_unlock(&sk->sk_receive_queue.lock);
1705 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1707 switch (po->tp_version) {
1709 h.h1->tp_len = skb->len;
1710 h.h1->tp_snaplen = snaplen;
1711 h.h1->tp_mac = macoff;
1712 h.h1->tp_net = netoff;
1713 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1714 && shhwtstamps->syststamp.tv64)
1715 tv = ktime_to_timeval(shhwtstamps->syststamp);
1716 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1717 && shhwtstamps->hwtstamp.tv64)
1718 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1719 else if (skb->tstamp.tv64)
1720 tv = ktime_to_timeval(skb->tstamp);
1722 do_gettimeofday(&tv);
1723 h.h1->tp_sec = tv.tv_sec;
1724 h.h1->tp_usec = tv.tv_usec;
1725 hdrlen = sizeof(*h.h1);
1728 h.h2->tp_len = skb->len;
1729 h.h2->tp_snaplen = snaplen;
1730 h.h2->tp_mac = macoff;
1731 h.h2->tp_net = netoff;
1732 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1733 && shhwtstamps->syststamp.tv64)
1734 ts = ktime_to_timespec(shhwtstamps->syststamp);
1735 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1736 && shhwtstamps->hwtstamp.tv64)
1737 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1738 else if (skb->tstamp.tv64)
1739 ts = ktime_to_timespec(skb->tstamp);
1741 getnstimeofday(&ts);
1742 h.h2->tp_sec = ts.tv_sec;
1743 h.h2->tp_nsec = ts.tv_nsec;
1744 if (vlan_tx_tag_present(skb)) {
1745 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1746 status |= TP_STATUS_VLAN_VALID;
1748 h.h2->tp_vlan_tci = 0;
1750 h.h2->tp_padding = 0;
1751 hdrlen = sizeof(*h.h2);
1754 /* tp_nxt_offset, vlan are already populated above,
1755 * so DON'T clear those fields here
1757 h.h3->tp_status |= status;
1758 h.h3->tp_len = skb->len;
1759 h.h3->tp_snaplen = snaplen;
1760 h.h3->tp_mac = macoff;
1761 h.h3->tp_net = netoff;
1762 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1763 && shhwtstamps->syststamp.tv64)
1764 ts = ktime_to_timespec(shhwtstamps->syststamp);
1765 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1766 && shhwtstamps->hwtstamp.tv64)
1767 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1768 else if (skb->tstamp.tv64)
1769 ts = ktime_to_timespec(skb->tstamp);
1771 getnstimeofday(&ts);
1772 h.h3->tp_sec = ts.tv_sec;
1773 h.h3->tp_nsec = ts.tv_nsec;
1774 hdrlen = sizeof(*h.h3);
1780 sll = h.raw + TPACKET_ALIGN(hdrlen);
1781 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1782 sll->sll_family = AF_PACKET;
1783 sll->sll_hatype = dev->type;
1784 sll->sll_protocol = skb->protocol;
1785 sll->sll_pkttype = skb->pkt_type;
1786 if (unlikely(po->origdev))
1787 sll->sll_ifindex = orig_dev->ifindex;
1789 sll->sll_ifindex = dev->ifindex;
1792 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
1796 if (po->tp_version <= TPACKET_V2) {
1797 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1798 + macoff + snaplen);
1799 for (start = h.raw; start < end; start += PAGE_SIZE)
1800 flush_dcache_page(pgv_to_page(start));
1805 if (po->tp_version <= TPACKET_V2)
1806 __packet_set_status(po, h.raw, status);
1808 prb_clear_blk_fill_status(&po->rx_ring);
1810 sk->sk_data_ready(sk, 0);
1813 if (skb_head != skb->data && skb_shared(skb)) {
1814 skb->data = skb_head;
1822 po->stats.tp_drops++;
1823 spin_unlock(&sk->sk_receive_queue.lock);
1825 sk->sk_data_ready(sk, 0);
1826 kfree_skb(copy_skb);
1827 goto drop_n_restore;
1830 static void tpacket_destruct_skb(struct sk_buff *skb)
1832 struct packet_sock *po = pkt_sk(skb->sk);
1835 if (likely(po->tx_ring.pg_vec)) {
1836 ph = skb_shinfo(skb)->destructor_arg;
1837 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1838 atomic_dec(&po->tx_ring.pending);
1839 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1845 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1846 void *frame, struct net_device *dev, int size_max,
1847 __be16 proto, unsigned char *addr, int hlen)
1850 struct tpacket_hdr *h1;
1851 struct tpacket2_hdr *h2;
1854 int to_write, offset, len, tp_len, nr_frags, len_max;
1855 struct socket *sock = po->sk.sk_socket;
1862 skb->protocol = proto;
1864 skb->priority = po->sk.sk_priority;
1865 skb->mark = po->sk.sk_mark;
1866 skb_shinfo(skb)->destructor_arg = ph.raw;
1868 switch (po->tp_version) {
1870 tp_len = ph.h2->tp_len;
1873 tp_len = ph.h1->tp_len;
1876 if (unlikely(tp_len > size_max)) {
1877 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
1881 skb_reserve(skb, hlen);
1882 skb_reset_network_header(skb);
1884 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1887 if (sock->type == SOCK_DGRAM) {
1888 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1890 if (unlikely(err < 0))
1892 } else if (dev->hard_header_len) {
1893 /* net device doesn't like empty head */
1894 if (unlikely(tp_len <= dev->hard_header_len)) {
1895 pr_err("packet size is too short (%d < %d)\n",
1896 tp_len, dev->hard_header_len);
1900 skb_push(skb, dev->hard_header_len);
1901 err = skb_store_bits(skb, 0, data,
1902 dev->hard_header_len);
1906 data += dev->hard_header_len;
1907 to_write -= dev->hard_header_len;
1911 offset = offset_in_page(data);
1912 len_max = PAGE_SIZE - offset;
1913 len = ((to_write > len_max) ? len_max : to_write);
1915 skb->data_len = to_write;
1916 skb->len += to_write;
1917 skb->truesize += to_write;
1918 atomic_add(to_write, &po->sk.sk_wmem_alloc);
1920 while (likely(to_write)) {
1921 nr_frags = skb_shinfo(skb)->nr_frags;
1923 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
1924 pr_err("Packet exceed the number of skb frags(%lu)\n",
1929 page = pgv_to_page(data);
1931 flush_dcache_page(page);
1933 skb_fill_page_desc(skb, nr_frags, page, offset, len);
1936 len_max = PAGE_SIZE;
1937 len = ((to_write > len_max) ? len_max : to_write);
1943 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1945 struct sk_buff *skb;
1946 struct net_device *dev;
1948 bool need_rls_dev = false;
1949 int err, reserve = 0;
1951 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
1952 int tp_len, size_max;
1953 unsigned char *addr;
1955 int status = TP_STATUS_AVAILABLE;
1958 mutex_lock(&po->pg_vec_lock);
1961 if (saddr == NULL) {
1962 dev = po->prot_hook.dev;
1967 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1969 if (msg->msg_namelen < (saddr->sll_halen
1970 + offsetof(struct sockaddr_ll,
1973 proto = saddr->sll_protocol;
1974 addr = saddr->sll_addr;
1975 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
1976 need_rls_dev = true;
1980 if (unlikely(dev == NULL))
1983 reserve = dev->hard_header_len;
1986 if (unlikely(!(dev->flags & IFF_UP)))
1989 size_max = po->tx_ring.frame_size
1990 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
1992 if (size_max > dev->mtu + reserve)
1993 size_max = dev->mtu + reserve;
1996 ph = packet_current_frame(po, &po->tx_ring,
1997 TP_STATUS_SEND_REQUEST);
1999 if (unlikely(ph == NULL)) {
2004 status = TP_STATUS_SEND_REQUEST;
2005 hlen = LL_RESERVED_SPACE(dev);
2006 tlen = dev->needed_tailroom;
2007 skb = sock_alloc_send_skb(&po->sk,
2008 hlen + tlen + sizeof(struct sockaddr_ll),
2011 if (unlikely(skb == NULL))
2014 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2017 if (unlikely(tp_len < 0)) {
2019 __packet_set_status(po, ph,
2020 TP_STATUS_AVAILABLE);
2021 packet_increment_head(&po->tx_ring);
2025 status = TP_STATUS_WRONG_FORMAT;
2031 skb->destructor = tpacket_destruct_skb;
2032 __packet_set_status(po, ph, TP_STATUS_SENDING);
2033 atomic_inc(&po->tx_ring.pending);
2035 status = TP_STATUS_SEND_REQUEST;
2036 err = dev_queue_xmit(skb);
2037 if (unlikely(err > 0)) {
2038 err = net_xmit_errno(err);
2039 if (err && __packet_get_status(po, ph) ==
2040 TP_STATUS_AVAILABLE) {
2041 /* skb was destructed already */
2046 * skb was dropped but not destructed yet;
2047 * let's treat it like congestion or err < 0
2051 packet_increment_head(&po->tx_ring);
2053 } while (likely((ph != NULL) ||
2054 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2055 (atomic_read(&po->tx_ring.pending))))
2062 __packet_set_status(po, ph, status);
2068 mutex_unlock(&po->pg_vec_lock);
2072 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2073 size_t reserve, size_t len,
2074 size_t linear, int noblock,
2077 struct sk_buff *skb;
2079 /* Under a page? Don't bother with paged skb. */
2080 if (prepad + len < PAGE_SIZE || !linear)
2083 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2088 skb_reserve(skb, reserve);
2089 skb_put(skb, linear);
2090 skb->data_len = len - linear;
2091 skb->len += len - linear;
2096 static int packet_snd(struct socket *sock,
2097 struct msghdr *msg, size_t len)
2099 struct sock *sk = sock->sk;
2100 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2101 struct sk_buff *skb;
2102 struct net_device *dev;
2104 bool need_rls_dev = false;
2105 unsigned char *addr;
2106 int err, reserve = 0;
2107 struct virtio_net_hdr vnet_hdr = { 0 };
2110 struct packet_sock *po = pkt_sk(sk);
2111 unsigned short gso_type = 0;
2116 * Get and verify the address.
2119 if (saddr == NULL) {
2120 dev = po->prot_hook.dev;
2125 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2127 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2129 proto = saddr->sll_protocol;
2130 addr = saddr->sll_addr;
2131 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2132 need_rls_dev = true;
2138 if (sock->type == SOCK_RAW)
2139 reserve = dev->hard_header_len;
2142 if (!(dev->flags & IFF_UP))
2145 if (po->has_vnet_hdr) {
2146 vnet_hdr_len = sizeof(vnet_hdr);
2149 if (len < vnet_hdr_len)
2152 len -= vnet_hdr_len;
2154 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2159 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2160 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2162 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2163 vnet_hdr.csum_offset + 2;
2166 if (vnet_hdr.hdr_len > len)
2169 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2170 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2171 case VIRTIO_NET_HDR_GSO_TCPV4:
2172 gso_type = SKB_GSO_TCPV4;
2174 case VIRTIO_NET_HDR_GSO_TCPV6:
2175 gso_type = SKB_GSO_TCPV6;
2177 case VIRTIO_NET_HDR_GSO_UDP:
2178 gso_type = SKB_GSO_UDP;
2184 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2185 gso_type |= SKB_GSO_TCP_ECN;
2187 if (vnet_hdr.gso_size == 0)
2193 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2194 if (!netif_supports_nofcs(dev)) {
2195 err = -EPROTONOSUPPORT;
2198 extra_len = 4; /* We're doing our own CRC */
2202 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2206 hlen = LL_RESERVED_SPACE(dev);
2207 tlen = dev->needed_tailroom;
2208 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
2209 msg->msg_flags & MSG_DONTWAIT, &err);
2213 skb_set_network_header(skb, reserve);
2216 if (sock->type == SOCK_DGRAM &&
2217 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
2220 /* Returns -EFAULT on error */
2221 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
2224 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2228 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2229 /* Earlier code assumed this would be a VLAN pkt,
2230 * double-check this now that we have the actual
2233 struct ethhdr *ehdr;
2234 skb_reset_mac_header(skb);
2235 ehdr = eth_hdr(skb);
2236 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2242 skb->protocol = proto;
2244 skb->priority = sk->sk_priority;
2245 skb->mark = sk->sk_mark;
2247 if (po->has_vnet_hdr) {
2248 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2249 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2250 vnet_hdr.csum_offset)) {
2256 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2257 skb_shinfo(skb)->gso_type = gso_type;
2259 /* Header must be checked, and gso_segs computed. */
2260 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2261 skb_shinfo(skb)->gso_segs = 0;
2263 len += vnet_hdr_len;
2266 if (unlikely(extra_len == 4))
2273 err = dev_queue_xmit(skb);
2274 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2285 if (dev && need_rls_dev)
2291 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2292 struct msghdr *msg, size_t len)
2294 struct sock *sk = sock->sk;
2295 struct packet_sock *po = pkt_sk(sk);
2296 if (po->tx_ring.pg_vec)
2297 return tpacket_snd(po, msg);
2299 return packet_snd(sock, msg, len);
2303 * Close a PACKET socket. This is fairly simple. We immediately go
2304 * to 'closed' state and remove our protocol entry in the device list.
2307 static int packet_release(struct socket *sock)
2309 struct sock *sk = sock->sk;
2310 struct packet_sock *po;
2312 union tpacket_req_u req_u;
2320 mutex_lock(&net->packet.sklist_lock);
2321 sk_del_node_init_rcu(sk);
2322 mutex_unlock(&net->packet.sklist_lock);
2325 sock_prot_inuse_add(net, sk->sk_prot, -1);
2328 spin_lock(&po->bind_lock);
2329 unregister_prot_hook(sk, false);
2330 if (po->prot_hook.dev) {
2331 dev_put(po->prot_hook.dev);
2332 po->prot_hook.dev = NULL;
2334 spin_unlock(&po->bind_lock);
2336 packet_flush_mclist(sk);
2338 memset(&req_u, 0, sizeof(req_u));
2340 if (po->rx_ring.pg_vec)
2341 packet_set_ring(sk, &req_u, 1, 0);
2343 if (po->tx_ring.pg_vec)
2344 packet_set_ring(sk, &req_u, 1, 1);
2350 * Now the socket is dead. No more input will appear.
2357 skb_queue_purge(&sk->sk_receive_queue);
2358 sk_refcnt_debug_release(sk);
2365 * Attach a packet hook.
2368 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
2370 struct packet_sock *po = pkt_sk(sk);
2381 spin_lock(&po->bind_lock);
2382 unregister_prot_hook(sk, true);
2384 po->prot_hook.type = protocol;
2385 if (po->prot_hook.dev)
2386 dev_put(po->prot_hook.dev);
2387 po->prot_hook.dev = dev;
2389 po->ifindex = dev ? dev->ifindex : 0;
2394 if (!dev || (dev->flags & IFF_UP)) {
2395 register_prot_hook(sk);
2397 sk->sk_err = ENETDOWN;
2398 if (!sock_flag(sk, SOCK_DEAD))
2399 sk->sk_error_report(sk);
2403 spin_unlock(&po->bind_lock);
2409 * Bind a packet socket to a device
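/*
 * Illustration only: from user-space, the sockaddr_ll-based bind served by
 * packet_bind() below looks roughly like this (ifindex is assumed to come
 * from if_nametoindex(); error handling omitted):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = ifindex,	// 0 means "any device"
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */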
2412 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2415 struct sock *sk = sock->sk;
2417 struct net_device *dev;
2424 if (addr_len != sizeof(struct sockaddr))
2426 strlcpy(name, uaddr->sa_data, sizeof(name));
2428 dev = dev_get_by_name(sock_net(sk), name);
2430 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
2434 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2436 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2437 struct sock *sk = sock->sk;
2438 struct net_device *dev = NULL;
2446 if (addr_len < sizeof(struct sockaddr_ll))
2448 if (sll->sll_family != AF_PACKET)
2451 if (sll->sll_ifindex) {
2453 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
2457 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
2463 static struct proto packet_proto = {
2465 .owner = THIS_MODULE,
2466 .obj_size = sizeof(struct packet_sock),
2470 * Create a packet of type SOCK_PACKET.
2473 static int packet_create(struct net *net, struct socket *sock, int protocol,
2477 struct packet_sock *po;
2478 __be16 proto = (__force __be16)protocol; /* weird, but documented */
2481 if (!capable(CAP_NET_RAW))
2483 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2484 sock->type != SOCK_PACKET)
2485 return -ESOCKTNOSUPPORT;
2487 sock->state = SS_UNCONNECTED;
2490 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2494 sock->ops = &packet_ops;
2495 if (sock->type == SOCK_PACKET)
2496 sock->ops = &packet_ops_spkt;
2498 sock_init_data(sock, sk);
2501 sk->sk_family = PF_PACKET;
2504 sk->sk_destruct = packet_sock_destruct;
2505 sk_refcnt_debug_inc(sk);
2508 * Attach a protocol block
2511 spin_lock_init(&po->bind_lock);
2512 mutex_init(&po->pg_vec_lock);
2513 po->prot_hook.func = packet_rcv;
2515 if (sock->type == SOCK_PACKET)
2516 po->prot_hook.func = packet_rcv_spkt;
2518 po->prot_hook.af_packet_priv = sk;
2521 po->prot_hook.type = proto;
2522 register_prot_hook(sk);
2525 mutex_lock(&net->packet.sklist_lock);
2526 sk_add_node_rcu(sk, &net->packet.sklist);
2527 mutex_unlock(&net->packet.sklist_lock);
2530 sock_prot_inuse_add(net, &packet_proto, 1);
2538 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2540 struct sock_exterr_skb *serr;
2541 struct sk_buff *skb, *skb2;
2545 skb = skb_dequeue(&sk->sk_error_queue);
2551 msg->msg_flags |= MSG_TRUNC;
2554 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2558 sock_recv_timestamp(msg, sk, skb);
2560 serr = SKB_EXT_ERR(skb);
2561 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2562 sizeof(serr->ee), &serr->ee);
2564 msg->msg_flags |= MSG_ERRQUEUE;
2567 /* Reset and regenerate socket error */
2568 spin_lock_bh(&sk->sk_error_queue.lock);
2570 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2571 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2572 spin_unlock_bh(&sk->sk_error_queue.lock);
2573 sk->sk_error_report(sk);
2575 spin_unlock_bh(&sk->sk_error_queue.lock);
2584 * Pull a packet from our receive queue and hand it to the user.
2585 * If necessary we block.
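/*
 * Illustration only: a user-space sketch of the receive path implemented
 * below, including the PACKET_AUXDATA control message filled in near the
 * end of packet_recvmsg(). PACKET_AUXDATA must first be enabled with
 * setsockopt(); setup and error handling are omitted.
 *
 *	char data[2048], ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct sockaddr_ll from;
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_name = &from,   .msg_namelen = sizeof(from),
 *		.msg_iov = &iov,     .msg_iovlen = 1,
 *		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	ssize_t n = recvmsg(fd, &msg, 0);	// packet bytes land in 'data'
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *			// aux->tp_status, aux->tp_vlan_tci, ...
 *		}
 */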
2588 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2589 struct msghdr *msg, size_t len, int flags)
2591 struct sock *sk = sock->sk;
2592 struct sk_buff *skb;
2594 struct sockaddr_ll *sll;
2595 int vnet_hdr_len = 0;
2598 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2602 /* What error should we return now? EUNATTACH? */
2603 if (pkt_sk(sk)->ifindex < 0)
2607 if (flags & MSG_ERRQUEUE) {
2608 err = packet_recv_error(sk, msg, len);
2613 * Call the generic datagram receiver. This handles all sorts
2614 * of horrible races and re-entrancy so we can forget about it
2615 * in the protocol layers.
2617 * Now it will return ENETDOWN if the device has just gone down,
2618 * but then it will block.
2621 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2624 * An error occurred so return it. Because skb_recv_datagram()
2625 * handles the blocking, we don't need to see or worry about blocking retries.
2632 if (pkt_sk(sk)->has_vnet_hdr) {
2633 struct virtio_net_hdr vnet_hdr = { 0 };
2636 vnet_hdr_len = sizeof(vnet_hdr);
2637 if (len < vnet_hdr_len)
goto out_free;
2640 len -= vnet_hdr_len;
2642 if (skb_is_gso(skb)) {
2643 struct skb_shared_info *sinfo = skb_shinfo(skb);
2645 /* This is a hint as to how much should be linear. */
2646 vnet_hdr.hdr_len = skb_headlen(skb);
2647 vnet_hdr.gso_size = sinfo->gso_size;
2648 if (sinfo->gso_type & SKB_GSO_TCPV4)
2649 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2650 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2651 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2652 else if (sinfo->gso_type & SKB_GSO_UDP)
2653 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2654 else if (sinfo->gso_type & SKB_GSO_FCOE)
goto out_free;
2658 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2659 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2661 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2663 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2664 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2665 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
2666 vnet_hdr.csum_offset = skb->csum_offset;
2667 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2668 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2669 } /* else everything is zero */
2671 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2678 * If the address length field is there to be filled in, we fill
* it in now; the length depends on the socket type.
2682 sll = &PACKET_SKB_CB(skb)->sa.ll;
2683 if (sock->type == SOCK_PACKET)
2684 msg->msg_namelen = sizeof(struct sockaddr_pkt);
else
2686 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2689 * You lose any data beyond the buffer you gave. If it worries a
2690 * user program they can ask the device for its MTU anyway.
2696 msg->msg_flags |= MSG_TRUNC;
2699 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2703 sock_recv_ts_and_drops(msg, sk, skb);
2706 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2709 if (pkt_sk(sk)->auxdata) {
2710 struct tpacket_auxdata aux;
2712 aux.tp_status = TP_STATUS_USER;
2713 if (skb->ip_summed == CHECKSUM_PARTIAL)
2714 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2715 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2716 aux.tp_snaplen = skb->len;
2718 aux.tp_net = skb_network_offset(skb);
2719 if (vlan_tx_tag_present(skb)) {
2720 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2721 aux.tp_status |= TP_STATUS_VLAN_VALID;
2723 aux.tp_vlan_tci = 0;
2726 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
2730 * Free or return the buffer as appropriate. Again this
2731 * hides all the races and re-entrancy issues from us.
2733 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
2736 skb_free_datagram(sk, skb);
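/*
 * Userspace sketch (illustrative): receiving one frame together with the
 * PACKET_AUXDATA control message filled in above (original length, VLAN tag,
 * checksum state). PACKET_AUXDATA must first be enabled with setsockopt().
 *
 *	char frame[65536], ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *	};
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	if (n >= 0 && cm && cm->cmsg_level == SOL_PACKET &&
 *	    cm->cmsg_type == PACKET_AUXDATA) {
 *		struct tpacket_auxdata *aux = (struct tpacket_auxdata *)CMSG_DATA(cm);
 *		// aux->tp_len is the original length; n may have been truncated
 *	}
 */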
2741 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2742 int *uaddr_len, int peer)
2744 struct net_device *dev;
2745 struct sock *sk = sock->sk;
2750 uaddr->sa_family = AF_PACKET;
2752 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev)
2754 strncpy(uaddr->sa_data, dev->name, 14);
else
2756 memset(uaddr->sa_data, 0, 14);
2758 *uaddr_len = sizeof(*uaddr);
2763 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2764 int *uaddr_len, int peer)
2766 struct net_device *dev;
2767 struct sock *sk = sock->sk;
2768 struct packet_sock *po = pkt_sk(sk);
2769 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2774 sll->sll_family = AF_PACKET;
2775 sll->sll_ifindex = po->ifindex;
2776 sll->sll_protocol = po->num;
2777 sll->sll_pkttype = 0;
2779 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
if (dev) {
2781 sll->sll_hatype = dev->type;
2782 sll->sll_halen = dev->addr_len;
2783 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
} else {
2785 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
sll->sll_halen = 0;
}
2789 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
2794 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2798 case PACKET_MR_MULTICAST:
2799 if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
2802 return dev_mc_add(dev, i->addr);
else
2804 return dev_mc_del(dev, i->addr);
2806 case PACKET_MR_PROMISC:
2807 return dev_set_promiscuity(dev, what);
2809 case PACKET_MR_ALLMULTI:
2810 return dev_set_allmulti(dev, what);
2812 case PACKET_MR_UNICAST:
2813 if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
2816 return dev_uc_add(dev, i->addr);
else
2818 return dev_uc_del(dev, i->addr);
2826 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2828 for ( ; i; i = i->next) {
2829 if (i->ifindex == dev->ifindex)
2830 packet_dev_mc(dev, i, what);
2834 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
2836 struct packet_sock *po = pkt_sk(sk);
2837 struct packet_mclist *ml, *i;
2838 struct net_device *dev;
2844 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
2849 if (mreq->mr_alen > dev->addr_len)
2853 i = kmalloc(sizeof(*i), GFP_KERNEL);
2858 for (ml = po->mclist; ml; ml = ml->next) {
2859 if (ml->ifindex == mreq->mr_ifindex &&
2860 ml->type == mreq->mr_type &&
2861 ml->alen == mreq->mr_alen &&
2862 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2864 /* Free the new element ... */
2870 i->type = mreq->mr_type;
2871 i->ifindex = mreq->mr_ifindex;
2872 i->alen = mreq->mr_alen;
2873 memcpy(i->addr, mreq->mr_address, i->alen);
2875 i->next = po->mclist;
2877 err = packet_dev_mc(dev, i, 1);
2879 po->mclist = i->next;
2888 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
2890 struct packet_mclist *ml, **mlp;
2894 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2895 if (ml->ifindex == mreq->mr_ifindex &&
2896 ml->type == mreq->mr_type &&
2897 ml->alen == mreq->mr_alen &&
2898 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2899 if (--ml->count == 0) {
2900 struct net_device *dev;
2902 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2904 packet_dev_mc(dev, ml, -1);
2912 return -EADDRNOTAVAIL;
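/*
 * Userspace sketch (illustrative): the packet_mreq interface behind
 * packet_mc_add()/packet_mc_drop(), reached through PACKET_ADD_MEMBERSHIP
 * and PACKET_DROP_MEMBERSHIP in packet_setsockopt() below. Here it puts an
 * assumed interface into promiscuous mode for the lifetime of the socket.
 *
 *	struct packet_mreq mr;
 *
 *	memset(&mr, 0, sizeof(mr));
 *	mr.mr_ifindex = if_nametoindex("eth0");
 *	mr.mr_type    = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mr, sizeof(mr));
 */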
2915 static void packet_flush_mclist(struct sock *sk)
2917 struct packet_sock *po = pkt_sk(sk);
2918 struct packet_mclist *ml;
2924 while ((ml = po->mclist) != NULL) {
2925 struct net_device *dev;
2927 po->mclist = ml->next;
2928 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2930 packet_dev_mc(dev, ml, -1);
2937 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2939 struct sock *sk = sock->sk;
2940 struct packet_sock *po = pkt_sk(sk);
2943 if (level != SOL_PACKET)
2944 return -ENOPROTOOPT;
2947 case PACKET_ADD_MEMBERSHIP:
2948 case PACKET_DROP_MEMBERSHIP:
2950 struct packet_mreq_max mreq;
2952 memset(&mreq, 0, sizeof(mreq));
2953 if (len < sizeof(struct packet_mreq))
2955 if (len > sizeof(mreq))
2957 if (copy_from_user(&mreq, optval, len))
2959 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
2961 if (optname == PACKET_ADD_MEMBERSHIP)
2962 ret = packet_mc_add(sk, &mreq);
2964 ret = packet_mc_drop(sk, &mreq);
2968 case PACKET_RX_RING:
2969 case PACKET_TX_RING:
2971 union tpacket_req_u req_u;
2974 switch (po->tp_version) {
2977 len = sizeof(req_u.req);
2981 len = sizeof(req_u.req3);
2986 if (pkt_sk(sk)->has_vnet_hdr)
2988 if (copy_from_user(&req_u.req, optval, len))
2990 return packet_set_ring(sk, &req_u, 0,
2991 optname == PACKET_TX_RING);
2993 case PACKET_COPY_THRESH:
2997 if (optlen != sizeof(val))
2999 if (copy_from_user(&val, optval, sizeof(val)))
3002 pkt_sk(sk)->copy_thresh = val;
3005 case PACKET_VERSION:
3009 if (optlen != sizeof(val))
3011 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3013 if (copy_from_user(&val, optval, sizeof(val)))
3019 po->tp_version = val;
3025 case PACKET_RESERVE:
3029 if (optlen != sizeof(val))
3031 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3033 if (copy_from_user(&val, optval, sizeof(val)))
3035 po->tp_reserve = val;
case PACKET_LOSS:
3042 if (optlen != sizeof(val))
3044 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3046 if (copy_from_user(&val, optval, sizeof(val)))
3048 po->tp_loss = !!val;
3051 case PACKET_AUXDATA:
3055 if (optlen < sizeof(val))
3057 if (copy_from_user(&val, optval, sizeof(val)))
3060 po->auxdata = !!val;
3063 case PACKET_ORIGDEV:
3067 if (optlen < sizeof(val))
3069 if (copy_from_user(&val, optval, sizeof(val)))
3072 po->origdev = !!val;
3075 case PACKET_VNET_HDR:
3079 if (sock->type != SOCK_RAW)
3081 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3083 if (optlen < sizeof(val))
3085 if (copy_from_user(&val, optval, sizeof(val)))
3088 po->has_vnet_hdr = !!val;
3091 case PACKET_TIMESTAMP:
3095 if (optlen != sizeof(val))
3097 if (copy_from_user(&val, optval, sizeof(val)))
3100 po->tp_tstamp = val;
case PACKET_FANOUT:
3107 if (optlen != sizeof(val))
3109 if (copy_from_user(&val, optval, sizeof(val)))
3112 return fanout_add(sk, val & 0xffff, val >> 16);
3115 return -ENOPROTOOPT;
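/*
 * Note on the PACKET_FANOUT encoding used by fanout_add() above: the low 16
 * bits of the option value carry the fanout group id, the high 16 bits the
 * fanout mode. Userspace sketch (illustrative; group id 42 is arbitrary):
 *
 *	int val = (PACKET_FANOUT_HASH << 16) | 42;
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 */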
3119 static int packet_getsockopt(struct socket *sock, int level, int optname,
3120 char __user *optval, int __user *optlen)
3123 int val, lv = sizeof(val);
3124 struct sock *sk = sock->sk;
3125 struct packet_sock *po = pkt_sk(sk);
3127 struct tpacket_stats st;
3128 union tpacket_stats_u st_u;
3130 if (level != SOL_PACKET)
3131 return -ENOPROTOOPT;
3133 if (get_user(len, optlen))
3140 case PACKET_STATISTICS:
3141 spin_lock_bh(&sk->sk_receive_queue.lock);
3142 if (po->tp_version == TPACKET_V3) {
3143 lv = sizeof(struct tpacket_stats_v3);
3144 memcpy(&st_u.stats3, &po->stats,
3145 sizeof(struct tpacket_stats));
3146 st_u.stats3.tp_freeze_q_cnt =
3147 po->stats_u.stats3.tp_freeze_q_cnt;
3148 st_u.stats3.tp_packets += po->stats.tp_drops;
3149 data = &st_u.stats3;
3151 lv = sizeof(struct tpacket_stats);
st = po->stats;
3153 st.tp_packets += st.tp_drops;
3156 memset(&po->stats, 0, sizeof(st));
3157 spin_unlock_bh(&sk->sk_receive_queue.lock);
3159 case PACKET_AUXDATA:
val = po->auxdata;
break;
3162 case PACKET_ORIGDEV:
val = po->origdev;
break;
3165 case PACKET_VNET_HDR:
3166 val = po->has_vnet_hdr;
break;
3168 case PACKET_VERSION:
3169 val = po->tp_version;
case PACKET_HDRLEN:
3172 if (len > sizeof(int))
3174 if (copy_from_user(&val, optval, len))
3178 val = sizeof(struct tpacket_hdr);
3181 val = sizeof(struct tpacket2_hdr);
3184 val = sizeof(struct tpacket3_hdr);
3190 case PACKET_RESERVE:
3191 val = po->tp_reserve;
3196 case PACKET_TIMESTAMP:
3197 val = po->tp_tstamp;
case PACKET_FANOUT:
3201 val = po->fanout ? ((u32)po->fanout->id |
3202 ((u32)po->fanout->type << 16)) : 0;
3206 return -ENOPROTOOPT;
3211 if (put_user(len, optlen))
return -EFAULT;
3213 if (copy_to_user(optval, data, len))
return -EFAULT;
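/*
 * Userspace sketch (illustrative): reading the counters returned by the
 * PACKET_STATISTICS branch above. Note that the kernel zeroes the counters
 * on every call.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("packets %u, drops %u\n", st.tp_packets, st.tp_drops);
 */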
3219 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3222 struct hlist_node *node;
3223 struct net_device *dev = data;
3224 struct net *net = dev_net(dev);
3227 sk_for_each_rcu(sk, node, &net->packet.sklist) {
3228 struct packet_sock *po = pkt_sk(sk);
3231 case NETDEV_UNREGISTER:
3233 packet_dev_mclist(dev, po->mclist, -1);
/* fallthrough */
case NETDEV_DOWN:
3237 if (dev->ifindex == po->ifindex) {
3238 spin_lock(&po->bind_lock);
3240 __unregister_prot_hook(sk, false);
3241 sk->sk_err = ENETDOWN;
3242 if (!sock_flag(sk, SOCK_DEAD))
3243 sk->sk_error_report(sk);
3245 if (msg == NETDEV_UNREGISTER) {
3247 if (po->prot_hook.dev)
3248 dev_put(po->prot_hook.dev);
3249 po->prot_hook.dev = NULL;
3251 spin_unlock(&po->bind_lock);
case NETDEV_UP:
3255 if (dev->ifindex == po->ifindex) {
3256 spin_lock(&po->bind_lock);
3258 register_prot_hook(sk);
3259 spin_unlock(&po->bind_lock);
3269 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3272 struct sock *sk = sock->sk;
3277 int amount = sk_wmem_alloc_get(sk);
3279 return put_user(amount, (int __user *)arg);
3283 struct sk_buff *skb;
3286 spin_lock_bh(&sk->sk_receive_queue.lock);
3287 skb = skb_peek(&sk->sk_receive_queue);
3290 spin_unlock_bh(&sk->sk_receive_queue.lock);
3291 return put_user(amount, (int __user *)arg);
3294 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3296 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3306 case SIOCGIFBRDADDR:
3307 case SIOCSIFBRDADDR:
3308 case SIOCGIFNETMASK:
3309 case SIOCSIFNETMASK:
3310 case SIOCGIFDSTADDR:
3311 case SIOCSIFDSTADDR:
3313 return inet_dgram_ops.ioctl(sock, cmd, arg);
3317 return -ENOIOCTLCMD;
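/*
 * Userspace sketch (illustrative): on a packet socket SIOCINQ reports the
 * length of the next queued frame (0 if the receive queue is empty), as
 * computed above from the head of sk_receive_queue.
 *
 *	int next_len = 0;
 *	ioctl(fd, SIOCINQ, &next_len);
 */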
3322 static unsigned int packet_poll(struct file *file, struct socket *sock,
3325 struct sock *sk = sock->sk;
3326 struct packet_sock *po = pkt_sk(sk);
3327 unsigned int mask = datagram_poll(file, sock, wait);
3329 spin_lock_bh(&sk->sk_receive_queue.lock);
3330 if (po->rx_ring.pg_vec) {
3331 if (!packet_previous_rx_frame(po, &po->rx_ring,
3333 mask |= POLLIN | POLLRDNORM;
3335 spin_unlock_bh(&sk->sk_receive_queue.lock);
3336 spin_lock_bh(&sk->sk_write_queue.lock);
3337 if (po->tx_ring.pg_vec) {
3338 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3339 mask |= POLLOUT | POLLWRNORM;
3341 spin_unlock_bh(&sk->sk_write_queue.lock);
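/*
 * Userspace sketch (illustrative) of the poll() contract implemented above
 * for a TPACKET_V2 RX ring: POLLIN is raised while the ring holds a frame
 * owned by user space. ring (a char * returned by mmap), idx, frame_size and
 * frame_nr are assumed to come from the PACKET_RX_RING setup.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr;
 *
 *	for (;;) {
 *		hdr = (struct tpacket2_hdr *)(ring + idx * frame_size);
 *		while (!(hdr->tp_status & TP_STATUS_USER))
 *			poll(&pfd, 1, -1);
 *		// frame data starts at (char *)hdr + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 *		idx = (idx + 1) % frame_nr;
 *	}
 */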
3346 /* Dirty? Well, I still did not learn a better way to account
* for user mmaps.
*/
3350 static void packet_mm_open(struct vm_area_struct *vma)
3352 struct file *file = vma->vm_file;
3353 struct socket *sock = file->private_data;
3354 struct sock *sk = sock->sk;
3357 atomic_inc(&pkt_sk(sk)->mapped);
3360 static void packet_mm_close(struct vm_area_struct *vma)
3362 struct file *file = vma->vm_file;
3363 struct socket *sock = file->private_data;
3364 struct sock *sk = sock->sk;
3367 atomic_dec(&pkt_sk(sk)->mapped);
3370 static const struct vm_operations_struct packet_mmap_ops = {
3371 .open = packet_mm_open,
3372 .close = packet_mm_close,
3375 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3380 for (i = 0; i < len; i++) {
3381 if (likely(pg_vec[i].buffer)) {
3382 if (is_vmalloc_addr(pg_vec[i].buffer))
3383 vfree(pg_vec[i].buffer);
3385 free_pages((unsigned long)pg_vec[i].buffer,
3387 pg_vec[i].buffer = NULL;
3393 static char *alloc_one_pg_vec_page(unsigned long order)
3395 char *buffer = NULL;
3396 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3397 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3399 buffer = (char *) __get_free_pages(gfp_flags, order);
3405 * __get_free_pages failed, fall back to vmalloc
3407 buffer = vzalloc((1 << order) * PAGE_SIZE);
3413 * vmalloc failed, let's dig into swap here
3415 gfp_flags &= ~__GFP_NORETRY;
3416 buffer = (char *)__get_free_pages(gfp_flags, order);
3421 * complete and utter failure
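/*
 * The fallback sequence above tries progressively more expensive sources for
 * each ring block: __get_free_pages() without retrying, then vzalloc(), and
 * finally __get_free_pages() again with __GFP_NORETRY cleared so reclaim and
 * swap may be used. free_pg_vec() undoes whichever one succeeded by checking
 * is_vmalloc_addr() per block.
 */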
3426 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3428 unsigned int block_nr = req->tp_block_nr;
3432 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3433 if (unlikely(!pg_vec))
3436 for (i = 0; i < block_nr; i++) {
3437 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3438 if (unlikely(!pg_vec[i].buffer))
3439 goto out_free_pgvec;
3446 free_pg_vec(pg_vec, order, block_nr);
3451 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3452 int closing, int tx_ring)
3454 struct pgv *pg_vec = NULL;
3455 struct packet_sock *po = pkt_sk(sk);
3456 int was_running, order = 0;
3457 struct packet_ring_buffer *rb;
3458 struct sk_buff_head *rb_queue;
3461 /* Added to avoid minimal code churn */
3462 struct tpacket_req *req = &req_u->req;
3464 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3465 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3466 WARN(1, "Tx-ring is not supported.\n");
3470 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3471 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3475 if (atomic_read(&po->mapped))
3477 if (atomic_read(&rb->pending))
3481 if (req->tp_block_nr) {
3482 /* Sanity tests and some calculations */
3484 if (unlikely(rb->pg_vec))
3487 switch (po->tp_version) {
3489 po->tp_hdrlen = TPACKET_HDRLEN;
3492 po->tp_hdrlen = TPACKET2_HDRLEN;
3495 po->tp_hdrlen = TPACKET3_HDRLEN;
3500 if (unlikely((int)req->tp_block_size <= 0))
3502 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3504 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3507 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3510 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3511 if (unlikely(rb->frames_per_block <= 0))
3513 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3518 order = get_order(req->tp_block_size);
3519 pg_vec = alloc_pg_vec(req, order);
3520 if (unlikely(!pg_vec))
3522 switch (po->tp_version) {
3524 /* Transmit path is not supported. We checked
3525 * it above but just being paranoid
*/
if (!tx_ring)
3528 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3537 if (unlikely(req->tp_frame_nr))
3543 /* Detach socket from network */
3544 spin_lock(&po->bind_lock);
3545 was_running = po->running;
3549 __unregister_prot_hook(sk, false);
3551 spin_unlock(&po->bind_lock);
3556 mutex_lock(&po->pg_vec_lock);
3557 if (closing || atomic_read(&po->mapped) == 0) {
3559 spin_lock_bh(&rb_queue->lock);
3560 swap(rb->pg_vec, pg_vec);
3561 rb->frame_max = (req->tp_frame_nr - 1);
3563 rb->frame_size = req->tp_frame_size;
3564 spin_unlock_bh(&rb_queue->lock);
3566 swap(rb->pg_vec_order, order);
3567 swap(rb->pg_vec_len, req->tp_block_nr);
3569 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3570 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3571 tpacket_rcv : packet_rcv;
3572 skb_queue_purge(rb_queue);
3573 if (atomic_read(&po->mapped))
3574 pr_err("packet_mmap: vma is busy: %d\n",
3575 atomic_read(&po->mapped));
3577 mutex_unlock(&po->pg_vec_lock);
3579 spin_lock(&po->bind_lock);
3582 register_prot_hook(sk);
3584 spin_unlock(&po->bind_lock);
3585 if (closing && (po->tp_version > TPACKET_V2)) {
3586 /* Because we don't support block-based V3 on tx-ring */
3588 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3593 free_pg_vec(pg_vec, order, req->tp_block_nr);
3598 static int packet_mmap(struct file *file, struct socket *sock,
3599 struct vm_area_struct *vma)
3601 struct sock *sk = sock->sk;
3602 struct packet_sock *po = pkt_sk(sk);
3603 unsigned long size, expected_size;
3604 struct packet_ring_buffer *rb;
3605 unsigned long start;
3612 mutex_lock(&po->pg_vec_lock);
3615 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3617 expected_size += rb->pg_vec_len * rb->pg_vec_pages * PAGE_SIZE;
3623 if (expected_size == 0)
3626 size = vma->vm_end - vma->vm_start;
3627 if (size != expected_size)
3630 start = vma->vm_start;
3631 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3632 if (rb->pg_vec == NULL)
3635 for (i = 0; i < rb->pg_vec_len; i++) {
3637 void *kaddr = rb->pg_vec[i].buffer;
3640 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3641 page = pgv_to_page(kaddr);
3642 err = vm_insert_page(vma, start, page);
3651 atomic_inc(&po->mapped);
3652 vma->vm_ops = &packet_mmap_ops;
3656 mutex_unlock(&po->pg_vec_lock);
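/*
 * Userspace sketch (illustrative): creating and mapping a TPACKET_V2 receive
 * ring, the path served by packet_set_ring() and packet_mmap() above. The
 * sizes are arbitrary example values; tp_block_size must be a multiple of
 * PAGE_SIZE and tp_frame_nr must equal
 * (tp_block_size / tp_frame_size) * tp_block_nr.
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = (4096 / 2048) * 64,
 *	};
 *	void *ring;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */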
3660 static const struct proto_ops packet_ops_spkt = {
3661 .family = PF_PACKET,
3662 .owner = THIS_MODULE,
3663 .release = packet_release,
3664 .bind = packet_bind_spkt,
3665 .connect = sock_no_connect,
3666 .socketpair = sock_no_socketpair,
3667 .accept = sock_no_accept,
3668 .getname = packet_getname_spkt,
3669 .poll = datagram_poll,
3670 .ioctl = packet_ioctl,
3671 .listen = sock_no_listen,
3672 .shutdown = sock_no_shutdown,
3673 .setsockopt = sock_no_setsockopt,
3674 .getsockopt = sock_no_getsockopt,
3675 .sendmsg = packet_sendmsg_spkt,
3676 .recvmsg = packet_recvmsg,
3677 .mmap = sock_no_mmap,
3678 .sendpage = sock_no_sendpage,
3681 static const struct proto_ops packet_ops = {
3682 .family = PF_PACKET,
3683 .owner = THIS_MODULE,
3684 .release = packet_release,
3685 .bind = packet_bind,
3686 .connect = sock_no_connect,
3687 .socketpair = sock_no_socketpair,
3688 .accept = sock_no_accept,
3689 .getname = packet_getname,
3690 .poll = packet_poll,
3691 .ioctl = packet_ioctl,
3692 .listen = sock_no_listen,
3693 .shutdown = sock_no_shutdown,
3694 .setsockopt = packet_setsockopt,
3695 .getsockopt = packet_getsockopt,
3696 .sendmsg = packet_sendmsg,
3697 .recvmsg = packet_recvmsg,
3698 .mmap = packet_mmap,
3699 .sendpage = sock_no_sendpage,
3702 static const struct net_proto_family packet_family_ops = {
3703 .family = PF_PACKET,
3704 .create = packet_create,
3705 .owner = THIS_MODULE,
3708 static struct notifier_block packet_netdev_notifier = {
3709 .notifier_call = packet_notifier,
3712 #ifdef CONFIG_PROC_FS
3714 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
3717 struct net *net = seq_file_net(seq);
3720 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
3723 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3725 struct net *net = seq_file_net(seq);
3726 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
3729 static void packet_seq_stop(struct seq_file *seq, void *v)
3735 static int packet_seq_show(struct seq_file *seq, void *v)
3737 if (v == SEQ_START_TOKEN)
3738 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3740 struct sock *s = sk_entry(v);
3741 const struct packet_sock *po = pkt_sk(s);
3744 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
3746 atomic_read(&s->sk_refcnt),
3751 atomic_read(&s->sk_rmem_alloc),
3752 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
3759 static const struct seq_operations packet_seq_ops = {
3760 .start = packet_seq_start,
3761 .next = packet_seq_next,
3762 .stop = packet_seq_stop,
3763 .show = packet_seq_show,
3766 static int packet_seq_open(struct inode *inode, struct file *file)
3768 return seq_open_net(inode, file, &packet_seq_ops,
3769 sizeof(struct seq_net_private));
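/*
 * The seq_file handlers above back /proc/net/packet. Each socket becomes one
 * line under the header printed by packet_seq_show(); with illustrative
 * values it looks roughly like:
 *
 *	sk       RefCnt Type Proto  Iface R Rmem   User   Inode
 *	ffff8800 3      3    0003   2     1 0      0      7689
 */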
3772 static const struct file_operations packet_seq_fops = {
3773 .owner = THIS_MODULE,
3774 .open = packet_seq_open,
3776 .llseek = seq_lseek,
3777 .release = seq_release_net,
3782 static int __net_init packet_net_init(struct net *net)
3784 mutex_init(&net->packet.sklist_lock);
3785 INIT_HLIST_HEAD(&net->packet.sklist);
3787 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3793 static void __net_exit packet_net_exit(struct net *net)
3795 proc_net_remove(net, "packet");
3798 static struct pernet_operations packet_net_ops = {
3799 .init = packet_net_init,
3800 .exit = packet_net_exit,
3804 static void __exit packet_exit(void)
3806 unregister_netdevice_notifier(&packet_netdev_notifier);
3807 unregister_pernet_subsys(&packet_net_ops);
3808 sock_unregister(PF_PACKET);
3809 proto_unregister(&packet_proto);
3812 static int __init packet_init(void)
3814 int rc = proto_register(&packet_proto, 0);
3819 sock_register(&packet_family_ops);
3820 register_pernet_subsys(&packet_net_ops);
3821 register_netdevice_notifier(&packet_netdev_notifier);
3826 module_init(packet_init);
3827 module_exit(packet_exit);
3828 MODULE_LICENSE("GPL");
3829 MODULE_ALIAS_NETPROTO(PF_PACKET);