// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"
/* Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

Summary:
   If dev_has_header(dev) == false we are unable to restore the ll header,
   because it is invisible to us.

On transmit:
------------

dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

dev_has_header(dev) == false (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set network_header on output to the correct position,
   packet classifier depends on it.
 */
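
/* Illustrative arithmetic for the WiFi example above (the 32-byte figure is
 * an assumption, not taken from this file): with a real 802.11+SNAP header
 * of, say, 32 bytes and a fake Ethernet header of ETH_HLEN (14) bytes, such
 * a driver would advertise
 *
 *	dev->hard_header_len = ETH_HLEN;	// fake Ethernet header
 *	dev->needed_headroom = 32 - ETH_HLEN;	// room for the real header
 *
 * so packet sockets see an Ethernet frame while the driver still has enough
 * headroom to replace it with the real link-layer header on transmit.
 */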
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
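
/* Worked example for the block layout macros above (illustrative numbers):
 * with tp_sizeof_priv == 13, BLK_PLUS_PRIV() rounds the private area up to
 * ALIGN(13, 8) == 16 bytes, so each block is laid out as
 *
 *	[ block descriptor: BLK_HDR_LEN ][ priv: 16 ][ first frame ... ]
 *
 * and BLOCK_O2FP() reports BLK_HDR_LEN + 16 as offset_to_first_pkt, while
 * GET_NEXT_PRB_BLK_NUM() below simply wraps kactive_blk_num back to block 0
 * once the last block has been used.
 */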
struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}
/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}
static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
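
/* tp_status is the per-slot handshake word between kernel and user space:
 * the kernel only fills a frame it owns (TP_STATUS_KERNEL) and publishes it
 * with TP_STATUS_USER; user space returns ownership by storing
 * TP_STATUS_KERNEL again. A minimal user-space consumer loop over a mapped
 * V2 RX ring might look like this (hypothetical sketch, not from this file):
 *
 *	struct tpacket2_hdr *hdr = ring + i * req.tp_frame_size;
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel
 *	// ... consume the frame at (char *)hdr + hdr->tp_mac ...
 *	__sync_synchronize();
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 *
 * The flush_dcache_page()/smp_rmb()/smp_wmb() calls above order exactly
 * these ownership transitions on architectures where that is not implicit.
 */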
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}
static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}
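
/* Worked example (illustrative numbers): with frames_per_block == 4 and
 * frame_size == 2048, position 10 resolves to pg_vec block 10 / 4 == 2 at
 * byte offset (10 % 4) * 2048 == 4096 inside that block's buffer.
 */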
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is so slow you don't really
	 * need to worry about perf anyways
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
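
/* Worked example (illustrative numbers): a 1MB block on a 1Gbps link gives
 * mbits = (1048576 * 8) / (1024 * 1024) == 8 and div == 1, so the timeout
 * comes out at 8 + 1 == 9 msecs, roughly the time needed to fill one block
 * at line rate (see the timer discussion further down).
 */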
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}
/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and
				 * refreshes the timer as a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}
/*
 * Side effect of closing a block:
 *
 * 1) The block is closed and handed to user-space (TP_STATUS_USER).
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	 Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}
/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}
#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			/* Waiting for skb_copy_bits to finish... */
			write_lock(&pkc->blk_fill_in_prog_lock);
			write_unlock(&pkc->blk_fill_in_prog_lock);
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	read_unlock(&pkc->blk_fill_in_prog_lock);
}
static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	read_lock(&pkc->blk_fill_in_prog_lock);
	prb_run_all_ft_ops(pkc, ppd);
}
/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}
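
/* For reference, the user-space side of this hand-off (hypothetical sketch,
 * not from this file): once a block descriptor's block_status carries
 * TP_STATUS_USER, a consumer walks its packets via tp_next_offset and then
 * releases the block, which is what may thaw a frozen queue here:
 *
 *	struct tpacket_block_desc *pbd = ...;	// mapped block
 *	struct tpacket3_hdr *p = (void *)pbd +
 *		pbd->hdr.bh1.offset_to_first_pkt;
 *	for (i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
 *		// ... process the packet at p ...
 *		p = (void *)p + p->tp_next_offset;
 *	}
 *	__sync_synchronize();
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 */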
static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}
#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{
	const struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
				   - (skb ? skb->truesize : 0);

		if (avail > (rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}
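
/* Worked example of the ROOM_POW_OFF heuristic (illustrative numbers): for
 * a V2 ring with 256 frames, the probe above checks the slot
 * len >> 2 == 64 frames ahead of head; if even that slot is still
 * TP_STATUS_KERNEL, at least a quarter of the ring is free and the socket
 * reports ROOM_NORMAL, otherwise it degrades to ROOM_LOW or ROOM_NONE.
 */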
static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int pressure, ret;

	ret = __packet_rcv_has_room(po, skb);
	pressure = ret != ROOM_NORMAL;

	if (READ_ONCE(po->pressure) != pressure)
		WRITE_ONCE(po->pressure, pressure);

	return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
	if (READ_ONCE(po->pressure) &&
	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		WRITE_ONCE(po->pressure, 0);
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 *history = po->rollover->history;
	u32 victim, rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (READ_ONCE(history[i]) == rxhash)
			count++;

	victim = prandom_u32() % ROLLOVER_HLEN;

	/* Avoid dirtying the cache line if possible */
	if (READ_ONCE(history[victim]) != rxhash)
		WRITE_ONCE(history[victim], rxhash);

	return count > (ROLLOVER_HLEN >> 1);
}
static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(rcu_dereference(f->arr[idx]));

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(rcu_dereference(f->arr[i]));
		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(rcu_dereference(f->arr[idx]));
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
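
/* Typical user-space setup for a fanout group (hypothetical sketch, see
 * packet(7)): every worker creates and binds its own packet socket, then
 * joins the same group id, after which packet_rcv_fanout() above spreads
 * incoming packets across the members according to the chosen policy:
 *
 *	int fanout_arg = group_id | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 */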
DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	rcu_assign_pointer(f->arr[f->num_members], sk);
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (rcu_dereference_protected(f->arr[i],
					      lockdep_is_held(&f->lock)) == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	rcu_assign_pointer(f->arr[i],
			   rcu_dereference_protected(f->arr[f->num_members - 1],
						     lockdep_is_held(&f->lock)));
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}
static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	ret = copy_bpf_fprog_from_user(&fprog, data, len);
	if (ret)
		return ret;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_sockptr(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, sockptr_t data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
		break;
	}
}
static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			return false;
		}
	}
	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}
static int fanout_add(struct sock *sk, struct fanout_args *args)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	u16 type_flags = args->type_flags;
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	u16 id = args->id;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
		break;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (id != 0) {
			err = -EINVAL;
			goto out;
		}
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match) {
		if (match->flags != flags)
			goto out;
		if (args->max_num_members &&
		    args->max_num_members != match->max_num_members)
			goto out;
	} else {
		if (args->max_num_members > PACKET_FANOUT_MAX)
			goto out;
		if (!args->max_num_members)
			/* legacy PACKET_FANOUT_MAX */
			args->max_num_members = 256;
		err = -ENOMEM;
		match = kvzalloc(struct_size(match, arr, args->max_num_members),
				 GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.af_packet_net = read_pnet(&match->net);
		match->prot_hook.id_match = match_fanout_group;
		match->max_num_members = args->max_num_members;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < match->max_num_members) {
			__dev_remove_pack(&po->prot_hook);

			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
			WRITE_ONCE(po->fanout, match);

			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kvfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}
/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}
static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
	int depth;

	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
	    sock->type == SOCK_RAW) {
		skb_reset_mac_header(skb);
		skb->protocol = dev_parse_header_protocol(skb);
	}

	/* Move network header to the right position for VLAN tagged packets */
	if (likely(skb->dev->type == ARPHRD_ETHER) &&
	    eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
		if (pskb_may_pull(skb, depth))
			skb_set_network_header(skb, depth);
	}

	skb_probe_transport_header(skb);
}
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	packet_parse_headers(skb, sock);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}

static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len)
{
	struct virtio_net_hdr vnet_hdr;

	if (*len < sizeof(vnet_hdr))
		return -EINVAL;
	*len -= sizeof(vnet_hdr);

	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
		return -EINVAL;

	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
}
/*
 * This function does lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle a shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on
 * exit, we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	bool is_drop_n_account = false;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev_has_header(dev)) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_hatype = dev->type;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
	 * Use their space for storing the original skb length.
	 */
	PACKET_SKB_CB(skb)->sa.origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_packets++;
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk);
	return 0;

drop_n_acct:
	is_drop_n_account = true;
	atomic_inc(&po->tp_drops);
	atomic_inc(&sk->sk_drops);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;
}
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union tpacket_uhdr h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, hdrlen;
	unsigned int netoff;
	struct sk_buff *copy_skb = NULL;
	struct timespec64 ts;
	__u32 ts_status;
	bool is_drop_n_account = false;
	unsigned int slot_id = 0;
	bool do_vnet = false;

	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * We may add members to them until current aligned size without forcing
	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
	 */
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev_has_header(dev)) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;

	/* If we are flooded, just give up */
	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
		atomic_inc(&po->tp_drops);
		goto drop_n_restore;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;
	else if (skb->pkt_type != PACKET_OUTGOING &&
		 skb_csum_unnecessary(skb))
		status |= TP_STATUS_CSUM_VALID;

	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
				       po->tp_reserve;
		if (po->has_vnet_hdr) {
			netoff += sizeof(struct virtio_net_hdr);
			do_vnet = true;
		}
		macoff = netoff - maclen;
	}
	if (netoff > USHRT_MAX) {
		atomic_inc(&po->tp_drops);
		goto drop_n_restore;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb) {
					memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
					       sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
					skb_set_owner_r(copy_skb, sk);
				}
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0) {
				snaplen = 0;
				do_vnet = false;
			}
		}
	} else if (unlikely(macoff + snaplen >
			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
		unsigned int nval;

		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
			    snaplen, nval, macoff);
		snaplen = nval;
		if (unlikely((int)snaplen < 0)) {
			snaplen = 0;
			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
			do_vnet = false;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto drop_n_account;

	if (po->tp_version <= TPACKET_V2) {
		slot_id = po->rx_ring.head;
		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
			goto drop_n_account;
		__set_bit(slot_id, po->rx_ring.rx_owner_map);
	}

	if (do_vnet &&
	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
				    sizeof(struct virtio_net_hdr),
				    vio_le(), true, 0)) {
		if (po->tp_version == TPACKET_V3)
			prb_clear_blk_fill_status(&po->rx_ring);
		goto drop_n_account;
	}

	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
	/*
	 * LOSING will be reported till you read the stats,
	 * because it's COR - Clear On Read.
	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
	 * at packet level.
	 */
		if (atomic_read(&po->tp_drops))
			status |= TP_STATUS_LOSING;
	}

	po->stats.stats1.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	/* Always timestamp; prefer an existing software timestamp taken
	 * closer to the time of capture.
	 */
	ts_status = tpacket_get_timestamp(skb, &ts,
					  po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
	if (!ts_status)
		ktime_get_real_ts64(&ts);

	status |= ts_status;

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (skb_vlan_tag_present(skb)) {
			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
			h.h2->tp_vlan_tpid = 0;
		}
		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_nxt_offset,vlan are already populated above.
		 * So DONT clear those fields here
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	if (po->tp_version <= TPACKET_V2) {
		u8 *start, *end;

		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
					macoff + snaplen);

		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
	smp_wmb();
#endif

	if (po->tp_version <= TPACKET_V2) {
		spin_lock(&sk->sk_receive_queue.lock);
		__packet_set_status(po, h.raw, status);
		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
		spin_unlock(&sk->sk_receive_queue.lock);
		sk->sk_data_ready(sk);
	} else if (po->tp_version == TPACKET_V3) {
		prb_clear_blk_fill_status(&po->rx_ring);
	}

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;

drop_n_account:
	spin_unlock(&sk->sk_receive_queue.lock);
	atomic_inc(&po->tp_drops);
	is_drop_n_account = true;

	sk->sk_data_ready(sk);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}
2476 static void tpacket_destruct_skb(struct sk_buff *skb)
2478 struct packet_sock *po = pkt_sk(skb->sk);
2480 if (likely(po->tx_ring.pg_vec)) {
2484 ph = skb_zcopy_get_nouarg(skb);
2485 packet_dec_pending(&po->tx_ring);
2487 ts = __packet_set_timestamp(po, ph, skb);
2488 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2490 if (!packet_read_pending(&po->tx_ring))
2491 complete(&po->skb_completion);
2497 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2499 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2500 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2501 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2502 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2503 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2504 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2505 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2507 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2513 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2514 struct virtio_net_hdr *vnet_hdr)
2516 if (*len < sizeof(*vnet_hdr))
2518 *len -= sizeof(*vnet_hdr);
2520 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2523 return __packet_snd_vnet_parse(vnet_hdr, *len);
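/* Illustrative userspace sketch (not part of this file): with the
 * PACKET_VNET_HDR socket option enabled, this parsing path expects every
 * sendmsg() payload to begin with a struct virtio_net_hdr.  The helper name
 * and the all-zero header (no csum offload, no GSO) are assumptions made
 * only for the example.
 *
 *	#include <linux/virtio_net.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static ssize_t send_with_vnet_hdr(int fd, const void *frame, size_t flen)
 *	{
 *		struct virtio_net_hdr vh;
 *		struct iovec iov[2];
 *		struct msghdr msg;
 *
 *		memset(&vh, 0, sizeof(vh));
 *		iov[0].iov_base = &vh;
 *		iov[0].iov_len  = sizeof(vh);
 *		iov[1].iov_base = (void *)frame;
 *		iov[1].iov_len  = flen;
 *		memset(&msg, 0, sizeof(msg));
 *		msg.msg_iov    = iov;
 *		msg.msg_iovlen = 2;
 *		return sendmsg(fd, &msg, 0);
 *	}
 */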
2526 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2527 void *frame, struct net_device *dev, void *data, int tp_len,
2528 __be16 proto, unsigned char *addr, int hlen, int copylen,
2529 const struct sockcm_cookie *sockc)
2531 union tpacket_uhdr ph;
2532 int to_write, offset, len, nr_frags, len_max;
2533 struct socket *sock = po->sk.sk_socket;
2539 skb->protocol = proto;
2541 skb->priority = po->sk.sk_priority;
2542 skb->mark = po->sk.sk_mark;
2543 skb->tstamp = sockc->transmit_time;
2544 skb_setup_tx_timestamp(skb, sockc->tsflags);
2545 skb_zcopy_set_nouarg(skb, ph.raw);
2547 skb_reserve(skb, hlen);
2548 skb_reset_network_header(skb);
2552 if (sock->type == SOCK_DGRAM) {
2553 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2555 if (unlikely(err < 0))
2557 } else if (copylen) {
2558 int hdrlen = min_t(int, copylen, tp_len);
2560 skb_push(skb, dev->hard_header_len);
2561 skb_put(skb, copylen - dev->hard_header_len);
2562 err = skb_store_bits(skb, 0, data, hdrlen);
2565 if (!dev_validate_header(dev, skb->data, hdrlen))
2572 offset = offset_in_page(data);
2573 len_max = PAGE_SIZE - offset;
2574 len = ((to_write > len_max) ? len_max : to_write);
2576 skb->data_len = to_write;
2577 skb->len += to_write;
2578 skb->truesize += to_write;
2579 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2581 while (likely(to_write)) {
2582 nr_frags = skb_shinfo(skb)->nr_frags;
2584 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2585 pr_err("Packet exceed the number of skb frags(%lu)\n",
2590 page = pgv_to_page(data);
2592 flush_dcache_page(page);
2594 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2597 len_max = PAGE_SIZE;
2598 len = ((to_write > len_max) ? len_max : to_write);
2601 packet_parse_headers(skb, sock);
2606 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2607 int size_max, void **data)
2609 union tpacket_uhdr ph;
2614 switch (po->tp_version) {
2616 if (ph.h3->tp_next_offset != 0) {
2617 pr_warn_once("variable sized slot not supported");
2620 tp_len = ph.h3->tp_len;
2623 tp_len = ph.h2->tp_len;
2626 tp_len = ph.h1->tp_len;
2629 if (unlikely(tp_len > size_max)) {
2630 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2634 if (unlikely(po->tp_tx_has_off)) {
2635 int off_min, off_max;
2637 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2638 off_max = po->tx_ring.frame_size - tp_len;
2639 if (po->sk.sk_type == SOCK_DGRAM) {
2640 switch (po->tp_version) {
2642 off = ph.h3->tp_net;
2645 off = ph.h2->tp_net;
2648 off = ph.h1->tp_net;
2652 switch (po->tp_version) {
2654 off = ph.h3->tp_mac;
2657 off = ph.h2->tp_mac;
2660 off = ph.h1->tp_mac;
2664 if (unlikely((off < off_min) || (off_max < off)))
2667 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2670 *data = frame + off;
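/* Illustrative userspace sketch (not part of this file): this is the frame
 * layout tpacket_parse_header() consumes for TPACKET_V2 when
 * PACKET_TX_HAS_OFF is not set, i.e. packet data starts at
 * TPACKET2_HDRLEN - sizeof(struct sockaddr_ll) and the sender only fills
 * tp_len and tp_status.  The helper name and the choice of memory barrier
 * are assumptions made only for the example.
 *
 *	#include <linux/if_packet.h>
 *	#include <string.h>
 *
 *	static void fill_v2_tx_frame(void *slot, const void *pkt, unsigned int plen)
 *	{
 *		struct tpacket2_hdr *hdr = slot;
 *		char *data = (char *)slot +
 *			     (TPACKET2_HDRLEN - sizeof(struct sockaddr_ll));
 *
 *		memcpy(data, pkt, plen);
 *		hdr->tp_len = plen;
 *		__sync_synchronize();
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	}
 */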
2674 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2676 struct sk_buff *skb = NULL;
2677 struct net_device *dev;
2678 struct virtio_net_hdr *vnet_hdr = NULL;
2679 struct sockcm_cookie sockc;
2681 int err, reserve = 0;
2683 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2684 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2685 unsigned char *addr = NULL;
2686 int tp_len, size_max;
2689 int status = TP_STATUS_AVAILABLE;
2690 int hlen, tlen, copylen = 0;
2693 mutex_lock(&po->pg_vec_lock);
2695 /* The packet_sendmsg() check on tx_ring.pg_vec was lockless,
2696 * so we need to confirm it under the protection of pg_vec_lock.
2698 if (unlikely(!po->tx_ring.pg_vec)) {
2702 if (likely(saddr == NULL)) {
2703 dev = packet_cached_dev_get(po);
2704 proto = READ_ONCE(po->num);
2707 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2709 if (msg->msg_namelen < (saddr->sll_halen
2710 + offsetof(struct sockaddr_ll,
2713 proto = saddr->sll_protocol;
2714 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2715 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2716 if (dev && msg->msg_namelen < dev->addr_len +
2717 offsetof(struct sockaddr_ll, sll_addr))
2719 addr = saddr->sll_addr;
2724 if (unlikely(dev == NULL))
2727 if (unlikely(!(dev->flags & IFF_UP)))
2730 sockcm_init(&sockc, &po->sk);
2731 if (msg->msg_controllen) {
2732 err = sock_cmsg_send(&po->sk, msg, &sockc);
2737 if (po->sk.sk_socket->type == SOCK_RAW)
2738 reserve = dev->hard_header_len;
2739 size_max = po->tx_ring.frame_size
2740 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2742 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2743 size_max = dev->mtu + reserve + VLAN_HLEN;
2745 reinit_completion(&po->skb_completion);
2748 ph = packet_current_frame(po, &po->tx_ring,
2749 TP_STATUS_SEND_REQUEST);
2750 if (unlikely(ph == NULL)) {
2751 if (need_wait && skb) {
2752 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2753 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2755 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2759 /* check for additional frames */
2764 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2768 status = TP_STATUS_SEND_REQUEST;
2769 hlen = LL_RESERVED_SPACE(dev);
2770 tlen = dev->needed_tailroom;
2771 if (po->has_vnet_hdr) {
2773 data += sizeof(*vnet_hdr);
2774 tp_len -= sizeof(*vnet_hdr);
2776 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2780 copylen = __virtio16_to_cpu(vio_le(),
2783 copylen = max_t(int, copylen, dev->hard_header_len);
2784 skb = sock_alloc_send_skb(&po->sk,
2785 hlen + tlen + sizeof(struct sockaddr_ll) +
2786 (copylen - dev->hard_header_len),
2789 if (unlikely(skb == NULL)) {
2790 /* we assume the socket was initially writeable ... */
2791 if (likely(len_sum > 0))
2795 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2796 addr, hlen, copylen, &sockc);
2797 if (likely(tp_len >= 0) &&
2798 tp_len > dev->mtu + reserve &&
2799 !po->has_vnet_hdr &&
2800 !packet_extra_vlan_len_allowed(dev, skb))
2803 if (unlikely(tp_len < 0)) {
2806 __packet_set_status(po, ph,
2807 TP_STATUS_AVAILABLE);
2808 packet_increment_head(&po->tx_ring);
2812 status = TP_STATUS_WRONG_FORMAT;
2818 if (po->has_vnet_hdr) {
2819 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2823 virtio_net_hdr_set_proto(skb, vnet_hdr);
2826 skb->destructor = tpacket_destruct_skb;
2827 __packet_set_status(po, ph, TP_STATUS_SENDING);
2828 packet_inc_pending(&po->tx_ring);
2830 status = TP_STATUS_SEND_REQUEST;
2831 err = po->xmit(skb);
2832 if (unlikely(err != 0)) {
2834 err = net_xmit_errno(err);
2835 if (err && __packet_get_status(po, ph) ==
2836 TP_STATUS_AVAILABLE) {
2837 /* skb was destructed already */
2842 * skb was dropped but not destructed yet;
2843 * let's treat it like congestion or err < 0
2847 packet_increment_head(&po->tx_ring);
2849 } while (likely((ph != NULL) ||
2850 /* Note: packet_read_pending() might be slow if we have
2851 * to call it, as it's a per-cpu variable, but in the fast path
2852 * we already short-circuit the loop with the first
2853 * condition and luckily don't have to take that path.
2856 (need_wait && packet_read_pending(&po->tx_ring))));
2862 __packet_set_status(po, ph, status);
2867 mutex_unlock(&po->pg_vec_lock);
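/* Illustrative userspace sketch (not part of this file): once TX ring frames
 * have been marked TP_STATUS_SEND_REQUEST, a zero-length send() is what
 * drives tpacket_snd() above; MSG_DONTWAIT makes it return instead of
 * waiting for pending completions.  The helper name is an assumption made
 * only for the example.
 *
 *	#include <sys/socket.h>
 *
 *	static ssize_t tx_ring_kick(int fd, int nonblock)
 *	{
 *		return send(fd, NULL, 0, nonblock ? MSG_DONTWAIT : 0);
 *	}
 */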
2871 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2872 size_t reserve, size_t len,
2873 size_t linear, int noblock,
2876 struct sk_buff *skb;
2878 /* Under a page? Don't bother with paged skb. */
2879 if (prepad + len < PAGE_SIZE || !linear)
2882 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2887 skb_reserve(skb, reserve);
2888 skb_put(skb, linear);
2889 skb->data_len = len - linear;
2890 skb->len += len - linear;
2895 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2897 struct sock *sk = sock->sk;
2898 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2899 struct sk_buff *skb;
2900 struct net_device *dev;
2902 unsigned char *addr = NULL;
2903 int err, reserve = 0;
2904 struct sockcm_cookie sockc;
2905 struct virtio_net_hdr vnet_hdr = { 0 };
2907 struct packet_sock *po = pkt_sk(sk);
2908 bool has_vnet_hdr = false;
2909 int hlen, tlen, linear;
2913 * Get and verify the address.
2916 if (likely(saddr == NULL)) {
2917 dev = packet_cached_dev_get(po);
2918 proto = READ_ONCE(po->num);
2921 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2923 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2925 proto = saddr->sll_protocol;
2926 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2927 if (sock->type == SOCK_DGRAM) {
2928 if (dev && msg->msg_namelen < dev->addr_len +
2929 offsetof(struct sockaddr_ll, sll_addr))
2931 addr = saddr->sll_addr;
2936 if (unlikely(dev == NULL))
2939 if (unlikely(!(dev->flags & IFF_UP)))
2942 sockcm_init(&sockc, sk);
2943 sockc.mark = sk->sk_mark;
2944 if (msg->msg_controllen) {
2945 err = sock_cmsg_send(sk, msg, &sockc);
2950 if (sock->type == SOCK_RAW)
2951 reserve = dev->hard_header_len;
2952 if (po->has_vnet_hdr) {
2953 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2956 has_vnet_hdr = true;
2959 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2960 if (!netif_supports_nofcs(dev)) {
2961 err = -EPROTONOSUPPORT;
2964 extra_len = 4; /* We're doing our own CRC */
2968 if (!vnet_hdr.gso_type &&
2969 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2973 hlen = LL_RESERVED_SPACE(dev);
2974 tlen = dev->needed_tailroom;
2975 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2976 linear = max(linear, min_t(int, len, dev->hard_header_len));
2977 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2978 msg->msg_flags & MSG_DONTWAIT, &err);
2982 skb_reset_network_header(skb);
2985 if (sock->type == SOCK_DGRAM) {
2986 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2987 if (unlikely(offset < 0))
2989 } else if (reserve) {
2990 skb_reserve(skb, -reserve);
2991 if (len < reserve + sizeof(struct ipv6hdr) &&
2992 dev->min_header_len != dev->hard_header_len)
2993 skb_reset_network_header(skb);
2996 /* Returns -EFAULT on error */
2997 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
3001 if ((sock->type == SOCK_RAW &&
3002 !dev_validate_header(dev, skb->data, len)) || !skb->len) {
3007 skb_setup_tx_timestamp(skb, sockc.tsflags);
3009 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3010 !packet_extra_vlan_len_allowed(dev, skb)) {
3015 skb->protocol = proto;
3017 skb->priority = sk->sk_priority;
3018 skb->mark = sockc.mark;
3019 skb->tstamp = sockc.transmit_time;
3021 if (unlikely(extra_len == 4))
3024 packet_parse_headers(skb, sock);
3027 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3030 len += sizeof(vnet_hdr);
3031 virtio_net_hdr_set_proto(skb, &vnet_hdr);
3034 err = po->xmit(skb);
3035 if (unlikely(err != 0)) {
3037 err = net_xmit_errno(err);
3054 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3056 struct sock *sk = sock->sk;
3057 struct packet_sock *po = pkt_sk(sk);
3059 /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3060 * tpacket_snd() will redo the check safely.
3062 if (data_race(po->tx_ring.pg_vec))
3063 return tpacket_snd(po, msg);
3065 return packet_snd(sock, msg, len);
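/* Illustrative userspace sketch (not part of this file): without a TX ring,
 * packet_sendmsg() falls through to packet_snd().  On a SOCK_DGRAM packet
 * socket the destination link-layer address comes from the sockaddr_ll in
 * msg_name and the kernel builds the header via dev_hard_header().  The
 * helper name, ifindex and destination MAC are placeholders.
 *
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static ssize_t dgram_send(int fd, int ifindex,
 *				  const unsigned char dst[ETH_ALEN],
 *				  const void *payload, size_t len)
 *	{
 *		struct sockaddr_ll sll;
 *
 *		memset(&sll, 0, sizeof(sll));
 *		sll.sll_family   = AF_PACKET;
 *		sll.sll_ifindex  = ifindex;
 *		sll.sll_protocol = htons(ETH_P_IP);
 *		sll.sll_halen    = ETH_ALEN;
 *		memcpy(sll.sll_addr, dst, ETH_ALEN);
 *		return sendto(fd, payload, len, 0,
 *			      (struct sockaddr *)&sll, sizeof(sll));
 *	}
 */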
3069 * Close a PACKET socket. This is fairly simple. We immediately go
3070 * to 'closed' state and remove our protocol entry from the device list.
3073 static int packet_release(struct socket *sock)
3075 struct sock *sk = sock->sk;
3076 struct packet_sock *po;
3077 struct packet_fanout *f;
3079 union tpacket_req_u req_u;
3087 mutex_lock(&net->packet.sklist_lock);
3088 sk_del_node_init_rcu(sk);
3089 mutex_unlock(&net->packet.sklist_lock);
3092 sock_prot_inuse_add(net, sk->sk_prot, -1);
3095 spin_lock(&po->bind_lock);
3096 unregister_prot_hook(sk, false);
3097 packet_cached_dev_reset(po);
3099 if (po->prot_hook.dev) {
3100 dev_put(po->prot_hook.dev);
3101 po->prot_hook.dev = NULL;
3103 spin_unlock(&po->bind_lock);
3105 packet_flush_mclist(sk);
3108 if (po->rx_ring.pg_vec) {
3109 memset(&req_u, 0, sizeof(req_u));
3110 packet_set_ring(sk, &req_u, 1, 0);
3113 if (po->tx_ring.pg_vec) {
3114 memset(&req_u, 0, sizeof(req_u));
3115 packet_set_ring(sk, &req_u, 1, 1);
3119 f = fanout_release(sk);
3123 kfree(po->rollover);
3125 fanout_release_data(f);
3129 * Now the socket is dead. No more input will appear.
3136 skb_queue_purge(&sk->sk_receive_queue);
3137 packet_free_pending(po);
3138 sk_refcnt_debug_release(sk);
3145 * Attach a packet hook.
3148 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3151 struct packet_sock *po = pkt_sk(sk);
3152 struct net_device *dev_curr;
3155 struct net_device *dev = NULL;
3157 bool unlisted = false;
3160 spin_lock(&po->bind_lock);
3169 dev = dev_get_by_name_rcu(sock_net(sk), name);
3174 } else if (ifindex) {
3175 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3184 proto_curr = po->prot_hook.type;
3185 dev_curr = po->prot_hook.dev;
3187 need_rehook = proto_curr != proto || dev_curr != dev;
3192 /* prevents packet_notifier() from calling
3193 * register_prot_hook()
3195 WRITE_ONCE(po->num, 0);
3196 __unregister_prot_hook(sk, true);
3198 dev_curr = po->prot_hook.dev;
3200 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3204 BUG_ON(po->running);
3205 WRITE_ONCE(po->num, proto);
3206 po->prot_hook.type = proto;
3208 if (unlikely(unlisted)) {
3210 po->prot_hook.dev = NULL;
3211 WRITE_ONCE(po->ifindex, -1);
3212 packet_cached_dev_reset(po);
3214 po->prot_hook.dev = dev;
3215 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3216 packet_cached_dev_assign(po, dev);
3221 if (proto == 0 || !need_rehook)
3224 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3225 register_prot_hook(sk);
3227 sk->sk_err = ENETDOWN;
3228 if (!sock_flag(sk, SOCK_DEAD))
3229 sk_error_report(sk);
3234 spin_unlock(&po->bind_lock);
3240 * Bind a packet socket to a device
3243 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3246 struct sock *sk = sock->sk;
3247 char name[sizeof(uaddr->sa_data) + 1];
3253 if (addr_len != sizeof(struct sockaddr))
3255 /* uaddr->sa_data comes from userspace; it's not guaranteed to be NUL-terminated.
3258 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3259 name[sizeof(uaddr->sa_data)] = 0;
3261 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3264 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3266 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3267 struct sock *sk = sock->sk;
3273 if (addr_len < sizeof(struct sockaddr_ll))
3275 if (sll->sll_family != AF_PACKET)
3278 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3279 sll->sll_protocol ? : pkt_sk(sk)->num);
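/* Illustrative userspace sketch (not part of this file): packet_bind()
 * resolves the device from sll_ifindex and re-hooks the protocol; a zero
 * sll_protocol keeps whatever protocol the socket already had.  The helper
 * name and the if_nametoindex() lookup are assumptions made only for the
 * example.
 *
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *	#include <net/if.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int bind_to_ifname(int fd, const char *ifname)
 *	{
 *		struct sockaddr_ll sll;
 *
 *		memset(&sll, 0, sizeof(sll));
 *		sll.sll_family   = AF_PACKET;
 *		sll.sll_protocol = htons(ETH_P_ALL);
 *		sll.sll_ifindex  = if_nametoindex(ifname);
 *		if (!sll.sll_ifindex)
 *			return -1;
 *		return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *	}
 */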
3282 static struct proto packet_proto = {
3284 .owner = THIS_MODULE,
3285 .obj_size = sizeof(struct packet_sock),
3289 * Create a packet socket (SOCK_RAW, SOCK_DGRAM or the legacy SOCK_PACKET).
3292 static int packet_create(struct net *net, struct socket *sock, int protocol,
3296 struct packet_sock *po;
3297 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3300 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3302 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3303 sock->type != SOCK_PACKET)
3304 return -ESOCKTNOSUPPORT;
3306 sock->state = SS_UNCONNECTED;
3309 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3313 sock->ops = &packet_ops;
3314 if (sock->type == SOCK_PACKET)
3315 sock->ops = &packet_ops_spkt;
3317 sock_init_data(sock, sk);
3320 init_completion(&po->skb_completion);
3321 sk->sk_family = PF_PACKET;
3323 po->xmit = dev_queue_xmit;
3325 err = packet_alloc_pending(po);
3329 packet_cached_dev_reset(po);
3331 sk->sk_destruct = packet_sock_destruct;
3332 sk_refcnt_debug_inc(sk);
3335 * Attach a protocol block
3338 spin_lock_init(&po->bind_lock);
3339 mutex_init(&po->pg_vec_lock);
3340 po->rollover = NULL;
3341 po->prot_hook.func = packet_rcv;
3343 if (sock->type == SOCK_PACKET)
3344 po->prot_hook.func = packet_rcv_spkt;
3346 po->prot_hook.af_packet_priv = sk;
3347 po->prot_hook.af_packet_net = sock_net(sk);
3350 po->prot_hook.type = proto;
3351 __register_prot_hook(sk);
3354 mutex_lock(&net->packet.sklist_lock);
3355 sk_add_node_tail_rcu(sk, &net->packet.sklist);
3356 mutex_unlock(&net->packet.sklist_lock);
3359 sock_prot_inuse_add(net, &packet_proto, 1);
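/* Illustrative userspace sketch (not part of this file): packet_create() is
 * reached through socket(2).  CAP_NET_RAW is required, and the protocol
 * argument is the big-endian link-layer protocol to receive; SOCK_RAW keeps
 * the link-layer header, SOCK_DGRAM strips it.  The helper name is an
 * assumption made only for the example.
 *
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *
 *	static int open_packet_socket(void)
 *	{
 *		return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	}
 */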
3370 * Pull a packet from our receive queue and hand it to the user.
3371 * If necessary we block.
3374 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3377 struct sock *sk = sock->sk;
3378 struct sk_buff *skb;
3380 int vnet_hdr_len = 0;
3381 unsigned int origlen = 0;
3384 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3388 /* What error should we return now? EUNATTACH? */
3389 if (pkt_sk(sk)->ifindex < 0)
3393 if (flags & MSG_ERRQUEUE) {
3394 err = sock_recv_errqueue(sk, msg, len,
3395 SOL_PACKET, PACKET_TX_TIMESTAMP);
3400 * Call the generic datagram receiver. This handles all sorts
3401 * of horrible races and re-entrancy so we can forget about it
3402 * in the protocol layers.
3404 * Now it will return ENETDOWN, if the device has just gone down,
3405 * but then it will block.
3408 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3411 * An error occurred, so return it. Because skb_recv_datagram()
3412 * handles the blocking, we don't have to see or worry about blocking retries.
3419 packet_rcv_try_clear_pressure(pkt_sk(sk));
3421 if (pkt_sk(sk)->has_vnet_hdr) {
3422 err = packet_rcv_vnet(msg, skb, &len);
3425 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3428 /* You lose any data beyond the buffer you gave. If that worries
3429 * a user program, it can ask the device for its MTU anyway.
3435 msg->msg_flags |= MSG_TRUNC;
3438 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3442 if (sock->type != SOCK_PACKET) {
3443 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3445 /* Original length was stored in sockaddr_ll fields */
3446 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3447 sll->sll_family = AF_PACKET;
3448 sll->sll_protocol = skb->protocol;
3451 sock_recv_ts_and_drops(msg, sk, skb);
3453 if (msg->msg_name) {
3454 const size_t max_len = min(sizeof(skb->cb),
3455 sizeof(struct sockaddr_storage));
3458 /* If the address length field is there to be filled
3459 * in, we fill it in now.
3461 if (sock->type == SOCK_PACKET) {
3462 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3463 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3464 copy_len = msg->msg_namelen;
3466 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3468 msg->msg_namelen = sll->sll_halen +
3469 offsetof(struct sockaddr_ll, sll_addr);
3470 copy_len = msg->msg_namelen;
3471 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3472 memset(msg->msg_name +
3473 offsetof(struct sockaddr_ll, sll_addr),
3474 0, sizeof(sll->sll_addr));
3475 msg->msg_namelen = sizeof(struct sockaddr_ll);
3478 if (WARN_ON_ONCE(copy_len > max_len)) {
3480 msg->msg_namelen = copy_len;
3482 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3485 if (pkt_sk(sk)->auxdata) {
3486 struct tpacket_auxdata aux;
3488 aux.tp_status = TP_STATUS_USER;
3489 if (skb->ip_summed == CHECKSUM_PARTIAL)
3490 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3491 else if (skb->pkt_type != PACKET_OUTGOING &&
3492 skb_csum_unnecessary(skb))
3493 aux.tp_status |= TP_STATUS_CSUM_VALID;
3495 aux.tp_len = origlen;
3496 aux.tp_snaplen = skb->len;
3498 aux.tp_net = skb_network_offset(skb);
3499 if (skb_vlan_tag_present(skb)) {
3500 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3501 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3502 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3504 aux.tp_vlan_tci = 0;
3505 aux.tp_vlan_tpid = 0;
3507 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3511 * Free or return the buffer as appropriate. Again this
3512 * hides all the races and re-entrancy issues from us.
3514 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3517 skb_free_datagram(sk, skb);
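/* Illustrative userspace sketch (not part of this file): once PACKET_AUXDATA
 * has been enabled with setsockopt(), packet_recvmsg() appends the
 * tpacket_auxdata control message built above.  The helper name is an
 * assumption made only for the example.
 *
 *	#include <linux/if_packet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static ssize_t recv_with_auxdata(int fd, void *buf, size_t len,
 *					 struct tpacket_auxdata *aux)
 *	{
 *		union {
 *			struct cmsghdr cm;
 *			char b[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *		} ctrl;
 *		struct sockaddr_ll from;
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *		struct msghdr msg = {
 *			.msg_name = &from, .msg_namelen = sizeof(from),
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = &ctrl, .msg_controllen = sizeof(ctrl),
 *		};
 *		struct cmsghdr *c;
 *		ssize_t n = recvmsg(fd, &msg, 0);
 *
 *		if (n < 0)
 *			return n;
 *		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *			if (c->cmsg_level == SOL_PACKET &&
 *			    c->cmsg_type == PACKET_AUXDATA)
 *				memcpy(aux, CMSG_DATA(c), sizeof(*aux));
 *		return n;
 *	}
 */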
3522 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3525 struct net_device *dev;
3526 struct sock *sk = sock->sk;
3531 uaddr->sa_family = AF_PACKET;
3532 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3534 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3536 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3539 return sizeof(*uaddr);
3542 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3545 struct net_device *dev;
3546 struct sock *sk = sock->sk;
3547 struct packet_sock *po = pkt_sk(sk);
3548 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3554 ifindex = READ_ONCE(po->ifindex);
3555 sll->sll_family = AF_PACKET;
3556 sll->sll_ifindex = ifindex;
3557 sll->sll_protocol = READ_ONCE(po->num);
3558 sll->sll_pkttype = 0;
3560 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3562 sll->sll_hatype = dev->type;
3563 sll->sll_halen = dev->addr_len;
3564 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3566 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3571 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3574 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3578 case PACKET_MR_MULTICAST:
3579 if (i->alen != dev->addr_len)
3582 return dev_mc_add(dev, i->addr);
3584 return dev_mc_del(dev, i->addr);
3586 case PACKET_MR_PROMISC:
3587 return dev_set_promiscuity(dev, what);
3588 case PACKET_MR_ALLMULTI:
3589 return dev_set_allmulti(dev, what);
3590 case PACKET_MR_UNICAST:
3591 if (i->alen != dev->addr_len)
3594 return dev_uc_add(dev, i->addr);
3596 return dev_uc_del(dev, i->addr);
3604 static void packet_dev_mclist_delete(struct net_device *dev,
3605 struct packet_mclist **mlp)
3607 struct packet_mclist *ml;
3609 while ((ml = *mlp) != NULL) {
3610 if (ml->ifindex == dev->ifindex) {
3611 packet_dev_mc(dev, ml, -1);
3619 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3621 struct packet_sock *po = pkt_sk(sk);
3622 struct packet_mclist *ml, *i;
3623 struct net_device *dev;
3629 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3634 if (mreq->mr_alen > dev->addr_len)
3638 i = kmalloc(sizeof(*i), GFP_KERNEL);
3643 for (ml = po->mclist; ml; ml = ml->next) {
3644 if (ml->ifindex == mreq->mr_ifindex &&
3645 ml->type == mreq->mr_type &&
3646 ml->alen == mreq->mr_alen &&
3647 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3649 /* Free the new element ... */
3655 i->type = mreq->mr_type;
3656 i->ifindex = mreq->mr_ifindex;
3657 i->alen = mreq->mr_alen;
3658 memcpy(i->addr, mreq->mr_address, i->alen);
3659 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3661 i->next = po->mclist;
3663 err = packet_dev_mc(dev, i, 1);
3665 po->mclist = i->next;
3674 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3676 struct packet_mclist *ml, **mlp;
3680 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3681 if (ml->ifindex == mreq->mr_ifindex &&
3682 ml->type == mreq->mr_type &&
3683 ml->alen == mreq->mr_alen &&
3684 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3685 if (--ml->count == 0) {
3686 struct net_device *dev;
3688 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3690 packet_dev_mc(dev, ml, -1);
3700 static void packet_flush_mclist(struct sock *sk)
3702 struct packet_sock *po = pkt_sk(sk);
3703 struct packet_mclist *ml;
3709 while ((ml = po->mclist) != NULL) {
3710 struct net_device *dev;
3712 po->mclist = ml->next;
3713 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3715 packet_dev_mc(dev, ml, -1);
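/* Illustrative userspace sketch (not part of this file): packet_mc_add()
 * above is driven by PACKET_ADD_MEMBERSHIP; PACKET_MR_PROMISC maps to
 * dev_set_promiscuity() and is undone by PACKET_DROP_MEMBERSHIP or by
 * packet_flush_mclist() when the socket is released.  The helper name is an
 * assumption made only for the example.
 *
 *	#include <linux/if_packet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int enable_promisc(int fd, int ifindex)
 *	{
 *		struct packet_mreq mreq;
 *
 *		memset(&mreq, 0, sizeof(mreq));
 *		mreq.mr_ifindex = ifindex;
 *		mreq.mr_type    = PACKET_MR_PROMISC;
 *		return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *				  &mreq, sizeof(mreq));
 *	}
 */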
3722 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3723 unsigned int optlen)
3725 struct sock *sk = sock->sk;
3726 struct packet_sock *po = pkt_sk(sk);
3729 if (level != SOL_PACKET)
3730 return -ENOPROTOOPT;
3733 case PACKET_ADD_MEMBERSHIP:
3734 case PACKET_DROP_MEMBERSHIP:
3736 struct packet_mreq_max mreq;
3738 memset(&mreq, 0, sizeof(mreq));
3739 if (len < sizeof(struct packet_mreq))
3741 if (len > sizeof(mreq))
3743 if (copy_from_sockptr(&mreq, optval, len))
3745 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3747 if (optname == PACKET_ADD_MEMBERSHIP)
3748 ret = packet_mc_add(sk, &mreq);
3750 ret = packet_mc_drop(sk, &mreq);
3754 case PACKET_RX_RING:
3755 case PACKET_TX_RING:
3757 union tpacket_req_u req_u;
3761 switch (po->tp_version) {
3764 len = sizeof(req_u.req);
3768 len = sizeof(req_u.req3);
3774 if (copy_from_sockptr(&req_u.req, optval, len))
3777 ret = packet_set_ring(sk, &req_u, 0,
3778 optname == PACKET_TX_RING);
3783 case PACKET_COPY_THRESH:
3787 if (optlen != sizeof(val))
3789 if (copy_from_sockptr(&val, optval, sizeof(val)))
3792 pkt_sk(sk)->copy_thresh = val;
3795 case PACKET_VERSION:
3799 if (optlen != sizeof(val))
3801 if (copy_from_sockptr(&val, optval, sizeof(val)))
3812 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3815 po->tp_version = val;
3821 case PACKET_RESERVE:
3825 if (optlen != sizeof(val))
3827 if (copy_from_sockptr(&val, optval, sizeof(val)))
3832 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3835 po->tp_reserve = val;
3845 if (optlen != sizeof(val))
3847 if (copy_from_sockptr(&val, optval, sizeof(val)))
3851 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3854 po->tp_loss = !!val;
3860 case PACKET_AUXDATA:
3864 if (optlen < sizeof(val))
3866 if (copy_from_sockptr(&val, optval, sizeof(val)))
3870 po->auxdata = !!val;
3874 case PACKET_ORIGDEV:
3878 if (optlen < sizeof(val))
3880 if (copy_from_sockptr(&val, optval, sizeof(val)))
3884 po->origdev = !!val;
3888 case PACKET_VNET_HDR:
3892 if (sock->type != SOCK_RAW)
3894 if (optlen < sizeof(val))
3896 if (copy_from_sockptr(&val, optval, sizeof(val)))
3900 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3903 po->has_vnet_hdr = !!val;
3909 case PACKET_TIMESTAMP:
3913 if (optlen != sizeof(val))
3915 if (copy_from_sockptr(&val, optval, sizeof(val)))
3918 po->tp_tstamp = val;
3923 struct fanout_args args = { 0 };
3925 if (optlen != sizeof(int) && optlen != sizeof(args))
3927 if (copy_from_sockptr(&args, optval, optlen))
3930 return fanout_add(sk, &args);
3932 case PACKET_FANOUT_DATA:
3934 /* Paired with the WRITE_ONCE() in fanout_add() */
3935 if (!READ_ONCE(po->fanout))
3938 return fanout_set_data(po, optval, optlen);
3940 case PACKET_IGNORE_OUTGOING:
3944 if (optlen != sizeof(val))
3946 if (copy_from_sockptr(&val, optval, sizeof(val)))
3948 if (val < 0 || val > 1)
3951 po->prot_hook.ignore_outgoing = !!val;
3954 case PACKET_TX_HAS_OFF:
3958 if (optlen != sizeof(val))
3960 if (copy_from_sockptr(&val, optval, sizeof(val)))
3964 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
3965 po->tp_tx_has_off = !!val;
3970 case PACKET_QDISC_BYPASS:
3974 if (optlen != sizeof(val))
3976 if (copy_from_sockptr(&val, optval, sizeof(val)))
3979 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3983 return -ENOPROTOOPT;
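/* Illustrative userspace sketch (not part of this file): two of the options
 * handled above.  PACKET_VERSION must be set before any ring exists, and the
 * int form of PACKET_FANOUT packs the group id into the low 16 bits and the
 * mode (plus flags) into the high 16 bits.  The helper name and the choice
 * of PACKET_FANOUT_HASH are assumptions made only for the example.
 *
 *	#include <linux/if_packet.h>
 *	#include <sys/socket.h>
 *
 *	static int setup_v3_with_fanout(int fd, unsigned short fanout_id)
 *	{
 *		int version = TPACKET_V3;
 *		int fanout  = fanout_id | (PACKET_FANOUT_HASH << 16);
 *
 *		if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
 *			       &version, sizeof(version)) < 0)
 *			return -1;
 *		return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *				  &fanout, sizeof(fanout));
 *	}
 */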
3987 static int packet_getsockopt(struct socket *sock, int level, int optname,
3988 char __user *optval, int __user *optlen)
3991 int val, lv = sizeof(val);
3992 struct sock *sk = sock->sk;
3993 struct packet_sock *po = pkt_sk(sk);
3995 union tpacket_stats_u st;
3996 struct tpacket_rollover_stats rstats;
3999 if (level != SOL_PACKET)
4000 return -ENOPROTOOPT;
4002 if (get_user(len, optlen))
4009 case PACKET_STATISTICS:
4010 spin_lock_bh(&sk->sk_receive_queue.lock);
4011 memcpy(&st, &po->stats, sizeof(st));
4012 memset(&po->stats, 0, sizeof(po->stats));
4013 spin_unlock_bh(&sk->sk_receive_queue.lock);
4014 drops = atomic_xchg(&po->tp_drops, 0);
4016 if (po->tp_version == TPACKET_V3) {
4017 lv = sizeof(struct tpacket_stats_v3);
4018 st.stats3.tp_drops = drops;
4019 st.stats3.tp_packets += drops;
4022 lv = sizeof(struct tpacket_stats);
4023 st.stats1.tp_drops = drops;
4024 st.stats1.tp_packets += drops;
4029 case PACKET_AUXDATA:
4032 case PACKET_ORIGDEV:
4035 case PACKET_VNET_HDR:
4036 val = po->has_vnet_hdr;
4038 case PACKET_VERSION:
4039 val = po->tp_version;
4042 if (len > sizeof(int))
4044 if (len < sizeof(int))
4046 if (copy_from_user(&val, optval, len))
4050 val = sizeof(struct tpacket_hdr);
4053 val = sizeof(struct tpacket2_hdr);
4056 val = sizeof(struct tpacket3_hdr);
4062 case PACKET_RESERVE:
4063 val = po->tp_reserve;
4068 case PACKET_TIMESTAMP:
4069 val = po->tp_tstamp;
4073 ((u32)po->fanout->id |
4074 ((u32)po->fanout->type << 16) |
4075 ((u32)po->fanout->flags << 24)) :
4078 case PACKET_IGNORE_OUTGOING:
4079 val = po->prot_hook.ignore_outgoing;
4081 case PACKET_ROLLOVER_STATS:
4084 rstats.tp_all = atomic_long_read(&po->rollover->num);
4085 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4086 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4088 lv = sizeof(rstats);
4090 case PACKET_TX_HAS_OFF:
4091 val = po->tp_tx_has_off;
4093 case PACKET_QDISC_BYPASS:
4094 val = packet_use_direct_xmit(po);
4097 return -ENOPROTOOPT;
4102 if (put_user(len, optlen))
4104 if (copy_to_user(optval, data, len))
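/* Illustrative userspace sketch (not part of this file): as the memset above
 * shows, PACKET_STATISTICS counters are clear-on-read, so callers that want
 * running totals must accumulate them.  This uses the V1/V2 layout; a
 * TPACKET_V3 socket returns the larger struct tpacket_stats_v3.  The helper
 * name is an assumption made only for the example.
 *
 *	#include <linux/if_packet.h>
 *	#include <sys/socket.h>
 *
 *	static int read_stats(int fd, struct tpacket_stats *st)
 *	{
 *		socklen_t len = sizeof(*st);
 *
 *		return getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, st, &len);
 *	}
 */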
4109 static int packet_notifier(struct notifier_block *this,
4110 unsigned long msg, void *ptr)
4113 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4114 struct net *net = dev_net(dev);
4117 sk_for_each_rcu(sk, &net->packet.sklist) {
4118 struct packet_sock *po = pkt_sk(sk);
4121 case NETDEV_UNREGISTER:
4123 packet_dev_mclist_delete(dev, &po->mclist);
4127 if (dev->ifindex == po->ifindex) {
4128 spin_lock(&po->bind_lock);
4130 __unregister_prot_hook(sk, false);
4131 sk->sk_err = ENETDOWN;
4132 if (!sock_flag(sk, SOCK_DEAD))
4133 sk_error_report(sk);
4135 if (msg == NETDEV_UNREGISTER) {
4136 packet_cached_dev_reset(po);
4137 WRITE_ONCE(po->ifindex, -1);
4138 dev_put(po->prot_hook.dev);
4139 po->prot_hook.dev = NULL;
4141 spin_unlock(&po->bind_lock);
4145 if (dev->ifindex == po->ifindex) {
4146 spin_lock(&po->bind_lock);
4148 register_prot_hook(sk);
4149 spin_unlock(&po->bind_lock);
4159 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4162 struct sock *sk = sock->sk;
4167 int amount = sk_wmem_alloc_get(sk);
4169 return put_user(amount, (int __user *)arg);
4173 struct sk_buff *skb;
4176 spin_lock_bh(&sk->sk_receive_queue.lock);
4177 skb = skb_peek(&sk->sk_receive_queue);
4180 spin_unlock_bh(&sk->sk_receive_queue.lock);
4181 return put_user(amount, (int __user *)arg);
4191 case SIOCGIFBRDADDR:
4192 case SIOCSIFBRDADDR:
4193 case SIOCGIFNETMASK:
4194 case SIOCSIFNETMASK:
4195 case SIOCGIFDSTADDR:
4196 case SIOCSIFDSTADDR:
4198 return inet_dgram_ops.ioctl(sock, cmd, arg);
4202 return -ENOIOCTLCMD;
4207 static __poll_t packet_poll(struct file *file, struct socket *sock,
4210 struct sock *sk = sock->sk;
4211 struct packet_sock *po = pkt_sk(sk);
4212 __poll_t mask = datagram_poll(file, sock, wait);
4214 spin_lock_bh(&sk->sk_receive_queue.lock);
4215 if (po->rx_ring.pg_vec) {
4216 if (!packet_previous_rx_frame(po, &po->rx_ring,
4218 mask |= EPOLLIN | EPOLLRDNORM;
4220 packet_rcv_try_clear_pressure(po);
4221 spin_unlock_bh(&sk->sk_receive_queue.lock);
4222 spin_lock_bh(&sk->sk_write_queue.lock);
4223 if (po->tx_ring.pg_vec) {
4224 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4225 mask |= EPOLLOUT | EPOLLWRNORM;
4227 spin_unlock_bh(&sk->sk_write_queue.lock);
4232 /* Dirty? Well, I still haven't learned a better way to account for user mmaps.
4236 static void packet_mm_open(struct vm_area_struct *vma)
4238 struct file *file = vma->vm_file;
4239 struct socket *sock = file->private_data;
4240 struct sock *sk = sock->sk;
4243 atomic_inc(&pkt_sk(sk)->mapped);
4246 static void packet_mm_close(struct vm_area_struct *vma)
4248 struct file *file = vma->vm_file;
4249 struct socket *sock = file->private_data;
4250 struct sock *sk = sock->sk;
4253 atomic_dec(&pkt_sk(sk)->mapped);
4256 static const struct vm_operations_struct packet_mmap_ops = {
4257 .open = packet_mm_open,
4258 .close = packet_mm_close,
4261 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4266 for (i = 0; i < len; i++) {
4267 if (likely(pg_vec[i].buffer)) {
4268 if (is_vmalloc_addr(pg_vec[i].buffer))
4269 vfree(pg_vec[i].buffer);
4271 free_pages((unsigned long)pg_vec[i].buffer,
4273 pg_vec[i].buffer = NULL;
4279 static char *alloc_one_pg_vec_page(unsigned long order)
4282 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4283 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4285 buffer = (char *) __get_free_pages(gfp_flags, order);
4289 /* __get_free_pages failed, fall back to vmalloc */
4290 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4294 /* vmalloc failed, let's dig into swap here */
4295 gfp_flags &= ~__GFP_NORETRY;
4296 buffer = (char *) __get_free_pages(gfp_flags, order);
4300 /* complete and utter failure */
4304 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4306 unsigned int block_nr = req->tp_block_nr;
4310 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4311 if (unlikely(!pg_vec))
4314 for (i = 0; i < block_nr; i++) {
4315 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4316 if (unlikely(!pg_vec[i].buffer))
4317 goto out_free_pgvec;
4324 free_pg_vec(pg_vec, order, block_nr);
4329 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4330 int closing, int tx_ring)
4332 struct pgv *pg_vec = NULL;
4333 struct packet_sock *po = pkt_sk(sk);
4334 unsigned long *rx_owner_map = NULL;
4335 int was_running, order = 0;
4336 struct packet_ring_buffer *rb;
4337 struct sk_buff_head *rb_queue;
4340 /* Aliased to keep code churn minimal */
4341 struct tpacket_req *req = &req_u->req;
4343 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4344 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4348 if (atomic_read(&po->mapped))
4350 if (packet_read_pending(rb))
4354 if (req->tp_block_nr) {
4355 unsigned int min_frame_size;
4357 /* Sanity tests and some calculations */
4359 if (unlikely(rb->pg_vec))
4362 switch (po->tp_version) {
4364 po->tp_hdrlen = TPACKET_HDRLEN;
4367 po->tp_hdrlen = TPACKET2_HDRLEN;
4370 po->tp_hdrlen = TPACKET3_HDRLEN;
4375 if (unlikely((int)req->tp_block_size <= 0))
4377 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4379 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4380 if (po->tp_version >= TPACKET_V3 &&
4381 req->tp_block_size <
4382 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4384 if (unlikely(req->tp_frame_size < min_frame_size))
4386 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4389 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4390 if (unlikely(rb->frames_per_block == 0))
4392 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4394 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4399 order = get_order(req->tp_block_size);
4400 pg_vec = alloc_pg_vec(req, order);
4401 if (unlikely(!pg_vec))
4403 switch (po->tp_version) {
4405 /* Block transmit is not supported yet */
4407 init_prb_bdqc(po, rb, pg_vec, req_u);
4409 struct tpacket_req3 *req3 = &req_u->req3;
4411 if (req3->tp_retire_blk_tov ||
4412 req3->tp_sizeof_priv ||
4413 req3->tp_feature_req_word) {
4415 goto out_free_pg_vec;
4421 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4422 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4424 goto out_free_pg_vec;
4432 if (unlikely(req->tp_frame_nr))
4437 /* Detach socket from network */
4438 spin_lock(&po->bind_lock);
4439 was_running = po->running;
4442 WRITE_ONCE(po->num, 0);
4443 __unregister_prot_hook(sk, false);
4445 spin_unlock(&po->bind_lock);
4450 mutex_lock(&po->pg_vec_lock);
4451 if (closing || atomic_read(&po->mapped) == 0) {
4453 spin_lock_bh(&rb_queue->lock);
4454 swap(rb->pg_vec, pg_vec);
4455 if (po->tp_version <= TPACKET_V2)
4456 swap(rb->rx_owner_map, rx_owner_map);
4457 rb->frame_max = (req->tp_frame_nr - 1);
4459 rb->frame_size = req->tp_frame_size;
4460 spin_unlock_bh(&rb_queue->lock);
4462 swap(rb->pg_vec_order, order);
4463 swap(rb->pg_vec_len, req->tp_block_nr);
4465 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4466 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4467 tpacket_rcv : packet_rcv;
4468 skb_queue_purge(rb_queue);
4469 if (atomic_read(&po->mapped))
4470 pr_err("packet_mmap: vma is busy: %d\n",
4471 atomic_read(&po->mapped));
4473 mutex_unlock(&po->pg_vec_lock);
4475 spin_lock(&po->bind_lock);
4477 WRITE_ONCE(po->num, num);
4478 register_prot_hook(sk);
4480 spin_unlock(&po->bind_lock);
4481 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4482 /* Because we don't support block-based V3 on tx-ring */
4484 prb_shutdown_retire_blk_timer(po, rb_queue);
4489 bitmap_free(rx_owner_map);
4490 free_pg_vec(pg_vec, order, req->tp_block_nr);
4496 static int packet_mmap(struct file *file, struct socket *sock,
4497 struct vm_area_struct *vma)
4499 struct sock *sk = sock->sk;
4500 struct packet_sock *po = pkt_sk(sk);
4501 unsigned long size, expected_size;
4502 struct packet_ring_buffer *rb;
4503 unsigned long start;
4510 mutex_lock(&po->pg_vec_lock);
4513 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4515 expected_size += rb->pg_vec_len
4521 if (expected_size == 0)
4524 size = vma->vm_end - vma->vm_start;
4525 if (size != expected_size)
4528 start = vma->vm_start;
4529 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4530 if (rb->pg_vec == NULL)
4533 for (i = 0; i < rb->pg_vec_len; i++) {
4535 void *kaddr = rb->pg_vec[i].buffer;
4538 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4539 page = pgv_to_page(kaddr);
4540 err = vm_insert_page(vma, start, page);
4549 atomic_inc(&po->mapped);
4550 vma->vm_ops = &packet_mmap_ops;
4554 mutex_unlock(&po->pg_vec_lock);
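/* Illustrative userspace sketch (not part of this file): the classic
 * TPACKET_V2 sequence served by packet_set_ring() and packet_mmap() above.
 * The sizes are arbitrary example values (block size page-aligned, frame
 * size a multiple of TPACKET_ALIGNMENT, blocks evenly divisible by frames),
 * and the helper names are assumptions.  consume_one() finds packet data at
 * (char *)hdr + hdr->tp_mac for hdr->tp_snaplen bytes and simply hands the
 * slot back to the kernel.
 *
 *	#include <linux/if_packet.h>
 *	#include <poll.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *
 *	#define EX_BLOCK_SIZE	(1 << 16)
 *	#define EX_BLOCK_NR	64
 *	#define EX_FRAME_SIZE	2048
 *
 *	static void *setup_rx_ring(int fd, struct tpacket_req *req)
 *	{
 *		int version = TPACKET_V2;
 *
 *		memset(req, 0, sizeof(*req));
 *		req->tp_block_size = EX_BLOCK_SIZE;
 *		req->tp_block_nr   = EX_BLOCK_NR;
 *		req->tp_frame_size = EX_FRAME_SIZE;
 *		req->tp_frame_nr   = EX_BLOCK_NR * (EX_BLOCK_SIZE / EX_FRAME_SIZE);
 *
 *		if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
 *			       &version, sizeof(version)) < 0)
 *			return MAP_FAILED;
 *		if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
 *			       req, sizeof(*req)) < 0)
 *			return MAP_FAILED;
 *		return mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
 *			    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	}
 *
 *	static void consume_one(int fd, void *ring, const struct tpacket_req *req,
 *				unsigned int *idx)
 *	{
 *		struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
 *			((char *)ring + (size_t)*idx * req->tp_frame_size);
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		while (!(hdr->tp_status & TP_STATUS_USER))
 *			poll(&pfd, 1, -1);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		*idx = (*idx + 1) % req->tp_frame_nr;
 *	}
 */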
4558 static const struct proto_ops packet_ops_spkt = {
4559 .family = PF_PACKET,
4560 .owner = THIS_MODULE,
4561 .release = packet_release,
4562 .bind = packet_bind_spkt,
4563 .connect = sock_no_connect,
4564 .socketpair = sock_no_socketpair,
4565 .accept = sock_no_accept,
4566 .getname = packet_getname_spkt,
4567 .poll = datagram_poll,
4568 .ioctl = packet_ioctl,
4569 .gettstamp = sock_gettstamp,
4570 .listen = sock_no_listen,
4571 .shutdown = sock_no_shutdown,
4572 .sendmsg = packet_sendmsg_spkt,
4573 .recvmsg = packet_recvmsg,
4574 .mmap = sock_no_mmap,
4575 .sendpage = sock_no_sendpage,
4578 static const struct proto_ops packet_ops = {
4579 .family = PF_PACKET,
4580 .owner = THIS_MODULE,
4581 .release = packet_release,
4582 .bind = packet_bind,
4583 .connect = sock_no_connect,
4584 .socketpair = sock_no_socketpair,
4585 .accept = sock_no_accept,
4586 .getname = packet_getname,
4587 .poll = packet_poll,
4588 .ioctl = packet_ioctl,
4589 .gettstamp = sock_gettstamp,
4590 .listen = sock_no_listen,
4591 .shutdown = sock_no_shutdown,
4592 .setsockopt = packet_setsockopt,
4593 .getsockopt = packet_getsockopt,
4594 .sendmsg = packet_sendmsg,
4595 .recvmsg = packet_recvmsg,
4596 .mmap = packet_mmap,
4597 .sendpage = sock_no_sendpage,
4600 static const struct net_proto_family packet_family_ops = {
4601 .family = PF_PACKET,
4602 .create = packet_create,
4603 .owner = THIS_MODULE,
4606 static struct notifier_block packet_netdev_notifier = {
4607 .notifier_call = packet_notifier,
4610 #ifdef CONFIG_PROC_FS
4612 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4615 struct net *net = seq_file_net(seq);
4618 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4621 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4623 struct net *net = seq_file_net(seq);
4624 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4627 static void packet_seq_stop(struct seq_file *seq, void *v)
4633 static int packet_seq_show(struct seq_file *seq, void *v)
4635 if (v == SEQ_START_TOKEN)
4637 "%*sRefCnt Type Proto Iface R Rmem User Inode\n",
4638 IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
4640 struct sock *s = sk_entry(v);
4641 const struct packet_sock *po = pkt_sk(s);
4644 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4646 refcount_read(&s->sk_refcnt),
4648 ntohs(READ_ONCE(po->num)),
4649 READ_ONCE(po->ifindex),
4651 atomic_read(&s->sk_rmem_alloc),
4652 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4659 static const struct seq_operations packet_seq_ops = {
4660 .start = packet_seq_start,
4661 .next = packet_seq_next,
4662 .stop = packet_seq_stop,
4663 .show = packet_seq_show,
4667 static int __net_init packet_net_init(struct net *net)
4669 mutex_init(&net->packet.sklist_lock);
4670 INIT_HLIST_HEAD(&net->packet.sklist);
4672 #ifdef CONFIG_PROC_FS
4673 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4674 sizeof(struct seq_net_private)))
4676 #endif /* CONFIG_PROC_FS */
4681 static void __net_exit packet_net_exit(struct net *net)
4683 remove_proc_entry("packet", net->proc_net);
4684 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4687 static struct pernet_operations packet_net_ops = {
4688 .init = packet_net_init,
4689 .exit = packet_net_exit,
4693 static void __exit packet_exit(void)
4695 unregister_netdevice_notifier(&packet_netdev_notifier);
4696 unregister_pernet_subsys(&packet_net_ops);
4697 sock_unregister(PF_PACKET);
4698 proto_unregister(&packet_proto);
4701 static int __init packet_init(void)
4705 rc = proto_register(&packet_proto, 0);
4708 rc = sock_register(&packet_family_ops);
4711 rc = register_pernet_subsys(&packet_net_ops);
4714 rc = register_netdevice_notifier(&packet_netdev_notifier);
4721 unregister_pernet_subsys(&packet_net_ops);
4723 sock_unregister(PF_PACKET);
4725 proto_unregister(&packet_proto);
4730 module_init(packet_init);
4731 module_exit(packet_exit);
4732 MODULE_LICENSE("GPL");
4733 MODULE_ALIAS_NETPROTO(PF_PACKET);