// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */
#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32
/* Per-CPU list of sockets that received frames in the current NAPI cycle and
 * still need their Rx ring producer published; filled by __xsk_map_redirect()
 * and drained by __xsk_map_flush().
 */
static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
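/* Illustrative sketch (not part of the original file): a zero-copy driver
 * typically pairs the need_wakeup helpers above with its NAPI poll loop,
 * roughly:
 *
 *	if (!xsk_buff_can_alloc(pool, budget))
 *		xsk_set_rx_need_wakeup(pool);	// fill ring ran dry, ask userspace to kick it
 *	else
 *		xsk_clear_rx_need_wakeup(pool);
 *
 * Userspace then checks XDP_RING_NEED_WAKEUP in the ring flags and only
 * issues poll()/sendto() when the flag is set. The exact placement and the
 * xsk_buff_can_alloc() threshold are driver specific.
 */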
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}
/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}
static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOMEM;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	return 0;
}
static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return __xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}
void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);
void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);
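/* Illustrative sketch (not part of the original file): a zero-copy driver's
 * Tx path typically consumes descriptors with the API above, roughly:
 *
 *	struct xdp_desc desc;
 *
 *	while (free_hw_slots && xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		// post desc.len bytes at dma to the hardware Tx ring
 *	}
 *	xsk_tx_release(pool);
 *
 * and later, from its Tx completion handler, reports how many frames are
 * done with xsk_tx_completed(). "free_hw_slots" and the descriptor posting
 * are placeholders for driver-specific code.
 */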
static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
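/* Illustrative sketch (not part of the original file): drivers that use the
 * batched API above typically do, from their Tx NAPI poll:
 *
 *	nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
 *	for (i = 0; i < nb_pkts; i++)
 *		// build one hardware Tx descriptor from pool->tx_descs[i]
 *
 * followed by xsk_tx_completed() once the hardware signals completions.
 * "budget" and the descriptor-building loop are driver specific; the
 * batched path is only taken when a single socket is bound to the pool.
 */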
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}
/* The skb destructor publishes the completed descriptor, carried in
 * destructor_arg, to the completion queue once the skb is freed.
 */
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
	if (unlikely(!skb))
		return ERR_PTR(err);

	skb_reserve(skb, hr);

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = 0; copied < len; i++) {
		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}
static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb))
			return skb;
	} else {
		u32 hr, tr, len;
		void *buffer;
		int err;

		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
		tr = dev->needed_tailroom;
		len = desc->len;

		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
		skb_put(skb, len);

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = xs->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
	skb->destructor = xsk_destruct_skb;

	return skb;
}
static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			goto out;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}
static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}
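/* Illustrative sketch (not part of the original file): userspace opts in to
 * the preferred busy-poll mode tested above with regular socket options on
 * the AF_XDP socket, e.g.:
 *
 *	int one = 1, usecs = 20, budget = 64;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));
 *
 * sk_napi_id is only set once traffic has been seen on the bound queue, so
 * the very first wakeups still go through the normal path. The values above
 * are examples only.
 */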
static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}
static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk)) {
		if (xs->zc)
			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
	}

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}
static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}
static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto skip_tx;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

skip_tx:
	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	rcu_read_unlock();
	return mask;
}
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}
static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}
static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}
static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}
static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If underlying shared umem was created without Tx
			 * ring, allocate Tx descs array that Tx batching API
			 * utilizes.
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
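/* Illustrative sketch (not part of the original file): once the umem and
 * rings have been configured (see xsk_setsockopt() below), userspace binds
 * the socket to a device/queue pair roughly like this:
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * With XDP_SHARED_UMEM, sxdp_shared_umem_fd names the socket that owns the
 * umem instead. The interface name and queue id are examples only.
 */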
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
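/* Illustrative sketch (not part of the original file): a typical userspace
 * setup sequence driving the handler above is, in order:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,
 *		.len = umem_len,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	int ring_sz = 2048;
 *
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_sz, sizeof(ring_sz));
 *
 * followed by XDP_MMAP_OFFSETS + mmap() (see xsk_mmap() below) and bind().
 * umem_area, umem_len and the sizes are placeholders; ring sizes must be
 * powers of two (see xsk_init_queue()).
 */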
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}
struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
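/* Illustrative sketch (not part of the original file): userspace discovers
 * the ring layout with XDP_MMAP_OFFSETS and then maps each ring at its
 * well-known page offset, e.g. for the Rx ring:
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	rx_map = mmap(NULL, off.rx.desc + size * sizeof(struct xdp_desc),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      fd, XDP_PGOFF_RX_RING);
 *
 * The fill and completion rings are mapped the same way at
 * XDP_UMEM_PGOFF_FILL_RING and XDP_UMEM_PGOFF_COMPLETION_RING, with
 * sizeof(__u64) entries. "size" is the ring size set via setsockopt() above.
 */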
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}
static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};
static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};
static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}
static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};
static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};
static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);