// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "xsk.h"
#ifndef SOL_XDP
 #define SOL_XDP 283
#endif

#ifndef AF_XDP
 #define AF_XDP 44
#endif

#ifndef PF_XDP
 #define PF_XDP AF_XDP
#endif

enum xsk_prog {
	XSK_PROG_FALLBACK,
	XSK_PROG_REDIRECT_FLAGS,
};
struct xsk_umem {
	struct xsk_ring_prod *fill_save;
	struct xsk_ring_cons *comp_save;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
	struct list_head ctx_list;
};

struct xsk_ctx {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	__u32 queue_id;
	struct xsk_umem *umem;
	int refcount;
	int ifindex;
	struct list_head list;
	int prog_fd;
	int xsks_map_fd;
	char ifname[IFNAMSIZ];
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_ctx *ctx;
	struct xsk_socket_config config;
	int fd;
};

struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};
/* Up until and including Linux 5.3 */
struct xdp_ring_offset_v1 {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
};

/* Up until and including Linux 5.3 */
struct xdp_mmap_offsets_v1 {
	struct xdp_ring_offset_v1 rx;
	struct xdp_ring_offset_v1 tx;
	struct xdp_ring_offset_v1 fr;
	struct xdp_ring_offset_v1 cr;
};
int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}
static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}
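
/* Usage sketch (illustrative, not part of the original file): one way a
 * caller can obtain a UMEM area that satisfies xsk_page_aligned().
 * "num_frames" is an assumed application-chosen parameter.
 */
static int example_alloc_umem_area(void **area, __u32 num_frames)
{
	size_t len = (size_t)num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE;

	/* posix_memalign() returns page-aligned memory when the
	 * requested alignment is the page size.
	 */
	return posix_memalign(area, getpagesize(), len);
}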
static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
	cfg->flags = usr_cfg->flags;
}
static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return 0;
	}

	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}
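
/* Usage sketch (illustrative, not part of the original file): a config
 * for callers that attach their own XDP program and only want the
 * library to skip loading its built-in one. Any libbpf_flags bit other
 * than XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD is rejected above.
 */
static void example_socket_config(struct xsk_socket_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg->libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
}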
static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
{
	struct xdp_mmap_offsets_v1 off_v1;

	/* getsockopt on a kernel <= 5.3 has no flags fields.
	 * Copy over the offsets to the correct places in the >=5.4 format
	 * and put the flags where they would have been on that kernel.
	 */
	memcpy(&off_v1, off, sizeof(off_v1));

	off->rx.producer = off_v1.rx.producer;
	off->rx.consumer = off_v1.rx.consumer;
	off->rx.desc = off_v1.rx.desc;
	off->rx.flags = off_v1.rx.consumer + sizeof(__u32);

	off->tx.producer = off_v1.tx.producer;
	off->tx.consumer = off_v1.tx.consumer;
	off->tx.desc = off_v1.tx.desc;
	off->tx.flags = off_v1.tx.consumer + sizeof(__u32);

	off->fr.producer = off_v1.fr.producer;
	off->fr.consumer = off_v1.fr.consumer;
	off->fr.desc = off_v1.fr.desc;
	off->fr.flags = off_v1.fr.consumer + sizeof(__u32);

	off->cr.producer = off_v1.cr.producer;
	off->cr.consumer = off_v1.cr.consumer;
	off->cr.desc = off_v1.cr.desc;
	off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
}
static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen;
	int err;

	optlen = sizeof(*off);
	err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
	if (err)
		return err;

	/* A kernel >= 5.4 fills in the full structure... */
	if (optlen == sizeof(*off))
		return 0;

	/* ...while an older kernel returns the smaller v1 layout, which
	 * is then converted in place.
	 */
	if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
		xsk_mmap_offsets_v1(off);
		return 0;
	}

	return -EINVAL;
}
static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
				 struct xsk_ring_prod *fill,
				 struct xsk_ring_cons *comp)
{
	struct xdp_mmap_offsets off;
	void *map;
	int err;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err)
		return -errno;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err)
		return -errno;

	err = xsk_get_mmap_offsets(fd, &off);
	if (err)
		return -errno;

	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return -errno;

	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->flags = map + off.fr.flags;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->flags = map + off.cr.flags;
	comp->ring = map + off.cr.desc;

	return 0;

out_mmap:
	/* "map" was clobbered by the failed completion ring mmap above,
	 * so recover the fill ring mapping from fill->ring instead.
	 */
	munmap(fill->ring - off.fr.desc,
	       off.fr.desc + umem->config.fill_size * sizeof(__u64));
	return err;
}
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	INIT_LIST_HEAD(&umem->ctx_list);
	xsk_set_umem_config(&umem->config, usr_config);

	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
	if (err)
		goto out_socket;

	umem->fill_save = fill;
	umem->comp_save = comp;
	*umem_ptr = umem;
	return 0;

out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
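
/* Usage sketch (illustrative, not part of the original file): passing
 * NULL for usr_config selects the defaults from xsk_set_umem_config().
 *
 *	struct xsk_ring_prod fill;
 *	struct xsk_ring_cons comp;
 *	struct xsk_umem *umem;
 *	int err;
 *
 *	err = xsk_umem__create(&umem, area, size, &fill, &comp, NULL);
 *	if (err)
 *		return err;
 *
 * "area" and "size" are assumed to come from a page-aligned allocator
 * such as example_alloc_umem_area() above.
 */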
struct xsk_umem_config_v1 {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
};

int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xsk_umem_config config;

	/* The v1 config has no flags field, so clear it explicitly. */
	memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
	config.flags = 0;

	return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
				       &config);
}
COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
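
/* COMPAT_VERSION()/DEFAULT_VERSION() come from libbpf_internal.h. As a
 * sketch of the mechanism (assumed here; see that header for the real
 * definitions), they emit GNU symbol-versioning directives so that
 * binaries linked against libbpf 0.0.2 keep the old ABI while new
 * links resolve xsk_umem__create to the 0.0.4 variant:
 *
 *	#define COMPAT_VERSION(internal_name, api_name, version)	\
 *		asm(".symver " #internal_name "," #api_name "@" #version);
 *	#define DEFAULT_VERSION(internal_name, api_name, version)	\
 *		asm(".symver " #internal_name "," #api_name "@@" #version);
 */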
static enum xsk_prog get_xsk_prog(void)
{
	enum xsk_prog detected = XSK_PROG_FALLBACK;
	struct bpf_load_program_attr prog_attr;
	struct bpf_create_map_attr map_attr;
	__u32 size_out, retval, duration;
	char data_in = 0, data_out;
	struct bpf_insn insns[] = {
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, XDP_PASS),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		BPF_EXIT_INSN(),
	};
	int prog_fd, map_fd, ret;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_XSKMAP;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = sizeof(int);
	map_attr.max_entries = 1;

	map_fd = bpf_create_map_xattr(&map_attr);
	if (map_fd < 0)
		return detected;

	/* Patch the map reference in the first (ld_imm64) instruction
	 * to point at the map just created.
	 */
	insns[0].imm = map_fd;

	memset(&prog_attr, 0, sizeof(prog_attr));
	prog_attr.prog_type = BPF_PROG_TYPE_XDP;
	prog_attr.insns = insns;
	prog_attr.insns_cnt = ARRAY_SIZE(insns);
	prog_attr.license = "GPL";

	prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
	if (prog_fd < 0) {
		close(map_fd);
		return detected;
	}

	/* If bpf_redirect_map() on an empty map returns XDP_PASS, this
	 * kernel honors the default action in the flags argument, so the
	 * short redirect-flags program can be used.
	 */
	ret = bpf_prog_test_run(prog_fd, 0, &data_in, 1, &data_out, &size_out, &retval, &duration);
	if (!ret && retval == XDP_PASS)
		detected = XSK_PROG_REDIRECT_FLAGS;

	close(prog_fd);
	close(map_fd);
	return detected;
}
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	struct xsk_ctx *ctx = xsk->ctx;
	char log_buf[log_buf_size];
	int prog_fd;
	int err;

	/* This is the fallback C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     int ret, index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
	 *     ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
	 *     if (ret > 0)
	 *         return ret;
	 *
	 *     // Fallback for pre-5.3 kernels, not supporting default
	 *     // action in the flags parameter.
	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *     return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r2 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* if w0 > 0 goto pc+13 */
		BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
		/* r2 = r10 */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		/* r2 += -4 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* call bpf_map_lookup_elem */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		/* r1 = r0 */
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		/* r0 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_0, 2),
		/* if r1 == 0 goto pc+5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = 0 */
		BPF_MOV64_IMM(BPF_REG_3, 0),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};

	/* This is the post-5.3 kernel C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
	 * }
	 */
	struct bpf_insn prog_redirect_flags[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt[] = {sizeof(prog) / sizeof(struct bpf_insn),
			      sizeof(prog_redirect_flags) / sizeof(struct bpf_insn),
	};
	struct bpf_insn *progs[] = {prog, prog_redirect_flags};
	enum xsk_prog option = get_xsk_prog();

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, progs[option], insns_cnt[option],
				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
				   log_buf_size);
	if (prog_fd < 0) {
		pr_warn("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, prog_fd,
				  xsk->config.xdp_flags);
	if (err) {
		close(prog_fd);
		return err;
	}

	ctx->prog_fd = prog_fd;
	return 0;
}
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	struct xsk_ctx *ctx = xsk->ctx;
	struct ifreq ifr = {};
	int fd, err, ret;

	fd = socket(AF_LOCAL, SOCK_DGRAM, 0);
	if (fd < 0)
		return -errno;

	ifr.ifr_data = (void *)&channels;
	memcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

	if (err) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	} else {
		/* Take the max of rx, tx, combined. Drivers return
		 * the number of channels in different ways.
		 */
		ret = max(channels.max_rx, channels.max_tx);
		ret = max(ret, (int)channels.max_combined);
	}

out:
	close(fd);
	return ret;
}
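
/* Illustrative (assumed) values: a driver reporting max_rx = 0,
 * max_tx = 0, max_combined = 32 yields 32 queues above, while one
 * reporting max_rx = 16, max_tx = 16, max_combined = 0 yields 16.
 */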
static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0)
		return fd;

	ctx->xsks_map_fd = fd;

	return 0;
}
static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
	close(ctx->xsks_map_fd);
}
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct xsk_ctx *ctx = xsk->ctx;
	struct bpf_map_info map_info;
	int fd, err;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	ctx->xsks_map_fd = -1;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;

		memset(&map_info, 0, map_len);
		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err) {
			close(fd);
			continue;
		}

		if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) {
			ctx->xsks_map_fd = fd;
			break;
		}

		close(fd);
	}

	if (ctx->xsks_map_fd == -1)
		err = -ENOENT;

out_map_ids:
	free(map_ids);
	return err;
}
static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
				   &xsk->fd, 0);
}
static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
{
	char ifname[IFNAMSIZ];
	struct xsk_ctx *ctx;
	char *interface;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return -ENOMEM;

	interface = if_indextoname(ifindex, &ifname[0]);
	if (!interface) {
		free(ctx);
		return -errno;
	}

	ctx->ifindex = ifindex;
	memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
	ctx->ifname[IFNAMSIZ - 1] = 0;

	xsk->ctx = ctx;

	return 0;
}
static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp,
				int *xsks_map_fd)
{
	struct xsk_socket *xsk = _xdp;
	struct xsk_ctx *ctx = xsk->ctx;
	__u32 prog_id = 0;
	int err;

	err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id,
				  xsk->config.xdp_flags);
	if (err)
		return err;

	if (!prog_id) {
		err = xsk_create_bpf_maps(xsk);
		if (err)
			return err;

		err = xsk_load_xdp_prog(xsk);
		if (err)
			goto err_load_xdp_prog;
	} else {
		ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
		if (ctx->prog_fd < 0)
			return -errno;
		err = xsk_lookup_bpf_maps(xsk);
		if (err) {
			close(ctx->prog_fd);
			return err;
		}
	}

	if (xsk->rx) {
		err = xsk_set_bpf_maps(xsk);
		if (err)
			goto err_set_bpf_maps;
	}
	if (xsks_map_fd)
		*xsks_map_fd = ctx->xsks_map_fd;

	return 0;

err_set_bpf_maps:
	close(ctx->prog_fd);
	bpf_set_link_xdp_fd(ctx->ifindex, -1, 0);
err_load_xdp_prog:
	xsk_delete_bpf_maps(xsk);
	return err;
}
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
				   __u32 queue_id)
{
	struct xsk_ctx *ctx;

	if (list_empty(&umem->ctx_list))
		return NULL;

	list_for_each_entry(ctx, &umem->ctx_list, list) {
		if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
			ctx->refcount++;
			return ctx;
		}
	}

	return NULL;
}
static void xsk_put_ctx(struct xsk_ctx *ctx)
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount == 0) {
		err = xsk_get_mmap_offsets(umem->fd, &off);
		if (!err) {
			munmap(ctx->fill->ring - off.fr.desc,
			       off.fr.desc + umem->config.fill_size *
			       sizeof(__u64));
			munmap(ctx->comp->ring - off.cr.desc,
			       off.cr.desc + umem->config.comp_size *
			       sizeof(__u64));
		}

		list_del(&ctx->list);
		free(ctx);
	}
}
static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      const char *ifname, __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
	struct xsk_ctx *ctx;
	int err;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	if (!umem->fill_save) {
		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
		if (err) {
			free(ctx);
			return NULL;
		}
	} else if (umem->fill_save != fill || umem->comp_save != comp) {
		/* Copy over rings to new structs. */
		memcpy(fill, umem->fill_save, sizeof(*fill));
		memcpy(comp, umem->comp_save, sizeof(*comp));
	}

	ctx->ifindex = ifindex;
	ctx->refcount = 1;
	ctx->umem = umem;
	ctx->queue_id = queue_id;
	memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
	ctx->ifname[IFNAMSIZ - 1] = '\0';

	umem->fill_save = NULL;
	umem->comp_save = NULL;
	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	return ctx;
}
static void xsk_destroy_xsk_struct(struct xsk_socket *xsk)
{
	free(xsk->ctx);
	free(xsk);
}
int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd)
{
	xsk->ctx->xsks_map_fd = fd;
	return xsk_set_bpf_maps(xsk);
}
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd)
{
	struct xsk_socket *xsk;
	int res;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	res = xsk_create_xsk_struct(ifindex, xsk);
	if (res)
		return -EINVAL;

	res = __xsk_setup_xdp_prog(xsk, xsks_map_fd);

	xsk_destroy_xsk_struct(xsk);

	return res;
}
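
/* Usage sketch (illustrative, not part of the original file): an
 * application that creates its sockets with
 * XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD can attach the default program
 * and register a socket in the returned map afterwards:
 *
 *	int xsks_map_fd, err;
 *
 *	err = xsk_setup_xdp_prog(ifindex, &xsks_map_fd);
 *	if (!err)
 *		err = xsk_socket__update_xskmap(xsk, xsks_map_fd);
 *
 * "ifindex" and "xsk" are assumed to come from the surrounding code.
 */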
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      const char *ifname,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *usr_config)
{
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err, ifindex;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_xsk_alloc;

	xsk->outstanding_tx = 0;
	ifindex = if_nametoindex(ifname);
	if (!ifindex) {
		err = -errno;
		goto out_xsk_alloc;
	}

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
	}

	ctx = xsk_get_ctx(umem, ifindex, queue_id);
	if (!ctx) {
		if (!fill || !comp) {
			err = -EFAULT;
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
				     fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
		}
	}
	xsk->ctx = ctx;

	if (rx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
	}
	if (tx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (err) {
		err = -errno;
		goto out_put_ctx;
	}

	if (rx) {
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_put_ctx;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
		rx->cached_prod = *rx->producer;
		rx->cached_cons = *rx->consumer;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_prod = *tx->producer;
		/* cached_cons is r->size bigger than the real consumer pointer
		 * See xsk_prod_nb_free
		 */
		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
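		/* For reference, the producer-side free-count check in
		 * xsk.h, xsk_prod_nb_free(), is roughly (sketch,
		 * abbreviated):
		 *
		 *	free_entries = r->cached_cons - r->cached_prod;
		 *	if (free_entries >= nb)
		 *		return free_entries;
		 *	r->cached_cons = *r->consumer + r->size;
		 *	return r->cached_cons - r->cached_prod;
		 *
		 * Keeping cached_cons biased by size means the number of
		 * free entries falls out of a single subtraction.
		 */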
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = ctx->ifindex;
	sxdp.sxdp_queue_id = ctx->queue_id;
	if (umem->refcount > 1) {
		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = xsk->config.bind_flags;
	}

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	ctx->prog_fd = -1;

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = __xsk_setup_xdp_prog(xsk, NULL);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
	xsk_put_ctx(ctx);
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}
int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	if (!umem)
		return -EFAULT;

	return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
					 rx, tx, umem->fill_save,
					 umem->comp_save, usr_config);
}
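
/* Usage sketch (illustrative, not part of the original file):
 *
 *	struct xsk_socket *xsk;
 *	int err;
 *
 *	err = xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
 *
 * This reuses the fill and completion rings saved by xsk_umem__create()
 * and, unless XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD was set, also loads
 * and attaches the built-in XDP program to queue 0 of "eth0" (interface
 * name and queue id are assumed values).
 */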
int xsk_umem__delete(struct xsk_umem *umem)
{
	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	close(umem->fd);
	free(umem);

	return 0;
}
void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	struct xsk_umem *umem;
	struct xsk_ctx *ctx;
	int err;

	if (!xsk)
		return;

	ctx = xsk->ctx;
	umem = ctx->umem;
	if (ctx->prog_fd != -1) {
		xsk_delete_bpf_maps(xsk);
		close(ctx->prog_fd);
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (!err) {
		if (xsk->rx)
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		if (xsk->tx)
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
	}

	xsk_put_ctx(ctx);

	umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != umem->fd)
		close(xsk->fd);
	free(xsk);
}