// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/if_link.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "xsk.h"

#ifndef SOL_XDP
 #define SOL_XDP 283
#endif

#ifndef AF_XDP
 #define AF_XDP 44
#endif

#ifndef PF_XDP
 #define PF_XDP AF_XDP
#endif

enum xsk_prog {
	XSK_PROG_FALLBACK,
	XSK_PROG_REDIRECT_FLAGS,
};
struct xsk_umem {
	struct xsk_ring_prod *fill_save;
	struct xsk_ring_cons *comp_save;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
	struct list_head ctx_list;
	bool rx_ring_setup_done;
	bool tx_ring_setup_done;
};
struct xsk_ctx {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	__u32 queue_id;
	struct xsk_umem *umem;
	int refcount;
	int ifindex;
	struct list_head list;
	int prog_fd;
	int link_fd;
	int xsks_map_fd;
	char ifname[IFNAMSIZ];
	bool has_bpf_link;
};
struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_ctx *ctx;
	struct xsk_socket_config config;
	int fd;
	bool xdp_prog_attached;
};
/* Up until and including Linux 5.3 */
struct xdp_ring_offset_v1 {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
};

/* Up until and including Linux 5.3 */
struct xdp_mmap_offsets_v1 {
	struct xdp_ring_offset_v1 rx;
	struct xdp_ring_offset_v1 tx;
	struct xdp_ring_offset_v1 fr;
	struct xdp_ring_offset_v1 cr;
};
int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}
static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
	cfg->flags = usr_cfg->flags;
}
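
/* Editor's example (not part of the original file): a caller that wants
 * smaller frames but otherwise default sizing would pass a config like the
 * one below to xsk_umem__create(). All names come from the public xsk.h API.
 */
#if 0
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = 2048,	/* instead of the 4096 default */
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
		.flags = XSK_UMEM__DEFAULT_FLAGS,
	};
#endif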
static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return 0;
	}

	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}
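
/* Editor's example (not part of the original file): the only libbpf_flags
 * bit accepted above is XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD, for callers
 * that attach their own XDP program and update the XSKMAP themselves (see
 * xsk_socket__update_xskmap() below).
 */
#if 0
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
		.xdp_flags = XDP_FLAGS_DRV_MODE,
		.bind_flags = XDP_USE_NEED_WAKEUP,
	};
#endif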
static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
{
	struct xdp_mmap_offsets_v1 off_v1;

	/* getsockopt on a kernel <= 5.3 has no flags fields.
	 * Copy over the offsets to the correct places in the >=5.4 format
	 * and put the flags where they would have been on that kernel.
	 */
	memcpy(&off_v1, off, sizeof(off_v1));

	off->rx.producer = off_v1.rx.producer;
	off->rx.consumer = off_v1.rx.consumer;
	off->rx.desc = off_v1.rx.desc;
	off->rx.flags = off_v1.rx.consumer + sizeof(__u32);

	off->tx.producer = off_v1.tx.producer;
	off->tx.consumer = off_v1.tx.consumer;
	off->tx.desc = off_v1.tx.desc;
	off->tx.flags = off_v1.tx.consumer + sizeof(__u32);

	off->fr.producer = off_v1.fr.producer;
	off->fr.consumer = off_v1.fr.consumer;
	off->fr.desc = off_v1.fr.desc;
	off->fr.flags = off_v1.fr.consumer + sizeof(__u32);

	off->cr.producer = off_v1.cr.producer;
	off->cr.consumer = off_v1.cr.consumer;
	off->cr.desc = off_v1.cr.desc;
	off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
}
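
/* Editor's note: the synthesized flags pointer lands on the word right
 * after the consumer index, i.e. where 5.4+ kernels placed the real flags
 * field. Pre-5.4 kernels never write that word, so it should read as zero
 * (no XDP_RING_NEED_WAKEUP), which matches those kernels not having the
 * need_wakeup feature at all.
 */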
static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen;
	int err;

	optlen = sizeof(*off);
	err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(*off))
		return 0;

	if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
		xsk_mmap_offsets_v1(off);
		return 0;
	}

	return -EINVAL;
}
static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
				 struct xsk_ring_prod *fill,
				 struct xsk_ring_cons *comp)
{
	struct xdp_mmap_offsets off;
	void *map;
	int err;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err)
		return -errno;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err)
		return -errno;

	err = xsk_get_mmap_offsets(fd, &off);
	if (err)
		return -errno;

	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return -errno;

	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->flags = map + off.fr.flags;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->flags = map + off.cr.flags;
	comp->ring = map + off.cr.desc;

	return 0;

out_mmap:
	/* "map" holds MAP_FAILED from the completion ring mmap here, so
	 * recompute the fill ring mapping base before unmapping it.
	 */
	munmap(fill->ring - off.fr.desc,
	       off.fr.desc + umem->config.fill_size * sizeof(__u64));
	return err;
}
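
/* Editor's example (not part of the original file): once the fill ring
 * exists, an application typically hands all UMEM frames to the kernel so
 * RX can begin. Uses only public helpers from xsk.h; NUM_FRAMES and
 * FRAME_SIZE are the application's own constants.
 */
#if 0
	__u32 idx;
	int i;

	if (xsk_ring_prod__reserve(fill, NUM_FRAMES, &idx) == NUM_FRAMES) {
		for (i = 0; i < NUM_FRAMES; i++)
			*xsk_ring_prod__fill_addr(fill, idx++) = i * FRAME_SIZE;
		xsk_ring_prod__submit(fill, NUM_FRAMES);
	}
#endif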
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	INIT_LIST_HEAD(&umem->ctx_list);
	xsk_set_umem_config(&umem->config, usr_config);

	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
	if (err)
		goto out_socket;

	umem->fill_save = fill;
	umem->comp_save = comp;
	*umem_ptr = umem;
	return 0;

out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
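
/* Editor's example (not part of the original file): a minimal caller of
 * xsk_umem__create(). The page alignment requirement comes from
 * xsk_page_aligned() above; NUM_FRAMES is the application's choice.
 */
#if 0
	struct xsk_ring_prod fill;
	struct xsk_ring_cons comp;
	struct xsk_umem *umem;
	__u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *bufs;

	if (posix_memalign(&bufs, getpagesize(), size))
		exit(1);
	if (xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL))
		exit(1);
#endif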
struct xsk_umem_config_v1 {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
};

int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xsk_umem_config config;

	memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
	config.flags = 0;

	return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
				       &config);
}
COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
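
/* Editor's note: COMPAT_VERSION()/DEFAULT_VERSION() (libbpf_internal.h)
 * emit .symver assembler directives, so binaries linked against
 * LIBBPF_0.0.2 keep resolving xsk_umem__create to the v0_0_2 wrapper
 * (no flags field), while newly linked code gets the v0_0_4 version.
 */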
/* Detect whether the kernel supports passing a default action in the flags
 * argument of bpf_redirect_map() by test-running a minimal probe program
 * against a one-entry XSKMAP.
 */
static enum xsk_prog get_xsk_prog(void)
{
	enum xsk_prog detected = XSK_PROG_FALLBACK;
	struct bpf_load_program_attr prog_attr;
	struct bpf_create_map_attr map_attr;
	__u32 size_out, retval, duration;
	char data_in = 0, data_out;
	struct bpf_insn insns[] = {
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, XDP_PASS),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		BPF_EXIT_INSN(),
	};
	int prog_fd, map_fd, ret;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_XSKMAP;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = sizeof(int);
	map_attr.max_entries = 1;

	map_fd = bpf_create_map_xattr(&map_attr);
	if (map_fd < 0)
		return detected;

	insns[0].imm = map_fd;

	memset(&prog_attr, 0, sizeof(prog_attr));
	prog_attr.prog_type = BPF_PROG_TYPE_XDP;
	prog_attr.insns = insns;
	prog_attr.insns_cnt = ARRAY_SIZE(insns);
	prog_attr.license = "GPL";

	prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
	if (prog_fd < 0) {
		close(map_fd);
		return detected;
	}

	ret = bpf_prog_test_run(prog_fd, 0, &data_in, 1, &data_out, &size_out, &retval, &duration);
	if (!ret && retval == XDP_PASS)
		detected = XSK_PROG_REDIRECT_FLAGS;
	close(prog_fd);
	close(map_fd);
	return detected;
}
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	struct xsk_ctx *ctx = xsk->ctx;
	char log_buf[log_buf_size];
	int prog_fd;

	/* This is the fallback C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     int ret, index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
	 *     ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
	 *     if (ret > 0)
	 *         return ret;
	 *
	 *     // Fallback for pre-5.3 kernels, not supporting default
	 *     // action in the flags parameter.
	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *     return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r2 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* if w0 s> 0 goto pc+13 */
		BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
		/* r2 = r10 */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		/* r2 += -4 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* call bpf_map_lookup_elem */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		/* r1 = r0 */
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		/* r0 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_0, 2),
		/* if r1 == 0 goto pc+5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = 0 */
		BPF_MOV64_IMM(BPF_REG_3, 0),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};

	/* This is the post-5.3 kernel C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
	 * }
	 */
	struct bpf_insn prog_redirect_flags[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt[] = {sizeof(prog) / sizeof(struct bpf_insn),
			      sizeof(prog_redirect_flags) / sizeof(struct bpf_insn),
	};
	struct bpf_insn *progs[] = {prog, prog_redirect_flags};
	enum xsk_prog option = get_xsk_prog();

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, progs[option], insns_cnt[option],
				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
				   log_buf_size);
	if (prog_fd < 0) {
		pr_warn("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	ctx->prog_fd = prog_fd;
	return 0;
}
static int xsk_create_bpf_link(struct xsk_socket *xsk)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
	struct xsk_ctx *ctx = xsk->ctx;
	__u32 prog_id;
	int link_fd;
	int err;

	err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);
	if (err) {
		pr_warn("getting XDP prog id failed\n");
		return err;
	}

	/* If there is a netlink-based XDP prog loaded on the interface, bail
	 * out and ask the user to remove it first.
	 */
	if (prog_id) {
		pr_warn("Netlink-based XDP prog detected, please unload it in order to launch AF_XDP prog\n");
		return -EINVAL;
	}

	opts.flags = xsk->config.xdp_flags & ~(XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_REPLACE);

	link_fd = bpf_link_create(ctx->prog_fd, ctx->ifindex, BPF_XDP, &opts);
	if (link_fd < 0) {
		pr_warn("bpf_link_create failed: %s\n", strerror(errno));
		return link_fd;
	}

	ctx->link_fd = link_fd;
	return 0;
}
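
/* Editor's note: with a bpf_link-based attach, the kernel keeps the XDP
 * program on the interface only while the link fd (stored in ctx->link_fd)
 * stays open; xsk_socket__delete() below closes it, which detaches the
 * program automatically.
 */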
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	struct xsk_ctx *ctx = xsk->ctx;
	struct ifreq ifr = {};
	int fd, err, ret;

	fd = socket(AF_LOCAL, SOCK_DGRAM, 0);
	if (fd < 0)
		return -errno;

	ifr.ifr_data = (void *)&channels;
	memcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

	if (err) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	} else {
		/* Take the max of rx, tx, combined. Drivers return
		 * the number of channels in different ways.
		 */
		ret = max(channels.max_rx, channels.max_tx);
		ret = max(ret, (int)channels.max_combined);
	}

out:
	close(fd);
	return ret;
}
static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0)
		return fd;

	ctx->xsks_map_fd = fd;

	return 0;
}
static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
	close(ctx->xsks_map_fd);
}
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct xsk_ctx *ctx = xsk->ctx;
	struct bpf_map_info map_info;
	int fd, err;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	ctx->xsks_map_fd = -1;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;

		memset(&map_info, 0, map_len);
		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err) {
			close(fd);
			continue;
		}

		if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) {
			ctx->xsks_map_fd = fd;
			break;
		}

		close(fd);
	}

	if (ctx->xsks_map_fd == -1)
		err = -ENOENT;

out_map_ids:
	free(map_ids);
	return err;
}
static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
				   &xsk->fd, 0);
}
static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)
{
	struct bpf_link_info link_info;
	__u32 link_len;
	__u32 id = 0;
	int err;
	int fd = -1;

	while (true) {
		err = bpf_link_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_warn("can't get next link: %s\n", strerror(errno));
			break;
		}

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;
			pr_warn("can't get link by id (%u): %s\n", id, strerror(errno));
			err = -errno;
			break;
		}

		link_len = sizeof(struct bpf_link_info);
		memset(&link_info, 0, link_len);
		err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len);
		if (err) {
			pr_warn("can't get link info: %s\n", strerror(errno));
			close(fd);
			break;
		}
		if (link_info.type == BPF_LINK_TYPE_XDP) {
			if (link_info.xdp.ifindex == ifindex) {
				*link_fd = fd;
				if (prog_id)
					*prog_id = link_info.prog_id;
				break;
			}
		}
		close(fd);
	}

	return err;
}
static bool xsk_probe_bpf_link(void)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
			    .flags = XDP_FLAGS_SKB_MODE);
	struct bpf_load_program_attr prog_attr;
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
		BPF_EXIT_INSN()
	};
	int prog_fd, link_fd = -1;
	int ifindex_lo = 1; /* loopback */
	bool ret = false;
	int err;

	err = xsk_link_lookup(ifindex_lo, NULL, &link_fd);
	if (err)
		return ret;

	if (link_fd >= 0)
		return true;

	memset(&prog_attr, 0, sizeof(prog_attr));
	prog_attr.prog_type = BPF_PROG_TYPE_XDP;
	prog_attr.insns = insns;
	prog_attr.insns_cnt = ARRAY_SIZE(insns);
	prog_attr.license = "GPL";

	prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
	if (prog_fd < 0)
		return ret;

	link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts);
	close(prog_fd);

	if (link_fd >= 0) {
		ret = true;
		close(link_fd);
	}

	return ret;
}
static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
{
	char ifname[IFNAMSIZ];
	struct xsk_ctx *ctx;
	char *interface;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return -ENOMEM;

	interface = if_indextoname(ifindex, &ifname[0]);
	if (!interface) {
		free(ctx);
		return -errno;
	}

	ctx->ifindex = ifindex;
	memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
	ctx->ifname[IFNAMSIZ - 1] = 0;

	xsk->ctx = ctx;
	xsk->ctx->has_bpf_link = xsk_probe_bpf_link();

	return 0;
}
static int xsk_init_xdp_res(struct xsk_socket *xsk,
			    int *xsks_map_fd)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int err;

	err = xsk_create_bpf_maps(xsk);
	if (err)
		return err;

	err = xsk_load_xdp_prog(xsk);
	if (err)
		goto err_load_xdp_prog;

	if (ctx->has_bpf_link)
		err = xsk_create_bpf_link(xsk);
	else
		err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, ctx->prog_fd,
					  xsk->config.xdp_flags);

	if (err)
		goto err_attach_xdp_prog;

	if (!xsk->rx)
		return err;

	err = xsk_set_bpf_maps(xsk);
	if (err)
		goto err_set_bpf_maps;

	return err;

err_set_bpf_maps:
	if (ctx->has_bpf_link)
		close(ctx->link_fd);
	else
		bpf_set_link_xdp_fd(ctx->ifindex, -1, 0);
err_attach_xdp_prog:
	close(ctx->prog_fd);
err_load_xdp_prog:
	xsk_delete_bpf_maps(xsk);

	return err;
}
static int xsk_lookup_xdp_res(struct xsk_socket *xsk, int *xsks_map_fd, int prog_id)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int err;

	ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (ctx->prog_fd < 0) {
		err = -errno;
		goto err_prog_fd;
	}
	err = xsk_lookup_bpf_maps(xsk);
	if (err)
		goto err_lookup_maps;

	if (!xsk->rx)
		return err;

	err = xsk_set_bpf_maps(xsk);
	if (err)
		goto err_set_maps;

	return err;

err_set_maps:
	close(ctx->xsks_map_fd);
err_lookup_maps:
	close(ctx->prog_fd);
err_prog_fd:
	if (ctx->has_bpf_link)
		close(ctx->link_fd);
	return err;
}
static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd)
{
	struct xsk_socket *xsk = _xdp;
	struct xsk_ctx *ctx = xsk->ctx;
	__u32 prog_id = 0;
	int err;

	if (ctx->has_bpf_link)
		err = xsk_link_lookup(ctx->ifindex, &prog_id, &ctx->link_fd);
	else
		err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);

	if (err)
		return err;

	/* No prog_id means nothing is attached yet: set up fresh resources.
	 * Otherwise reuse the already attached program and its xsks_map.
	 */
	err = !prog_id ? xsk_init_xdp_res(xsk, xsks_map_fd) :
			 xsk_lookup_xdp_res(xsk, xsks_map_fd, prog_id);

	if (!err && xsks_map_fd)
		*xsks_map_fd = ctx->xsks_map_fd;

	return err;
}
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
				   __u32 queue_id)
{
	struct xsk_ctx *ctx;

	if (list_empty(&umem->ctx_list))
		return NULL;

	list_for_each_entry(ctx, &umem->ctx_list, list) {
		if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
			ctx->refcount++;
			return ctx;
		}
	}

	return NULL;
}
static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount)
		return;

	if (!unmap)
		goto out_free;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (err)
		goto out_free;

	munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
	       sizeof(__u64));
	munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
	       sizeof(__u64));

out_free:
	list_del(&ctx->list);
	free(ctx);
}
static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      const char *ifname, __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
	struct xsk_ctx *ctx;
	int err;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	if (!umem->fill_save) {
		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
		if (err) {
			free(ctx);
			return NULL;
		}
	} else if (umem->fill_save != fill || umem->comp_save != comp) {
		/* Copy over rings to new structs. */
		memcpy(fill, umem->fill_save, sizeof(*fill));
		memcpy(comp, umem->comp_save, sizeof(*comp));
	}

	ctx->ifindex = ifindex;
	ctx->refcount = 1;
	ctx->umem = umem;
	ctx->queue_id = queue_id;
	memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
	ctx->ifname[IFNAMSIZ - 1] = '\0';

	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	return ctx;
}
static void xsk_destroy_xsk_struct(struct xsk_socket *xsk)
{
	free(xsk->ctx);
	free(xsk);
}

int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd)
{
	xsk->ctx->xsks_map_fd = fd;
	return xsk_set_bpf_maps(xsk);
}
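
/* Editor's example (not part of the original file): an application that
 * loaded its own XDP program (socket created with
 * XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD) plugs the socket into its own
 * XSKMAP this way; my_xsks_map_fd is hypothetical.
 */
#if 0
	if (xsk_socket__update_xskmap(xsk, my_xsks_map_fd))
		/* handle error */;
#endif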
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd)
{
	struct xsk_socket *xsk;
	int res;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	res = xsk_create_xsk_struct(ifindex, xsk);
	if (res) {
		free(xsk);
		return -EINVAL;
	}

	res = __xsk_setup_xdp_prog(xsk, xsks_map_fd);

	xsk_destroy_xsk_struct(xsk);

	return res;
}
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      const char *ifname,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *usr_config)
{
	bool unmap, rx_setup_done = false, tx_setup_done = false;
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err, ifindex;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;

	unmap = umem->fill_save != fill;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_xsk_alloc;

	xsk->outstanding_tx = 0;
	ifindex = if_nametoindex(ifname);
	if (!ifindex) {
		err = -errno;
		goto out_xsk_alloc;
	}

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
		rx_setup_done = umem->rx_ring_setup_done;
		tx_setup_done = umem->tx_ring_setup_done;
	}

	ctx = xsk_get_ctx(umem, ifindex, queue_id);
	if (!ctx) {
		if (!fill || !comp) {
			err = -EFAULT;
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
				     fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
		}
	}
	xsk->ctx = ctx;
	xsk->ctx->has_bpf_link = xsk_probe_bpf_link();

	if (rx && !rx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->rx_ring_setup_done = true;
	}
	if (tx && !tx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->tx_ring_setup_done = true;
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (err) {
		err = -errno;
		goto out_put_ctx;
	}

	if (rx) {
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_put_ctx;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
		rx->cached_prod = *rx->producer;
		rx->cached_cons = *rx->consumer;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_prod = *tx->producer;
		/* cached_cons is r->size bigger than the real consumer pointer
		 * See xsk_prod_nb_free
		 */
		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
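		/* Editor's note: biasing cached_cons up by size lets
		 * xsk_prod_nb_free() compute free entries as simply
		 * cached_cons - cached_prod; the helper re-reads
		 * *r->consumer (plus size) only when that cache runs low.
		 */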
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = ctx->ifindex;
	sxdp.sxdp_queue_id = ctx->queue_id;
	if (umem->refcount > 1) {
		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = xsk->config.bind_flags;
	}

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = __xsk_setup_xdp_prog(xsk, NULL);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	umem->fill_save = NULL;
	umem->comp_save = NULL;
	return 0;

out_mmap_tx:
	if (tx_map)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx_map)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
	xsk_put_ctx(ctx, unmap);
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}
int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	if (!umem)
		return -EFAULT;

	return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
					 rx, tx, umem->fill_save,
					 umem->comp_save, usr_config);
}
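
/* Editor's example (not part of the original file): a typical single-socket
 * setup on queue 0 of "eth0", reusing the umem from the sketch after
 * xsk_umem__create_v0_0_4() above. Passing NULL for the config selects the
 * defaults filled in by xsk_set_xdp_socket_config().
 */
#if 0
	struct xsk_socket *xsk;
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;

	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
		exit(1);
#endif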
int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (!err && umem->fill_save && umem->comp_save) {
		munmap(umem->fill_save->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp_save->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}
void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	struct xsk_umem *umem;
	struct xsk_ctx *ctx;
	int err;

	if (!xsk)
		return;

	ctx = xsk->ctx;
	umem = ctx->umem;
	if (ctx->refcount == 1) {
		xsk_delete_bpf_maps(xsk);
		close(ctx->prog_fd);
		if (ctx->has_bpf_link)
			close(ctx->link_fd);
	}

	xsk_put_ctx(ctx, true);

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (!err) {
		if (xsk->rx)
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		if (xsk->tx)
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
	}

	umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != umem->fd)
		close(xsk->fd);
	free(xsk);
}
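
/* Editor's example (not part of the original file): teardown order matters;
 * every socket must be deleted first so that the umem refcount checked in
 * xsk_umem__delete() above can drop to zero.
 */
#if 0
	xsk_socket__delete(xsk);
	if (xsk_umem__delete(umem))
		/* -EBUSY: some socket still holds a reference */;
	free(bufs);
#endif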