// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
	int retries = 5;
	int fd;

	do {
		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && retries-- > 0);

	return fd;
}
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = create_attr->map_type;
	attr.key_size = create_attr->key_size;
	attr.value_size = create_attr->value_size;
	attr.max_entries = create_attr->max_entries;
	attr.map_flags = create_attr->map_flags;
	if (create_attr->name)
		memcpy(attr.map_name, create_attr->name,
		       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = create_attr->numa_node;
	attr.btf_fd = create_attr->btf_fd;
	attr.btf_key_type_id = create_attr->btf_key_type_id;
	attr.btf_value_type_id = create_attr->btf_value_type_id;
	attr.map_ifindex = create_attr->map_ifindex;
	if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
		attr.btf_vmlinux_value_type_id =
			create_attr->btf_vmlinux_value_type_id;
	else
		attr.inner_map_fd = create_attr->inner_map_fd;

	fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;
	if (node >= 0) {
		map_attr.numa_node = node;
		map_attr.map_flags |= BPF_F_NUMA_NODE;
	}

	return bpf_create_map_xattr(&map_attr);
}
int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}
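/*
 * Usage sketch (illustrative, not part of the library): create an
 * anonymous hash map with 4-byte keys, 8-byte values and room for 1024
 * entries; the sizes and entry count here are arbitrary assumptions.
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(__u32),
 *				    sizeof(__u64), 1024, 0);
 *	if (map_fd < 0)
 *		return map_fd;	// -errno from the failed syscall
 */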
int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (name)
		memcpy(attr.map_name, name,
		       min(strlen(name), BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}
int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
{
	void *finfo = NULL, *linfo = NULL;
	union bpf_attr attr;
	int fd;

	if (!load_attr->log_buf != !load_attr->log_buf_sz)
		return libbpf_err(-EINVAL);

	if (load_attr->log_level > (4 | 2 | 1) || (load_attr->log_level && !load_attr->log_buf))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = load_attr->prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;

	if (load_attr->attach_prog_fd)
		attr.attach_prog_fd = load_attr->attach_prog_fd;
	else
		attr.attach_btf_obj_fd = load_attr->attach_btf_obj_fd;
	attr.attach_btf_id = load_attr->attach_btf_id;

	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = load_attr->kern_version;

	attr.insn_cnt = (__u32)load_attr->insn_cnt;
	attr.insns = ptr_to_u64(load_attr->insns);
	attr.license = ptr_to_u64(load_attr->license);

	attr.log_level = load_attr->log_level;
	if (attr.log_level) {
		attr.log_buf = ptr_to_u64(load_attr->log_buf);
		attr.log_size = load_attr->log_buf_sz;
	}

	attr.prog_btf_fd = load_attr->prog_btf_fd;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	attr.func_info = ptr_to_u64(load_attr->func_info);

	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	attr.line_info = ptr_to_u64(load_attr->line_info);

	if (load_attr->name)
		memcpy(attr.prog_name, load_attr->name,
		       min(strlen(load_attr->name), (size_t)BPF_OBJ_NAME_LEN - 1));

	fd = sys_bpf_prog_load(&attr, sizeof(attr));
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < load_attr->func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(load_attr->func_info,
							load_attr->func_info_cnt,
							load_attr->func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = load_attr->func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size <
			   load_attr->line_info_rec_size) {
			linfo = alloc_zero_tailing_info(load_attr->line_info,
							load_attr->line_info_cnt,
							load_attr->line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = load_attr->line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, sizeof(attr));
		if (fd >= 0)
			goto done;
	}

	if (load_attr->log_level || !load_attr->log_buf)
		goto done;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(load_attr->log_buf);
	attr.log_size = load_attr->log_buf_sz;
	attr.log_level = 1;
	load_attr->log_buf[0] = 0;

	fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
			   char *log_buf, size_t log_buf_sz)
{
	struct bpf_prog_load_params p = {};

	if (!load_attr || !log_buf != !log_buf_sz)
		return libbpf_err(-EINVAL);

	p.prog_type = load_attr->prog_type;
	p.expected_attach_type = load_attr->expected_attach_type;
	switch (p.prog_type) {
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		p.attach_btf_id = load_attr->attach_btf_id;
		break;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_EXT:
		p.attach_btf_id = load_attr->attach_btf_id;
		p.attach_prog_fd = load_attr->attach_prog_fd;
		break;
	default:
		p.prog_ifindex = load_attr->prog_ifindex;
		p.kern_version = load_attr->kern_version;
	}
	p.insn_cnt = load_attr->insns_cnt;
	p.insns = load_attr->insns;
	p.license = load_attr->license;
	p.log_level = load_attr->log_level;
	p.log_buf = log_buf;
	p.log_buf_sz = log_buf_sz;
	p.prog_btf_fd = load_attr->prog_btf_fd;
	p.func_info_rec_size = load_attr->func_info_rec_size;
	p.func_info_cnt = load_attr->func_info_cnt;
	p.func_info = load_attr->func_info;
	p.line_info_rec_size = load_attr->line_info_rec_size;
	p.line_info_cnt = load_attr->line_info_cnt;
	p.line_info = load_attr->line_info;
	p.name = load_attr->name;
	p.prog_flags = load_attr->prog_flags;

	return libbpf__bpf_prog_load(&p);
}
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf,
		     size_t log_buf_sz)
{
	struct bpf_load_program_attr load_attr;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = 0;
	load_attr.name = NULL;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;

	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
}
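/*
 * Usage sketch (illustrative only): load a minimal "r0 = 0; exit"
 * socket filter; the raw instruction encodings and the "GPL" license
 * string are assumptions made for this example.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	char log[4096];
 *	int prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns, 2,
 *				       "GPL", 0, log, sizeof(log));
 */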
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, __u32 prog_flags, const char *license,
		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
		       int log_level)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = prog_flags;

	fd = sys_bpf_prog_load(&attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
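/*
 * Usage sketch (illustrative only): write one element and read it back,
 * assuming map_fd refers to a map with 4-byte keys and 8-byte values.
 *
 *	__u32 key = 1;
 *	__u64 val = 42, out;
 *
 *	err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	if (!err)
 *		err = bpf_map_lookup_elem(map_fd, &key, &out);	// out == 42
 */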
int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
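/*
 * Usage sketch (illustrative only): iterate over all keys of a map by
 * starting from a NULL key and feeding each result back in.
 *
 *	__u32 key, next_key;
 *	void *prev = NULL;
 *
 *	while (!bpf_map_get_next_key(map_fd, prev, &next_key)) {
 *		key = next_key;
 *		prev = &key;
 *		// ... process key ...
 *	}
 */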
int bpf_map_freeze(int fd)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, sizeof(attr));
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}
int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, keys, NULL, count, opts);
}
int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}
int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}
int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    keys, values, count, opts);
}
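/*
 * Usage sketch (illustrative only): update several elements with a
 * single syscall; keys[], values[] and the count of 16 are assumptions.
 *
 *	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
 *		.elem_flags = BPF_ANY,
 *	);
 *	__u32 cnt = 16;
 *	int err = bpf_map_update_batch(map_fd, keys, values, &cnt, &opts);
 *	// on return, cnt holds the number of elements actually processed
 */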
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
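/*
 * Usage sketch (illustrative only): pin a map under the bpffs mount and
 * reopen it later by path; the path itself is an assumption.
 *
 *	bpf_obj_pin(map_fd, "/sys/fs/bpf/my_map");
 *	// ... later, possibly from another process ...
 *	int fd = bpf_obj_get("/sys/fs/bpf/my_map");
 */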
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
}
int bpf_prog_attach_xattr(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.attach_flags = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
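/*
 * Usage sketch (illustrative only): attach an already-loaded cgroup
 * ingress program to a cgroup directory fd, allowing other programs on
 * the same hook, then detach that specific program again.
 *
 *	int err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS,
 *				  BPF_F_ALLOW_MULTI);
 *	// ...
 *	err = bpf_prog_detach2(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS);
 */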
int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	__u32 target_btf_id, iter_info_len;
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}

proceed:
	fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_link_detach(int link_fd)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_iter_create(int link_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = query_flags;
	attr.query.prog_cnt = *prog_cnt;
	attr.query.prog_ids = ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;

	return libbpf_err_errno(ret);
}
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;

	return libbpf_err_errno(ret);
}
int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
{
	union bpf_attr attr;
	int ret;

	if (!test_attr->data_out && test_attr->data_size_out > 0)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = test_attr->prog_fd;
	attr.test.data_in = ptr_to_u64(test_attr->data_in);
	attr.test.data_out = ptr_to_u64(test_attr->data_out);
	attr.test.data_size_in = test_attr->data_size_in;
	attr.test.data_size_out = test_attr->data_size_out;
	attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
	attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
	attr.test.ctx_size_in = test_attr->ctx_size_in;
	attr.test.ctx_size_out = test_attr->ctx_size_out;
	attr.test.repeat = test_attr->repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	test_attr->data_size_out = attr.test.data_size_out;
	test_attr->ctx_size_out = attr.test.ctx_size_out;
	test_attr->retval = attr.test.retval;
	test_attr->duration = attr.test.duration;

	return libbpf_err_errno(ret);
}
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}
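/*
 * Usage sketch (illustrative only): run a loaded program once against a
 * packet buffer; pkt and pkt_len are assumptions.
 *
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = pkt,
 *		.data_size_in = pkt_len,
 *		.repeat = 1,
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &topts);
 *	// on success, topts.retval holds the program's return code
 */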
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	fd = sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_link_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.link_id = id;

	fd = sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
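/*
 * Usage sketch (illustrative only): walk all BPF programs currently
 * loaded in the kernel and obtain an fd for each one.
 *
 *	__u32 id = 0;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		int fd = bpf_prog_get_fd_by_id(id);
 *
 *		if (fd < 0)
 *			continue;
 *		// inspect via bpf_obj_get_info_by_fd(), then close(fd)
 *	}
 */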
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));

	if (!err)
		*info_len = attr.info.info_len;

	return libbpf_err_errno(err);
}
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

retry:
	if (do_log && log_buf && log_buf_size) {
		attr.btf_log_level = 1;
		attr.btf_log_size = log_buf_size;
		attr.btf_log_buf = ptr_to_u64(log_buf);
	}

	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));

	if (fd < 0 && !do_log && log_buf && log_buf_size) {
		do_log = true;
		goto retry;
	}

	return libbpf_err_errno(fd);
}
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr = {};
	int err;

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}
int bpf_enable_stats(enum bpf_stats_type type)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = type;

	fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
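/*
 * Usage sketch (illustrative only): make prog_fd keep a reference on
 * map_fd even though the program's instructions never use the map
 * directly; passing NULL opts requests default behavior.
 *
 *	int err = bpf_prog_bind_map(prog_fd, map_fd, NULL);
 */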