selftests: bpf: break up test_progs - stackmap
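
This change moves tests out of test_progs.c into individual files under
prog_tests/.  Shared pieces (the packet templates, the error_cnt and
pass_cnt counters, the CHECK() helpers and bpf_find_map()) now come from
test_progs.h, and main() picks the individual tests up through a
prog_tests/tests.h header (presumably generated by the build) that is
included twice: once with DECLARE defined to emit the prototypes, once
with CALL defined to emit the calls.

A rough sketch of how the pieces fit together (the generated contents
and the stackmap file/test names below are assumptions for illustration,
not taken from this patch):

    /* prog_tests/tests.h: assumed to be generated from the list of
     * prog_tests/*.c files, one DECLARE/CALL pair per file.
     */
    #ifdef DECLARE
    extern void test_stackmap(void);
    #endif
    #ifdef CALL
    test_stackmap();
    #endif

    /* prog_tests/stackmap.c: a relocated test only has to include
     * test_progs.h for the shared helpers and define its entry point.
     */
    #include <test_progs.h>

    void test_stackmap(void)
    {
            /* test body moved verbatim from test_progs.c */
    }

Under this scheme, adding a test means adding a single .c file under
prog_tests/; no central list in test_progs.c needs to be edited.
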
[platform/kernel/linux-starfive.git] tools/testing/selftests/bpf/test_progs.c
index 25f0083..a342fbe 100644
@@ -4,92 +4,30 @@
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <time.h>
-
-#include <linux/types.h>
-typedef __u16 __sum16;
-#include <arpa/inet.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/filter.h>
-#include <linux/perf_event.h>
-#include <linux/unistd.h>
-
-#include <sys/ioctl.h>
-#include <sys/wait.h>
-#include <sys/types.h>
-#include <fcntl.h>
-
-#include <linux/bpf.h>
-#include <linux/err.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "test_iptunnel_common.h"
-#include "bpf_util.h"
-#include "bpf_endian.h"
+#include "test_progs.h"
 #include "bpf_rlimit.h"
-#include "trace_helpers.h"
 
-static int error_cnt, pass_cnt;
-static bool jit_enabled;
+int error_cnt, pass_cnt;
+bool jit_enabled;
 
-#define MAGIC_BYTES 123
-
-/* ipv4 test vector */
-static struct {
-       struct ethhdr eth;
-       struct iphdr iph;
-       struct tcphdr tcp;
-} __packed pkt_v4 = {
+struct ipv4_packet pkt_v4 = {
        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
        .iph.ihl = 5,
-       .iph.protocol = 6,
+       .iph.protocol = IPPROTO_TCP,
        .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
        .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
 };
 
-/* ipv6 test vector */
-static struct {
-       struct ethhdr eth;
-       struct ipv6hdr iph;
-       struct tcphdr tcp;
-} __packed pkt_v6 = {
+struct ipv6_packet pkt_v6 = {
        .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
-       .iph.nexthdr = 6,
+       .iph.nexthdr = IPPROTO_TCP,
        .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
        .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
 };
 
-#define _CHECK(condition, tag, duration, format...) ({                 \
-       int __ret = !!(condition);                                      \
-       if (__ret) {                                                    \
-               error_cnt++;                                            \
-               printf("%s:FAIL:%s ", __func__, tag);                   \
-               printf(format);                                         \
-       } else {                                                        \
-               pass_cnt++;                                             \
-               printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
-       }                                                               \
-       __ret;                                                          \
-})
-
-#define CHECK(condition, tag, format...) \
-       _CHECK(condition, tag, duration, format)
-#define CHECK_ATTR(condition, tag, format...) \
-       _CHECK(condition, tag, tattr.duration, format)
-
-static int bpf_find_map(const char *test, struct bpf_object *obj,
-                       const char *name)
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
 {
        struct bpf_map *map;
 
@@ -102,33 +40,6 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
        return bpf_map__fd(map);
 }
 
-static void test_pkt_access(void)
-{
-       const char *file = "./test_pkt_access.o";
-       struct bpf_object *obj;
-       __u32 duration, retval;
-       int err, prog_fd;
-
-       err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
-       if (err) {
-               error_cnt++;
-               return;
-       }
-
-       err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
-                               NULL, NULL, &retval, &duration);
-       CHECK(err || retval, "ipv4",
-             "err %d errno %d retval %d duration %d\n",
-             err, errno, retval, duration);
-
-       err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
-                               NULL, NULL, &retval, &duration);
-       CHECK(err || retval, "ipv6",
-             "err %d errno %d retval %d duration %d\n",
-             err, errno, retval, duration);
-       bpf_object__close(obj);
-}
-
 static void test_prog_run_xattr(void)
 {
        const char *file = "./test_pkt_access.o";
@@ -176,85 +87,6 @@ static void test_prog_run_xattr(void)
        bpf_object__close(obj);
 }
 
-static void test_xdp(void)
-{
-       struct vip key4 = {.protocol = 6, .family = AF_INET};
-       struct vip key6 = {.protocol = 6, .family = AF_INET6};
-       struct iptnl_info value4 = {.family = AF_INET};
-       struct iptnl_info value6 = {.family = AF_INET6};
-       const char *file = "./test_xdp.o";
-       struct bpf_object *obj;
-       char buf[128];
-       struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
-       struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
-       __u32 duration, retval, size;
-       int err, prog_fd, map_fd;
-
-       err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-       if (err) {
-               error_cnt++;
-               return;
-       }
-
-       map_fd = bpf_find_map(__func__, obj, "vip2tnl");
-       if (map_fd < 0)
-               goto out;
-       bpf_map_update_elem(map_fd, &key4, &value4, 0);
-       bpf_map_update_elem(map_fd, &key6, &value6, 0);
-
-       err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-                               buf, &size, &retval, &duration);
-
-       CHECK(err || retval != XDP_TX || size != 74 ||
-             iph->protocol != IPPROTO_IPIP, "ipv4",
-             "err %d errno %d retval %d size %d\n",
-             err, errno, retval, size);
-
-       err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
-                               buf, &size, &retval, &duration);
-       CHECK(err || retval != XDP_TX || size != 114 ||
-             iph6->nexthdr != IPPROTO_IPV6, "ipv6",
-             "err %d errno %d retval %d size %d\n",
-             err, errno, retval, size);
-out:
-       bpf_object__close(obj);
-}
-
-static void test_xdp_adjust_tail(void)
-{
-       const char *file = "./test_adjust_tail.o";
-       struct bpf_object *obj;
-       char buf[128];
-       __u32 duration, retval, size;
-       int err, prog_fd;
-
-       err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-       if (err) {
-               error_cnt++;
-               return;
-       }
-
-       err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-                               buf, &size, &retval, &duration);
-
-       CHECK(err || retval != XDP_DROP,
-             "ipv4", "err %d errno %d retval %d size %d\n",
-             err, errno, retval, size);
-
-       err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
-                               buf, &size, &retval, &duration);
-       CHECK(err || retval != XDP_TX || size != 54,
-             "ipv6", "err %d errno %d retval %d size %d\n",
-             err, errno, retval, size);
-       bpf_object__close(obj);
-}
-
-
-
-#define MAGIC_VAL 0x1234
-#define NUM_ITER 100000
-#define VIP_NUM 5
-
 static void test_l4lb(const char *file)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
@@ -343,86 +175,6 @@ static void test_l4lb_all(void)
        test_l4lb(file2);
 }
 
-static void test_xdp_noinline(void)
-{
-       const char *file = "./test_xdp_noinline.o";
-       unsigned int nr_cpus = bpf_num_possible_cpus();
-       struct vip key = {.protocol = 6};
-       struct vip_meta {
-               __u32 flags;
-               __u32 vip_num;
-       } value = {.vip_num = VIP_NUM};
-       __u32 stats_key = VIP_NUM;
-       struct vip_stats {
-               __u64 bytes;
-               __u64 pkts;
-       } stats[nr_cpus];
-       struct real_definition {
-               union {
-                       __be32 dst;
-                       __be32 dstv6[4];
-               };
-               __u8 flags;
-       } real_def = {.dst = MAGIC_VAL};
-       __u32 ch_key = 11, real_num = 3;
-       __u32 duration, retval, size;
-       int err, i, prog_fd, map_fd;
-       __u64 bytes = 0, pkts = 0;
-       struct bpf_object *obj;
-       char buf[128];
-       u32 *magic = (u32 *)buf;
-
-       err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-       if (err) {
-               error_cnt++;
-               return;
-       }
-
-       map_fd = bpf_find_map(__func__, obj, "vip_map");
-       if (map_fd < 0)
-               goto out;
-       bpf_map_update_elem(map_fd, &key, &value, 0);
-
-       map_fd = bpf_find_map(__func__, obj, "ch_rings");
-       if (map_fd < 0)
-               goto out;
-       bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
-       map_fd = bpf_find_map(__func__, obj, "reals");
-       if (map_fd < 0)
-               goto out;
-       bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
-       err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
-                               buf, &size, &retval, &duration);
-       CHECK(err || retval != 1 || size != 54 ||
-             *magic != MAGIC_VAL, "ipv4",
-             "err %d errno %d retval %d size %d magic %x\n",
-             err, errno, retval, size, *magic);
-
-       err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
-                               buf, &size, &retval, &duration);
-       CHECK(err || retval != 1 || size != 74 ||
-             *magic != MAGIC_VAL, "ipv6",
-             "err %d errno %d retval %d size %d magic %x\n",
-             err, errno, retval, size, *magic);
-
-       map_fd = bpf_find_map(__func__, obj, "stats");
-       if (map_fd < 0)
-               goto out;
-       bpf_map_lookup_elem(map_fd, &stats_key, stats);
-       for (i = 0; i < nr_cpus; i++) {
-               bytes += stats[i].bytes;
-               pkts += stats[i].pkts;
-       }
-       if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
-               error_cnt++;
-               printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
-       }
-out:
-       bpf_object__close(obj);
-}
-
 static void test_tcp_estats(void)
 {
        const char *file = "./test_tcp_estats.o";
@@ -710,28 +462,6 @@ done:
                bpf_object__close(objs[i]);
 }
 
-static void test_pkt_md_access(void)
-{
-       const char *file = "./test_pkt_md_access.o";
-       struct bpf_object *obj;
-       __u32 duration, retval;
-       int err, prog_fd;
-
-       err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
-       if (err) {
-               error_cnt++;
-               return;
-       }
-
-       err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
-                               NULL, NULL, &retval, &duration);
-       CHECK(err || retval, "",
-             "err %d errno %d retval %d duration %d\n",
-             err, errno, retval, duration);
-
-       bpf_object__close(obj);
-}
-
 static void test_obj_name(void)
 {
        struct {
@@ -931,7 +661,7 @@ static void test_tp_attach_query(void)
        free(query);
 }
 
-static int compare_map_keys(int map1_fd, int map2_fd)
+int compare_map_keys(int map1_fd, int map2_fd)
 {
        __u32 key, next_key;
        char val_buf[PERF_MAX_STACK_DEPTH *
@@ -958,7 +688,7 @@ static int compare_map_keys(int map1_fd, int map2_fd)
        return 0;
 }
 
-static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
 {
        __u32 key, next_key, *cur_key_p, *next_key_p;
        char *val_buf1, *val_buf2;
@@ -994,165 +724,7 @@ out:
        return err;
 }
 
-static void test_stacktrace_map()
-{
-       int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
-       const char *file = "./test_stacktrace_map.o";
-       int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
-       struct perf_event_attr attr = {};
-       __u32 key, val, duration = 0;
-       struct bpf_object *obj;
-       char buf[256];
-
-       err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
-       if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
-               return;
-
-       /* Get the ID for the sched/sched_switch tracepoint */
-       snprintf(buf, sizeof(buf),
-                "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
-       efd = open(buf, O_RDONLY, 0);
-       if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
-               goto close_prog;
-
-       bytes = read(efd, buf, sizeof(buf));
-       close(efd);
-       if (bytes <= 0 || bytes >= sizeof(buf))
-               goto close_prog;
-
-       /* Open the perf event and attach bpf program */
-       attr.config = strtol(buf, NULL, 0);
-       attr.type = PERF_TYPE_TRACEPOINT;
-       attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
-       attr.sample_period = 1;
-       attr.wakeup_events = 1;
-       pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-                        0 /* cpu 0 */, -1 /* group id */,
-                        0 /* flags */);
-       if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
-                 pmu_fd, errno))
-               goto close_prog;
-
-       err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-       if (err)
-               goto disable_pmu;
-
-       err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-       if (err)
-               goto disable_pmu;
-
-       /* find map fds */
-       control_map_fd = bpf_find_map(__func__, obj, "control_map");
-       if (control_map_fd < 0)
-               goto disable_pmu;
-
-       stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
-       if (stackid_hmap_fd < 0)
-               goto disable_pmu;
-
-       stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
-       if (stackmap_fd < 0)
-               goto disable_pmu;
-
-       stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
-       if (stack_amap_fd < 0)
-               goto disable_pmu;
-
-       /* give some time for bpf program run */
-       sleep(1);
-
-       /* disable stack trace collection */
-       key = 0;
-       val = 1;
-       bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
-       /* for every element in stackid_hmap, we can find a corresponding one
-        * in stackmap, and vice versa.
-        */
-       err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
-       if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu_noerr;
-
-       err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
-       if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu_noerr;
-
-       stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
-       err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
-       if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu_noerr;
-
-       goto disable_pmu_noerr;
-disable_pmu:
-       error_cnt++;
-disable_pmu_noerr:
-       ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-       close(pmu_fd);
-close_prog:
-       bpf_object__close(obj);
-}
-
-static void test_stacktrace_map_raw_tp()
-{
-       int control_map_fd, stackid_hmap_fd, stackmap_fd;
-       const char *file = "./test_stacktrace_map.o";
-       int efd, err, prog_fd;
-       __u32 key, val, duration = 0;
-       struct bpf_object *obj;
-
-       err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
-       if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
-               return;
-
-       efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
-       if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
-               goto close_prog;
-
-       /* find map fds */
-       control_map_fd = bpf_find_map(__func__, obj, "control_map");
-       if (control_map_fd < 0)
-               goto close_prog;
-
-       stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
-       if (stackid_hmap_fd < 0)
-               goto close_prog;
-
-       stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
-       if (stackmap_fd < 0)
-               goto close_prog;
-
-       /* give some time for bpf program run */
-       sleep(1);
-
-       /* disable stack trace collection */
-       key = 0;
-       val = 1;
-       bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
-       /* for every element in stackid_hmap, we can find a corresponding one
-        * in stackmap, and vice versa.
-        */
-       err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
-       if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
-                 "err %d errno %d\n", err, errno))
-               goto close_prog;
-
-       err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
-       if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
-                 "err %d errno %d\n", err, errno))
-               goto close_prog;
-
-       goto close_prog_noerr;
-close_prog:
-       error_cnt++;
-close_prog_noerr:
-       bpf_object__close(obj);
-}
-
-static int extract_build_id(char *build_id, size_t size)
+int extract_build_id(char *build_id, size_t size)
 {
        FILE *fp;
        char *line = NULL;
@@ -1176,317 +748,6 @@ err:
        return -1;
 }
 
-static void test_stacktrace_build_id(void)
-{
-       int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
-       const char *file = "./test_stacktrace_build_id.o";
-       int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
-       struct perf_event_attr attr = {};
-       __u32 key, previous_key, val, duration = 0;
-       struct bpf_object *obj;
-       char buf[256];
-       int i, j;
-       struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
-       int build_id_matches = 0;
-       int retry = 1;
-
-retry:
-       err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
-       if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
-               goto out;
-
-       /* Get the ID for the sched/sched_switch tracepoint */
-       snprintf(buf, sizeof(buf),
-                "/sys/kernel/debug/tracing/events/random/urandom_read/id");
-       efd = open(buf, O_RDONLY, 0);
-       if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
-               goto close_prog;
-
-       bytes = read(efd, buf, sizeof(buf));
-       close(efd);
-       if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
-                 "read", "bytes %d errno %d\n", bytes, errno))
-               goto close_prog;
-
-       /* Open the perf event and attach bpf program */
-       attr.config = strtol(buf, NULL, 0);
-       attr.type = PERF_TYPE_TRACEPOINT;
-       attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
-       attr.sample_period = 1;
-       attr.wakeup_events = 1;
-       pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-                        0 /* cpu 0 */, -1 /* group id */,
-                        0 /* flags */);
-       if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
-                 pmu_fd, errno))
-               goto close_prog;
-
-       err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-       if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
-                 err, errno))
-               goto close_pmu;
-
-       err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-       if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
-                 err, errno))
-               goto disable_pmu;
-
-       /* find map fds */
-       control_map_fd = bpf_find_map(__func__, obj, "control_map");
-       if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
-       if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
-       if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
-                 err, errno))
-               goto disable_pmu;
-
-       stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
-       if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
-              == 0);
-       assert(system("./urandom_read") == 0);
-       /* disable stack trace collection */
-       key = 0;
-       val = 1;
-       bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
-       /* for every element in stackid_hmap, we can find a corresponding one
-        * in stackmap, and vice versa.
-        */
-       err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
-       if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
-       if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       err = extract_build_id(buf, 256);
-
-       if (CHECK(err, "get build_id with readelf",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
-       if (CHECK(err, "get_next_key from stackmap",
-                 "err %d, errno %d\n", err, errno))
-               goto disable_pmu;
-
-       do {
-               char build_id[64];
-
-               err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
-               if (CHECK(err, "lookup_elem from stackmap",
-                         "err %d, errno %d\n", err, errno))
-                       goto disable_pmu;
-               for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
-                       if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
-                           id_offs[i].offset != 0) {
-                               for (j = 0; j < 20; ++j)
-                                       sprintf(build_id + 2 * j, "%02x",
-                                               id_offs[i].build_id[j] & 0xff);
-                               if (strstr(buf, build_id) != NULL)
-                                       build_id_matches = 1;
-                       }
-               previous_key = key;
-       } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
-
-       /* stack_map_get_build_id_offset() is racy and sometimes can return
-        * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
-        * try it one more time.
-        */
-       if (build_id_matches < 1 && retry--) {
-               ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-               close(pmu_fd);
-               bpf_object__close(obj);
-               printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
-                      __func__);
-               goto retry;
-       }
-
-       if (CHECK(build_id_matches < 1, "build id match",
-                 "Didn't find expected build ID from the map\n"))
-               goto disable_pmu;
-
-       stack_trace_len = PERF_MAX_STACK_DEPTH
-               * sizeof(struct bpf_stack_build_id);
-       err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
-       CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
-             "err %d errno %d\n", err, errno);
-
-disable_pmu:
-       ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
-       close(pmu_fd);
-
-close_prog:
-       bpf_object__close(obj);
-
-out:
-       return;
-}
-
-static void test_stacktrace_build_id_nmi(void)
-{
-       int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
-       const char *file = "./test_stacktrace_build_id.o";
-       int err, pmu_fd, prog_fd;
-       struct perf_event_attr attr = {
-               .sample_freq = 5000,
-               .freq = 1,
-               .type = PERF_TYPE_HARDWARE,
-               .config = PERF_COUNT_HW_CPU_CYCLES,
-       };
-       __u32 key, previous_key, val, duration = 0;
-       struct bpf_object *obj;
-       char buf[256];
-       int i, j;
-       struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
-       int build_id_matches = 0;
-       int retry = 1;
-
-retry:
-       err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
-       if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
-               return;
-
-       pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-                        0 /* cpu 0 */, -1 /* group id */,
-                        0 /* flags */);
-       if (CHECK(pmu_fd < 0, "perf_event_open",
-                 "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
-                 pmu_fd, errno))
-               goto close_prog;
-
-       err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-       if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
-                 err, errno))
-               goto close_pmu;
-
-       err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-       if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
-                 err, errno))
-               goto disable_pmu;
-
-       /* find map fds */
-       control_map_fd = bpf_find_map(__func__, obj, "control_map");
-       if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
-       if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
-       if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
-                 err, errno))
-               goto disable_pmu;
-
-       stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
-       if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
-              == 0);
-       assert(system("taskset 0x1 ./urandom_read 100000") == 0);
-       /* disable stack trace collection */
-       key = 0;
-       val = 1;
-       bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
-       /* for every element in stackid_hmap, we can find a corresponding one
-        * in stackmap, and vice versa.
-        */
-       err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
-       if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
-       if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       err = extract_build_id(buf, 256);
-
-       if (CHECK(err, "get build_id with readelf",
-                 "err %d errno %d\n", err, errno))
-               goto disable_pmu;
-
-       err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
-       if (CHECK(err, "get_next_key from stackmap",
-                 "err %d, errno %d\n", err, errno))
-               goto disable_pmu;
-
-       do {
-               char build_id[64];
-
-               err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
-               if (CHECK(err, "lookup_elem from stackmap",
-                         "err %d, errno %d\n", err, errno))
-                       goto disable_pmu;
-               for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
-                       if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
-                           id_offs[i].offset != 0) {
-                               for (j = 0; j < 20; ++j)
-                                       sprintf(build_id + 2 * j, "%02x",
-                                               id_offs[i].build_id[j] & 0xff);
-                               if (strstr(buf, build_id) != NULL)
-                                       build_id_matches = 1;
-                       }
-               previous_key = key;
-       } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
-
-       /* stack_map_get_build_id_offset() is racy and sometimes can return
-        * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
-        * try it one more time.
-        */
-       if (build_id_matches < 1 && retry--) {
-               ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-               close(pmu_fd);
-               bpf_object__close(obj);
-               printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
-                      __func__);
-               goto retry;
-       }
-
-       if (CHECK(build_id_matches < 1, "build id match",
-                 "Didn't find expected build ID from the map\n"))
-               goto disable_pmu;
-
-       /*
-        * We intentionally skip compare_stack_ips(). This is because we
-        * only support one in_nmi() ips-to-build_id translation per cpu
-        * at any time, thus stack_amap here will always fallback to
-        * BPF_STACK_BUILD_ID_IP;
-        */
-
-disable_pmu:
-       ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
-       close(pmu_fd);
-
-close_prog:
-       bpf_object__close(obj);
-}
-
 #define MAX_CNT_RAWTP  10ull
 #define MAX_STACK_RAWTP        100
 struct get_stack_trace_t {
@@ -1780,6 +1041,15 @@ static void test_task_fd_query_tp(void)
                                   "sys_enter_read");
 }
 
+static int libbpf_debug_print(enum libbpf_print_level level,
+                             const char *format, va_list args)
+{
+       if (level == LIBBPF_DEBUG)
+               return 0;
+
+       return vfprintf(stderr, format, args);
+}
+
 static void test_reference_tracking()
 {
        const char *file = "./test_sk_lookup_kern.o";
@@ -1806,9 +1076,9 @@ static void test_reference_tracking()
 
                /* Expect verifier failure if test name has 'fail' */
                if (strstr(title, "fail") != NULL) {
-                       libbpf_set_print(NULL, NULL, NULL);
+                       libbpf_set_print(NULL);
                        err = !bpf_program__load(prog, "GPL", 0);
-                       libbpf_set_print(printf, printf, NULL);
+                       libbpf_set_print(libbpf_debug_print);
                } else {
                        err = bpf_program__load(prog, "GPL", 0);
                }
@@ -1912,33 +1182,259 @@ out:
        bpf_object__close(obj);
 }
 
+#define CHECK_FLOW_KEYS(desc, got, expected)                           \
+       CHECK(memcmp(&got, &expected, sizeof(got)) != 0,                \
+             desc,                                                     \
+             "nhoff=%u/%u "                                            \
+             "thoff=%u/%u "                                            \
+             "addr_proto=0x%x/0x%x "                                   \
+             "is_frag=%u/%u "                                          \
+             "is_first_frag=%u/%u "                                    \
+             "is_encap=%u/%u "                                         \
+             "n_proto=0x%x/0x%x "                                      \
+             "sport=%u/%u "                                            \
+             "dport=%u/%u\n",                                          \
+             got.nhoff, expected.nhoff,                                \
+             got.thoff, expected.thoff,                                \
+             got.addr_proto, expected.addr_proto,                      \
+             got.is_frag, expected.is_frag,                            \
+             got.is_first_frag, expected.is_first_frag,                \
+             got.is_encap, expected.is_encap,                          \
+             got.n_proto, expected.n_proto,                            \
+             got.sport, expected.sport,                                \
+             got.dport, expected.dport)
+
+static struct bpf_flow_keys pkt_v4_flow_keys = {
+       .nhoff = 0,
+       .thoff = sizeof(struct iphdr),
+       .addr_proto = ETH_P_IP,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IP),
+};
+
+static struct bpf_flow_keys pkt_v6_flow_keys = {
+       .nhoff = 0,
+       .thoff = sizeof(struct ipv6hdr),
+       .addr_proto = ETH_P_IPV6,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+};
+
+static void test_flow_dissector(void)
+{
+       struct bpf_flow_keys flow_keys;
+       struct bpf_object *obj;
+       __u32 duration, retval;
+       int err, prog_fd;
+       __u32 size;
+
+       err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
+                           "jmp_table", &prog_fd);
+       if (err) {
+               error_cnt++;
+               return;
+       }
+
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys);
+
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
+
+       bpf_object__close(obj);
+}
+
+static void *test_spin_lock(void *arg)
+{
+       __u32 duration, retval;
+       int err, prog_fd = *(u32 *) arg;
+
+       err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4),
+                               NULL, NULL, &retval, &duration);
+       CHECK(err || retval, "",
+             "err %d errno %d retval %d duration %d\n",
+             err, errno, retval, duration);
+       pthread_exit(arg);
+}
+
+static void test_spinlock(void)
+{
+       const char *file = "./test_spin_lock.o";
+       pthread_t thread_id[4];
+       struct bpf_object *obj;
+       int prog_fd;
+       int err = 0, i;
+       void *ret;
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+       if (err) {
+               printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
+               goto close_prog;
+       }
+       for (i = 0; i < 4; i++)
+               assert(pthread_create(&thread_id[i], NULL,
+                                     &test_spin_lock, &prog_fd) == 0);
+       for (i = 0; i < 4; i++)
+               assert(pthread_join(thread_id[i], &ret) == 0 &&
+                      ret == (void *)&prog_fd);
+       goto close_prog_noerr;
+close_prog:
+       error_cnt++;
+close_prog_noerr:
+       bpf_object__close(obj);
+}
+
+static void *parallel_map_access(void *arg)
+{
+       int err, map_fd = *(u32 *) arg;
+       int vars[17], i, j, rnd, key = 0;
+
+       for (i = 0; i < 10000; i++) {
+               err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
+               if (err) {
+                       printf("lookup failed\n");
+                       error_cnt++;
+                       goto out;
+               }
+               if (vars[0] != 0) {
+                       printf("lookup #%d var[0]=%d\n", i, vars[0]);
+                       error_cnt++;
+                       goto out;
+               }
+               rnd = vars[1];
+               for (j = 2; j < 17; j++) {
+                       if (vars[j] == rnd)
+                               continue;
+                       printf("lookup #%d var[1]=%d var[%d]=%d\n",
+                              i, rnd, j, vars[j]);
+                       error_cnt++;
+                       goto out;
+               }
+       }
+out:
+       pthread_exit(arg);
+}
+
+static void test_map_lock(void)
+{
+       const char *file = "./test_map_lock.o";
+       int prog_fd, map_fd[2], vars[17] = {};
+       pthread_t thread_id[6];
+       struct bpf_object *obj;
+       int err = 0, key = 0, i;
+       void *ret;
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+       if (err) {
+               printf("test_map_lock:bpf_prog_load errno %d\n", errno);
+               goto close_prog;
+       }
+       map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
+       if (map_fd[0] < 0)
+               goto close_prog;
+       map_fd[1] = bpf_find_map(__func__, obj, "array_map");
+       if (map_fd[1] < 0)
+               goto close_prog;
+
+       bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);
+
+       for (i = 0; i < 4; i++)
+               assert(pthread_create(&thread_id[i], NULL,
+                                     &test_spin_lock, &prog_fd) == 0);
+       for (i = 4; i < 6; i++)
+               assert(pthread_create(&thread_id[i], NULL,
+                                     &parallel_map_access, &map_fd[i - 4]) == 0);
+       for (i = 0; i < 4; i++)
+               assert(pthread_join(thread_id[i], &ret) == 0 &&
+                      ret == (void *)&prog_fd);
+       for (i = 4; i < 6; i++)
+               assert(pthread_join(thread_id[i], &ret) == 0 &&
+                      ret == (void *)&map_fd[i - 4]);
+       goto close_prog_noerr;
+close_prog:
+       error_cnt++;
+close_prog_noerr:
+       bpf_object__close(obj);
+}
+
+static void sigalrm_handler(int s) {}
+static struct sigaction sigalrm_action = {
+       .sa_handler = sigalrm_handler,
+};
+
+static void test_signal_pending(enum bpf_prog_type prog_type)
+{
+       struct bpf_insn prog[4096];
+       struct itimerval timeo = {
+               .it_value.tv_usec = 100000, /* 100ms */
+       };
+       __u32 duration, retval;
+       int prog_fd;
+       int err;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(prog); i++)
+               prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
+       prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN();
+
+       prog_fd = bpf_load_program(prog_type, prog, ARRAY_SIZE(prog),
+                                  "GPL", 0, NULL, 0);
+       CHECK(prog_fd < 0, "test-run", "errno %d\n", errno);
+
+       err = sigaction(SIGALRM, &sigalrm_action, NULL);
+       CHECK(err, "test-run-signal-sigaction", "errno %d\n", errno);
+
+       err = setitimer(ITIMER_REAL, &timeo, NULL);
+       CHECK(err, "test-run-signal-timer", "errno %d\n", errno);
+
+       err = bpf_prog_test_run(prog_fd, 0xffffffff, &pkt_v4, sizeof(pkt_v4),
+                               NULL, NULL, &retval, &duration);
+       CHECK(duration > 500000000, /* 500ms */
+             "test-run-signal-duration",
+             "duration %dns > 500ms\n",
+             duration);
+
+       signal(SIGALRM, SIG_DFL);
+}
+
+#define DECLARE
+#include <prog_tests/tests.h>
+#undef DECLARE
+
 int main(void)
 {
        srand(time(NULL));
 
        jit_enabled = is_jit_enabled();
 
-       test_pkt_access();
+#define CALL
+#include <prog_tests/tests.h>
+#undef CALL
        test_prog_run_xattr();
-       test_xdp();
-       test_xdp_adjust_tail();
        test_l4lb_all();
-       test_xdp_noinline();
        test_tcp_estats();
        test_bpf_obj_id();
-       test_pkt_md_access();
        test_obj_name();
        test_tp_attach_query();
-       test_stacktrace_map();
-       test_stacktrace_build_id();
-       test_stacktrace_build_id_nmi();
-       test_stacktrace_map_raw_tp();
        test_get_stack_raw_tp();
        test_task_fd_query_rawtp();
        test_task_fd_query_tp();
        test_reference_tracking();
        test_queue_stack_map(QUEUE);
        test_queue_stack_map(STACK);
+       test_flow_dissector();
+       test_spinlock();
+       test_map_lock();
+       test_signal_pending(BPF_PROG_TYPE_SOCKET_FILTER);
+       test_signal_pending(BPF_PROG_TYPE_FLOW_DISSECTOR);
 
        printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
        return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;