#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
8 __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
9 __uint(key_size, sizeof(int));
10 __uint(value_size, sizeof(u32));
11 __uint(max_entries, 64);
12 } counters SEC(".maps");
15 __uint(type, BPF_MAP_TYPE_HASH);
18 __uint(max_entries, 64);
19 } values SEC(".maps");
22 __uint(type, BPF_MAP_TYPE_HASH);
24 __type(value, struct bpf_perf_event_value);
25 __uint(max_entries, 64);
26 } values2 SEC(".maps");
28 SEC("kprobe/htab_map_get_next_key")
29 int bpf_prog1(struct pt_regs *ctx)
31 u32 key = bpf_get_smp_processor_id();
35 count = bpf_perf_event_read(&counters, key);
37 if (error <= -2 && error >= -22)
40 val = bpf_map_lookup_elem(&values, &key);
44 bpf_map_update_elem(&values, &key, &count, BPF_NOEXIST);
/*
 * Since *_map_lookup_elem can't be expected to trigger bpf programs
 * due to potential deadlocks (bpf_disable_instrumentation), this bpf
 * program will be attached to bpf_map_copy_value (which is called
 * from map_lookup_elem) and will only filter the hashtable type.
 */
55 SEC("kprobe/bpf_map_copy_value")
56 int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
58 u32 key = bpf_get_smp_processor_id();
59 struct bpf_perf_event_value *val, buf;
60 enum bpf_map_type type;
63 type = BPF_CORE_READ(map, map_type);
64 if (type != BPF_MAP_TYPE_HASH)
67 error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
71 val = bpf_map_lookup_elem(&values2, &key);
75 bpf_map_update_elem(&values2, &key, &buf, BPF_NOEXIST);
80 char _license[] SEC("license") = "GPL";
81 u32 _version SEC("version") = LINUX_VERSION_CODE;