1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 // Copyright (c) 2023 Google
4 #include <bpf/bpf_helpers.h>
5 #include <bpf/bpf_tracing.h>
6 #include <bpf/bpf_core_read.h>
8 #include "sample-filter.h"
10 /* BPF map that will be filled by user space */
12 __uint(type, BPF_MAP_TYPE_ARRAY);
14 __type(value, struct perf_bpf_filter_entry);
15 __uint(max_entries, MAX_FILTERS);
16 } filters SEC(".maps");
20 void *bpf_cast_to_kern_ctx(void *) __ksym;
22 /* new kernel perf_sample_data definition */
23 struct perf_sample_data___new {
25 } __attribute__((preserve_access_index));
27 /* new kernel perf_mem_data_src definition */
28 union perf_mem_data_src__new {
31 __u64 mem_op:5, /* type of opcode */
32 mem_lvl:14, /* memory hierarchy level */
33 mem_snoop:5, /* snoop mode */
34 mem_lock:2, /* lock instr */
35 mem_dtlb:7, /* tlb access */
36 mem_lvl_num:4, /* memory hierarchy level number */
37 mem_remote:1, /* remote */
38 mem_snoopx:2, /* snoop mode, ext */
39 mem_blk:3, /* access blocked */
40 mem_hops:3, /* hop level */
45 /* helper function to return the given perf sample data */
46 static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
47 struct perf_bpf_filter_entry *entry)
49 struct perf_sample_data___new *data = (void *)kctx->data;
51 if (!bpf_core_field_exists(data->sample_flags) ||
52 (data->sample_flags & entry->flags) == 0)
55 switch (entry->flags) {
57 return kctx->data->ip;
59 return kctx->data->id;
62 return kctx->data->tid_entry.pid;
64 return kctx->data->tid_entry.tid;
66 return kctx->data->cpu_entry.cpu;
67 case PERF_SAMPLE_TIME:
68 return kctx->data->time;
69 case PERF_SAMPLE_ADDR:
70 return kctx->data->addr;
71 case PERF_SAMPLE_PERIOD:
72 return kctx->data->period;
73 case PERF_SAMPLE_TRANSACTION:
74 return kctx->data->txn;
75 case PERF_SAMPLE_WEIGHT_STRUCT:
77 return kctx->data->weight.var1_dw;
79 return kctx->data->weight.var2_w;
81 return kctx->data->weight.var3_w;
83 case PERF_SAMPLE_WEIGHT:
84 return kctx->data->weight.full;
85 case PERF_SAMPLE_PHYS_ADDR:
86 return kctx->data->phys_addr;
87 case PERF_SAMPLE_CODE_PAGE_SIZE:
88 return kctx->data->code_page_size;
89 case PERF_SAMPLE_DATA_PAGE_SIZE:
90 return kctx->data->data_page_size;
91 case PERF_SAMPLE_DATA_SRC:
93 return kctx->data->data_src.mem_op;
95 return kctx->data->data_src.mem_lvl_num;
96 if (entry->part == 3) {
97 __u32 snoop = kctx->data->data_src.mem_snoop;
98 __u32 snoopx = kctx->data->data_src.mem_snoopx;
100 return (snoopx << 5) | snoop;
102 if (entry->part == 4)
103 return kctx->data->data_src.mem_remote;
104 if (entry->part == 5)
105 return kctx->data->data_src.mem_lock;
106 if (entry->part == 6)
107 return kctx->data->data_src.mem_dtlb;
108 if (entry->part == 7)
109 return kctx->data->data_src.mem_blk;
110 if (entry->part == 8) {
111 union perf_mem_data_src__new *data = (void *)&kctx->data->data_src;
113 if (bpf_core_field_exists(data->mem_hops))
114 return data->mem_hops;
118 /* return the whole word */
119 return kctx->data->data_src.val;
/*
 * Evaluate one filter term.  Outside a group, a failing comparison drops
 * the sample immediately.  Inside a group (terms are OR-ed together), a
 * failing term is ignored and a matching term records success in
 * group_result; PBF_OP_GROUP_END decides whether to drop.
 * Relies on in_group/group_result locals and the drop: label at the
 * expansion site.
 */
#define CHECK_RESULT(data, op, val)			\
	if (!(data op val)) {				\
		if (!in_group)				\
			goto drop;			\
	} else if (in_group) {				\
		group_result = 1;			\
	}
134 /* BPF program to be called from perf event overflow handler */
136 int perf_sample_filter(void *ctx)
138 struct bpf_perf_event_data_kern *kctx;
139 struct perf_bpf_filter_entry *entry;
142 int group_result = 0;
145 kctx = bpf_cast_to_kern_ctx(ctx);
147 for (i = 0; i < MAX_FILTERS; i++) {
148 int key = i; /* needed for verifier :( */
150 entry = bpf_map_lookup_elem(&filters, &key);
153 sample_data = perf_get_sample(kctx, entry);
157 CHECK_RESULT(sample_data, ==, entry->value)
160 CHECK_RESULT(sample_data, !=, entry->value)
163 CHECK_RESULT(sample_data, >, entry->value)
166 CHECK_RESULT(sample_data, >=, entry->value)
169 CHECK_RESULT(sample_data, <, entry->value)
172 CHECK_RESULT(sample_data, <=, entry->value)
175 CHECK_RESULT(sample_data, &, entry->value)
177 case PBF_OP_GROUP_BEGIN:
181 case PBF_OP_GROUP_END:
182 if (group_result == 0)
188 /* generate sample data */
192 __sync_fetch_and_add(&dropped, 1);
/* dual license lets the program use GPL-only BPF helpers while staying BSD-compatible */
char LICENSE[] SEC("license") = "Dual BSD/GPL";