perf bpf filter: Fix a broken perf sample data naming for BPF CO-RE
[platform/kernel/linux-starfive.git] / tools/perf/util/bpf_skel/sample_filter.bpf.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2023 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#include "sample-filter.h"

/* BPF map that will be filled by user space */
struct filters {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __type(key, int);
        __type(value, struct perf_bpf_filter_entry);
        __uint(max_entries, MAX_FILTERS);
} filters SEC(".maps");

/* number of samples dropped by the filter, read back by user space */
int dropped;

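/*
 * bpf_cast_to_kern_ctx() is a kfunc (hence the __ksym declaration) that
 * converts the program's perf_event context pointer into the kernel-side
 * struct bpf_perf_event_data_kern, giving direct access to the raw
 * perf_sample_data.
 */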
void *bpf_cast_to_kern_ctx(void *) __ksym;

/* new kernel perf_sample_data definition */
struct perf_sample_data___new {
        __u64 sample_flags;
} __attribute__((preserve_access_index));
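
/*
 * Note: libbpf CO-RE ignores the "___<tag>" part of a local type name when
 * matching it against kernel types, so the ___new definitions here relocate
 * against the real struct perf_sample_data / union perf_mem_data_src and
 * bpf_core_field_exists() can probe for newer fields at load time.
 */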

/* new kernel perf_mem_data_src definition */
union perf_mem_data_src___new {
        __u64 val;
        struct {
                __u64   mem_op:5,       /* type of opcode */
                        mem_lvl:14,     /* memory hierarchy level */
                        mem_snoop:5,    /* snoop mode */
                        mem_lock:2,     /* lock instr */
                        mem_dtlb:7,     /* tlb access */
                        mem_lvl_num:4,  /* memory hierarchy level number */
                        mem_remote:1,   /* remote */
                        mem_snoopx:2,   /* snoop mode, ext */
                        mem_blk:3,      /* access blocked */
                        mem_hops:3,     /* hop level */
                        mem_rsvd:18;
        };
};
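
/*
 * mem_hops is a relatively recent addition to perf_mem_data_src; the ___new
 * copy above lets the program check bpf_core_field_exists(data->mem_hops)
 * and fall back to 0 when running on an older kernel without the field.
 */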

/* helper function to return the given perf sample data */
static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
                                    struct perf_bpf_filter_entry *entry)
{
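        /*
         * entry->part selects a sub-field for sample types that carry more
         * than one value (e.g. pid vs. tid for PERF_SAMPLE_TID, or the
         * individual perf_mem_data_src fields for PERF_SAMPLE_DATA_SRC).
         */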
        struct perf_sample_data___new *data = (void *)kctx->data;

        if (!bpf_core_field_exists(data->sample_flags) ||
            (data->sample_flags & entry->flags) == 0)
                return 0;

        switch (entry->flags) {
        case PERF_SAMPLE_IP:
                return kctx->data->ip;
        case PERF_SAMPLE_ID:
                return kctx->data->id;
        case PERF_SAMPLE_TID:
                if (entry->part)
                        return kctx->data->tid_entry.pid;
                else
                        return kctx->data->tid_entry.tid;
        case PERF_SAMPLE_CPU:
                return kctx->data->cpu_entry.cpu;
        case PERF_SAMPLE_TIME:
                return kctx->data->time;
        case PERF_SAMPLE_ADDR:
                return kctx->data->addr;
        case PERF_SAMPLE_PERIOD:
                return kctx->data->period;
        case PERF_SAMPLE_TRANSACTION:
                return kctx->data->txn;
        case PERF_SAMPLE_WEIGHT_STRUCT:
                if (entry->part == 1)
                        return kctx->data->weight.var1_dw;
                if (entry->part == 2)
                        return kctx->data->weight.var2_w;
                if (entry->part == 3)
                        return kctx->data->weight.var3_w;
                /* fall through */
        case PERF_SAMPLE_WEIGHT:
                return kctx->data->weight.full;
        case PERF_SAMPLE_PHYS_ADDR:
                return kctx->data->phys_addr;
        case PERF_SAMPLE_CODE_PAGE_SIZE:
                return kctx->data->code_page_size;
        case PERF_SAMPLE_DATA_PAGE_SIZE:
                return kctx->data->data_page_size;
        case PERF_SAMPLE_DATA_SRC:
                if (entry->part == 1)
                        return kctx->data->data_src.mem_op;
                if (entry->part == 2)
                        return kctx->data->data_src.mem_lvl_num;
                if (entry->part == 3) {
                        __u32 snoop = kctx->data->data_src.mem_snoop;
                        __u32 snoopx = kctx->data->data_src.mem_snoopx;

                        return (snoopx << 5) | snoop;
                }
                if (entry->part == 4)
                        return kctx->data->data_src.mem_remote;
                if (entry->part == 5)
                        return kctx->data->data_src.mem_lock;
                if (entry->part == 6)
                        return kctx->data->data_src.mem_dtlb;
                if (entry->part == 7)
                        return kctx->data->data_src.mem_blk;
                if (entry->part == 8) {
                        union perf_mem_data_src___new *data = (void *)&kctx->data->data_src;

                        if (bpf_core_field_exists(data->mem_hops))
                                return data->mem_hops;

                        return 0;
                }
                /* return the whole word */
                return kctx->data->data_src.val;
        default:
                break;
        }
        return 0;
}

#define CHECK_RESULT(data, op, val)                     \
        if (!(data op val)) {                           \
                if (!in_group)                          \
                        goto drop;                      \
        } else if (in_group) {                          \
                group_result = 1;                       \
        }
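
/*
 * Outside of a group, a single failed comparison drops the sample right
 * away.  Inside a PBF_OP_GROUP_BEGIN/END pair the comparisons are OR-ed:
 * any match sets group_result, and the sample is dropped at GROUP_END only
 * if nothing in the group matched.
 */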

/* BPF program to be called from perf event overflow handler */
SEC("perf_event")
int perf_sample_filter(void *ctx)
{
        struct bpf_perf_event_data_kern *kctx;
        struct perf_bpf_filter_entry *entry;
        __u64 sample_data;
        int in_group = 0;
        int group_result = 0;
        int i;

        kctx = bpf_cast_to_kern_ctx(ctx);

        for (i = 0; i < MAX_FILTERS; i++) {
                int key = i; /* needed for verifier :( */

                entry = bpf_map_lookup_elem(&filters, &key);
                if (entry == NULL)
                        break;
                sample_data = perf_get_sample(kctx, entry);

                switch (entry->op) {
                case PBF_OP_EQ:
                        CHECK_RESULT(sample_data, ==, entry->value)
                        break;
                case PBF_OP_NEQ:
                        CHECK_RESULT(sample_data, !=, entry->value)
                        break;
                case PBF_OP_GT:
                        CHECK_RESULT(sample_data, >, entry->value)
                        break;
                case PBF_OP_GE:
                        CHECK_RESULT(sample_data, >=, entry->value)
                        break;
                case PBF_OP_LT:
                        CHECK_RESULT(sample_data, <, entry->value)
                        break;
                case PBF_OP_LE:
                        CHECK_RESULT(sample_data, <=, entry->value)
                        break;
                case PBF_OP_AND:
                        CHECK_RESULT(sample_data, &, entry->value)
                        break;
                case PBF_OP_GROUP_BEGIN:
                        in_group = 1;
                        group_result = 0;
                        break;
                case PBF_OP_GROUP_END:
                        if (group_result == 0)
                                goto drop;
                        in_group = 0;
                        break;
                }
        }
        /* generate sample data */
        return 1;

drop:
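        /* a perf_event BPF program returning 0 suppresses the sample output */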
        __sync_fetch_and_add(&dropped, 1);
        return 0;
}
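
/*
 * A rough sketch of how user space might drive this program, assuming the
 * libbpf skeleton generated from this object ("sample_filter_bpf") and an
 * already opened perf event fd; perf_event_open() setup and error handling
 * are omitted:
 *
 *      struct sample_filter_bpf *skel = sample_filter_bpf__open_and_load();
 *      struct perf_bpf_filter_entry entry = {
 *              .op    = PBF_OP_GT,
 *              .flags = PERF_SAMPLE_PERIOD,
 *              .value = 100,   // e.g. keep samples with period > 100
 *      };
 *      int key = 0;
 *
 *      bpf_map__update_elem(skel->maps.filters, &key, sizeof(key),
 *                           &entry, sizeof(entry), BPF_ANY);
 *      bpf_program__attach_perf_event(skel->progs.perf_sample_filter,
 *                                     perf_event_fd);
 */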

char LICENSE[] SEC("license") = "Dual BSD/GPL";