struct {
	int max;
	struct syscall *table;
+	struct bpf_map *map;
	struct {
		struct perf_evsel *sys_enter,
				  *sys_exit,
goto out;
}
+#ifdef HAVE_LIBBPF_SUPPORT
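+/*
+ * Propagate the event qualifier list to the "syscalls" BPF map: each listed
+ * syscall id gets true ("trace it") for a plain list, or false ("skip it")
+ * when the list is negated.
+ */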
+static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
+{
+	int fd = bpf_map__fd(trace->syscalls.map);
+	bool value = !trace->not_ev_qualifier;
+	int err = 0;
+	size_t i;
+
+	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
+		int key = trace->ev_qualifier_ids.entries[i];
+
+		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
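+/*
+ * Set every syscall id in the "syscalls" map to the same initial state,
+ * per-id exceptions are then applied by trace__set_ev_qualifier_bpf_filter().
+ */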
+static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
+{
+	int fd = bpf_map__fd(trace->syscalls.map);
+	int err = 0, key;
+
+	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
+		err = bpf_map_update_elem(fd, &key, &enabled, BPF_ANY);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
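+/*
+ * No qualifier list: trace every syscall. With a list, initialize all
+ * entries to the complement of the list (disabled for "trace only these",
+ * enabled for a negated list); trace__set_ev_qualifier_bpf_filter() then
+ * flips just the listed ids.
+ */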
+static int trace__init_syscalls_bpf_map(struct trace *trace)
+{
+	bool enabled = true;
+
+	if (trace->ev_qualifier_ids.nr)
+		enabled = trace->not_ev_qualifier;
+
+	return __trace__init_syscalls_bpf_map(trace, enabled);
+}
+#else
+static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
+{
+	return 0;
+}
+
+static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
+{
+	return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
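+	/*
+	 * With the "syscalls" BPF map in place, filter in the BPF program
+	 * instead of using a tracepoint filter expression.
+	 */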
+	if (trace->syscalls.map)
+		return trace__set_ev_qualifier_bpf_filter(trace);
	return trace__set_ev_qualifier_tp_filter(trace);
}
	if (err < 0)
		goto out_error_mem;
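+	/* Seed the "syscalls" BPF map with the default enabled/disabled state. */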
+	if (trace->syscalls.map)
+		trace__init_syscalls_bpf_map(trace);
+
	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
trace->filter_pids.map = bpf__find_map_by_name("pids_filtered");
}
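+/*
+ * Look up the "syscalls" map defined in the augmented_raw_syscalls BPF
+ * program; trace->syscalls.map stays NULL when that program isn't in use.
+ */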
+static void trace__set_bpf_map_syscalls(struct trace *trace)
+{
+	trace->syscalls.map = bpf__find_map_by_name("syscalls");
+}
+
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
	if (evsel) {
		trace.syscalls.events.augmented = evsel;
		trace__set_bpf_map_filtered_pids(&trace);
+		trace__set_bpf_map_syscalls(&trace);
	}
	err = bpf__setup_stdout(trace.evlist);
.max_entries = __NR_CPUS__,
};
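+/*
+ * syscall id -> bool table, filled from userspace by 'perf trace' to tell
+ * these handlers which syscalls it is interested in.
+ */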
+struct bpf_map SEC("maps") syscalls = {
+	.type        = BPF_MAP_TYPE_ARRAY,
+	.key_size    = sizeof(int),
+	.value_size  = sizeof(bool),
+	.max_entries = 512,
+};
+
struct syscall_enter_args {
	unsigned long long common_tp_fields;
	long syscall_nr;
		struct syscall_enter_args args;
		struct augmented_filename filename;
	} augmented_args;
+	bool *enabled;
	unsigned int len = sizeof(augmented_args);
	const void *filename_arg = NULL;
		return 0;
	probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+
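+	/* Bail out if userspace didn't mark this syscall id as enabled. */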
+	enabled = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
+	if (enabled == NULL || !*enabled)
+		return 0;
	/*
	 * Yonghong and Edward Cree sayz:
	 *
SEC("raw_syscalls:sys_exit")
int sys_exit(struct syscall_exit_args *args)
{
-	return !pid_filter__has(&pids_filtered, getpid());
+	struct syscall_exit_args exit_args;
+	bool *enabled;
+
+	if (pid_filter__has(&pids_filtered, getpid()))
+		return 0;
+
+	probe_read(&exit_args, sizeof(exit_args), args);
+
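+	/* Same syscall id filter as in sys_enter. */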
+	enabled = bpf_map_lookup_elem(&syscalls, &exit_args.syscall_nr);
+	if (enabled == NULL || !*enabled)
+		return 0;
+
+	return 1;
}
license(GPL);