tools/perf/util/ftrace.h
#ifndef __PERF_FTRACE_H__
#define __PERF_FTRACE_H__

#include <linux/list.h>

#include "target.h"

struct evlist;

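/*
 * Option/state block for the 'perf ftrace' subcommands, filled in from the
 * command line by builtin-ftrace.c.
 */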
struct perf_ftrace {
        struct evlist           *evlist;
        struct target           target;
        const char              *tracer;        /* e.g. "function" or "function_graph" */
        /* function filter lists, each holding struct filter_entry nodes */
        struct list_head        filters;
        struct list_head        notrace;
        struct list_head        graph_funcs;
        struct list_head        nograph_funcs;
        unsigned long           percpu_buffer_size;
        bool                    inherit;        /* also trace forked children */
        bool                    use_nsec;       /* nanosecond resolution for latency buckets */
        int                     graph_depth;
        int                     func_stack_trace;
        int                     func_irq_info;
        int                     graph_nosleep_time;
        int                     graph_noirqs;
        int                     graph_verbose;
        int                     graph_thresh;
        unsigned int            initial_delay;  /* ms to wait before starting tracing */
};

struct filter_entry {
        struct list_head        list;
        char                    name[];         /* function name, flexible array member */
};
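
/*
 * The function name lives in the flexible array member, so an entry is
 * allocated together with its name and then queued on one of the list_head
 * fields above.  A minimal sketch (the real option parsing is expected to
 * live in builtin-ftrace.c):
 *
 *	struct filter_entry *entry;
 *
 *	entry = malloc(sizeof(*entry) + strlen(name) + 1);
 *	if (entry == NULL)
 *		return -ENOMEM;
 *	strcpy(entry->name, name);
 *	list_add_tail(&entry->list, &ftrace->filters);
 */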

#define NUM_BUCKET  22  /* 20 + 2 (for outliers in both directions) */
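/*
 * Sketch of the histogram layout implied by the comment above: bucket 0
 * collects latencies below the base unit (usec, or nsec with use_nsec),
 * buckets 1-20 cover successive power-of-two ranges, and the last bucket
 * collects everything larger.
 */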

#ifdef HAVE_BPF_SKEL

int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
                                  int buckets[]);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);
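
/*
 * Expected call order for the BPF-backed 'perf ftrace latency' path, as a
 * rough sketch (the driving code lives outside this header, in
 * builtin-ftrace.c):
 *
 *	int buckets[NUM_BUCKET] = { 0 };
 *
 *	perf_ftrace__latency_prepare_bpf(ftrace);
 *	perf_ftrace__latency_start_bpf(ftrace);
 *	... target workload runs ...
 *	perf_ftrace__latency_stop_bpf(ftrace);
 *	perf_ftrace__latency_read_bpf(ftrace, buckets);
 *	perf_ftrace__latency_cleanup_bpf(ftrace);
 */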

#else  /* !HAVE_BPF_SKEL */

static inline int
perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
        return -1;
}

static inline int
perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
        return -1;
}

static inline int
perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
        return -1;
}

static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
                              int buckets[] __maybe_unused)
{
        return -1;
}

static inline int
perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
        return -1;
}

#endif  /* HAVE_BPF_SKEL */

#endif  /* __PERF_FTRACE_H__ */