/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

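/*
 * Scratch buffers, one per recursion context: task, softirq, hardirq
 * and NMI (see perf_swevent_get_recursion_context()).
 */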
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

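/*
 * Attach @p_event to @tp_event. Only the first perf event attached to
 * a given trace event allocates the per-cpu hlist of active events and
 * registers the perf probe with the tracepoint; later ones just bump
 * perf_refcount. The four scratch buffers are shared by all trace
 * events and allocated on first use. Called with event_mutex held.
 */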
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	if (tp_event->class->reg)
		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	else
		ret = tracepoint_probe_register(tp_event->name,
						tp_event->class->perf_probe,
						tp_event);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

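	/*
	 * Undo whatever this call managed to set up. free_percpu() on a
	 * NULL pointer is a no-op, so walking the whole buffer array is
	 * safe even if it was only partially filled.
	 */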
fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

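/*
 * Resolve p_event->attr.config to a registered trace event and attach
 * the perf event to it. A reference on the module that owns the event
 * is held until perf_trace_destroy().
 */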
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class &&
		    (tp_event->class->perf_probe ||
		     tp_event->class->reg) &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

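/*
 * Add the event to this CPU's RCU-protected hlist so the probe finds
 * it on the fast path.
 */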
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = this_cpu_ptr(list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

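/* Drop the event from its per-cpu hlist; lookups are RCU-protected. */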
void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

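/*
 * Drop one reference on the trace event; on the last one, unhook the
 * probe, wait for in-flight callbacks to finish, then free the per-cpu
 * hlist and, once no trace event uses perf at all, the scratch buffers.
 */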
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	if (tp_event->class->reg)
		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
	else
		tracepoint_probe_unregister(tp_event->name,
					    tp_event->class->perf_probe,
					    tp_event);

	/*
	 * Ensure our callback won't be called anymore. See
	 * tracepoint_probe_unregister() and __DO_TRACE().
	 */
	synchronize_sched();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
	mutex_unlock(&event_mutex);
}

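/*
 * Reserve space in the scratch buffer of the current recursion context
 * and initialize the common trace entry header. Returns NULL if we are
 * recursing in this context; on success, *rctxp must be handed back via
 * perf_swevent_put_recursion_context() once the event is submitted.
 */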
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

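	/*
	 * Claim the recursion-context slot (task, softirq, hardirq or
	 * NMI); a negative return means we are already tracing in this
	 * context and must drop the event.
	 */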
	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
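
/*
 * Usage sketch (not part of this file; names as in the TRACE_EVENT
 * perf glue of this era): a generated probe pairs the prepare call
 * with perf_trace_buf_submit(), roughly:
 *
 *	entry = perf_trace_buf_prepare(__entry_size,
 *				       event_call->event.type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	<fill in event-specific fields>
 *	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,
 *			      __count, regs, head);
 */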