/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"
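/*
 * RCU-published set of per-CPU sample buffers. Each CPU gets one
 * allocation large enough for PERF_NR_CONTEXTS entries, so a capture
 * in one recursion context never overlaps one it interrupted.
 */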
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
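/*
 * Size of one sample buffer: the fixed entry header plus one __u64
 * slot for each regular stack entry and each PERF_CONTEXT_* marker
 * that may be stored, per the two sysctl caps above.
 */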
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}
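/*
 * Per-CPU recursion guard: one counter per context (task, softirq,
 * hardirq, NMI) so that a capture interrupted by a sample from a
 * higher context does not reuse the same buffer slot.
 */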
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;
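/*
 * Weak default implementations; architectures that support callchain
 * sampling override these with their real unwinders.
 */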
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}
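/*
 * Unpublish the buffers and defer the actual kfree()s to an RCU grace
 * period, so that any NMI still walking the old pointer is done with
 * it before the memory goes away.
 */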
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}
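/*
 * Take a reference on the callchain buffers; the first user allocates
 * them. @event_max_stack is the per-event depth requested through the
 * perf_event_attr, checked here against the global sysctl cap.
 */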
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		/*
		 * If requesting per event more than the global cap,
		 * return a different error to help userspace figure
		 * this out.
		 *
		 * And also do it here so that we have &callchain_mutex held.
		 */
		if (event_max_stack > sysctl_perf_event_max_stack)
			err = -EOVERFLOW;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}
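/*
 * Drop a reference; the last user tears the buffers down under the
 * mutex, serializing against a racing get_callchain_buffers().
 */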
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}
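/*
 * Claim the current CPU's buffer slot for the active recursion
 * context. Returns NULL if this context is already mid-capture
 * (*rctx == -1) or if the buffers have been torn down.
 */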
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}
static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}
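/*
 * Capture a callchain for @event, honouring its exclude_callchain_*
 * attributes and its sample_max_stack limit.
 */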
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user   = !event->attr.exclude_callchain_user;
	/* Disallow cross-task user callchains. */
	bool crosstask = event->ctx->task && event->ctx->task != current;
	const u32 max_stack = event->attr.sample_max_stack;

	if (!kernel && !user)
		return NULL;

	return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
}
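/*
 * Core capture routine, also used by BPF stack maps. @init_nr preloads
 * entry->nr for callers that reuse a partially filled entry, and
 * @add_mark selects whether PERF_CONTEXT_* separators are stored
 * between the kernel and user parts of the chain.
 */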
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry		= entry;
	ctx.max_stack		= max_stack;
	ctx.nr			= entry->nr = init_nr;
	ctx.contexts		= 0;
	ctx.contexts_maxed	= false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			mm_segment_t fs;

			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			fs = get_fs();
			set_fs(USER_DS);
			perf_callchain_user(&ctx, regs);
			set_fs(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/*
	 * The value can only change while no callchain events exist,
	 * since the buffers are sized from it at allocation time.
	 */
	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}