Merge tag 'v3.14.25' into backport/v3.14.24-ltsi-rc1+v3.14.25/snapshot-merge.wip
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / staging / lttng / lttng-context-perf-counters.c
1 /*
2  * lttng-context-perf-counters.c
3  *
4  * LTTng performance monitoring counters (perf-counters) integration module.
5  *
6  * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7  *
8  * This library is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; only
11  * version 2.1 of the License.
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with this library; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/perf_event.h>
26 #include <linux/list.h>
27 #include <linux/string.h>
28 #include <linux/cpu.h>
29 #include "lttng-events.h"
30 #include "wrapper/ringbuffer/frontend_types.h"
31 #include "wrapper/vmalloc.h"
32 #include "wrapper/perf.h"
33 #include "lttng-tracer.h"
34
35 static
36 size_t perf_counter_get_size(size_t offset)
37 {
38         size_t size = 0;
39
40         size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
41         size += sizeof(uint64_t);
42         return size;
43 }
44
45 static
46 void perf_counter_record(struct lttng_ctx_field *field,
47                          struct lib_ring_buffer_ctx *ctx,
48                          struct lttng_channel *chan)
49 {
50         struct perf_event *event;
51         uint64_t value;
52
53         event = field->u.perf_counter->e[ctx->cpu];
54         if (likely(event)) {
55                 if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
56                         value = 0;
57                 } else {
58                         event->pmu->read(event);
59                         value = local64_read(&event->count);
60                 }
61         } else {
62                 /*
63                  * Perf chooses not to be clever and not to support enabling a
64                  * perf counter before the cpu is brought up. Therefore, we need
65                  * to support having events coming (e.g. scheduler events)
66                  * before the counter is setup. Write an arbitrary 0 in this
67                  * case.
68                  */
69                 value = 0;
70         }
71         lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
72         chan->ops->event_write(ctx, &value, sizeof(value));
73 }
74
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
/*
 * Dummy overflow handler: the counters are read synchronously from the
 * tracepoint record callback, so overflow notifications are ignored.
 * Two variants are needed because the perf overflow-handler signature
 * dropped the "nmi" argument after 3.0 (hence the 3.0.99 version gate).
 */
static
void overflow_callback(struct perf_event *event,
                       struct perf_sample_data *data,
                       struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
                       struct perf_sample_data *data,
                       struct pt_regs *regs)
{
}
#endif
90
91 static
92 void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
93 {
94         struct perf_event **events = field->u.perf_counter->e;
95         int cpu;
96
97         get_online_cpus();
98         for_each_online_cpu(cpu)
99                 perf_event_release_kernel(events[cpu]);
100         put_online_cpus();
101 #ifdef CONFIG_HOTPLUG_CPU
102         unregister_cpu_notifier(&field->u.perf_counter->nb);
103 #endif
104         kfree(field->event_field.name);
105         kfree(field->u.perf_counter->attr);
106         kfree(events);
107         kfree(field->u.perf_counter);
108 }
109
110 #ifdef CONFIG_HOTPLUG_CPU
111
/**
 *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	/*
	 * hp_enable is only set once lttng_add_perf_counter_to_ctx() has
	 * finished creating the initial counters; ignore notifications
	 * arriving before that.
	 */
	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* CPU came up: create its pinned counter. */
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* CPU going away: publish NULL before releasing the event. */
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}
164
165 #endif
166
/*
 * Add a perf-counter context field named @name, sampling the counter
 * described by perf attributes (@type, @config), to the context chain
 * @ctx.  Creates one pinned kernel counter per online CPU and registers
 * a hotplug notifier so counters follow CPUs coming and going.  The
 * recorded field is a host-endian base-10 uint64_t.
 *
 * Returns 0 on success, or a negative errno:
 *   -ENOMEM  allocation or context-append failure
 *   -EEXIST  a context field with this name already exists
 *   -EINVAL  perf counter creation failed
 *   -EBUSY   perf counter created but stuck in error state
 */
int lttng_add_perf_counter_to_ctx(uint32_t type,
				  uint64_t config,
				  const char *name,
				  struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	int cpu;
	char *name_alloc;

	/* One event slot per *possible* CPU, so hotplugged CPUs fit too. */
	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;	/* Keep the counter on its CPU at all times. */
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	/* Refuse duplicate context field names. */
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Register the notifier before creating counters; it stays a
	 * no-op until hp_enable is set at the end of setup, so there is
	 * no window where hotplug and this function race on events[].
	 */
	perf_field->nb.notifier_call =
		lttng_perf_counter_cpu_hp_callback;
	perf_field->nb.priority = 0;
	register_cpu_notifier(&perf_field->nb);
#endif

	get_online_cpus();
	for_each_online_cpu(cpu) {
		events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback);
		if (!events[cpu] || IS_ERR(events[cpu])) {
			ret = -EINVAL;
			goto counter_error;
		}
		if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
			ret = -EBUSY;
			goto counter_busy;
		}
	}
	put_online_cpus();

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	/* Setup complete: let the hotplug callback manage counters now. */
	perf_field->hp_enable = 1;

	wrapper_vmalloc_sync_all();
	return 0;

counter_busy:
counter_error:
	/* NOTE: still inside get_online_cpus() when jumping here. */
	for_each_online_cpu(cpu) {
		if (events[cpu] && !IS_ERR(events[cpu]))
			perf_event_release_kernel(events[cpu]);
	}
	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&perf_field->nb);
#endif
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	kfree(events);
	return ret;
}
282
/* Module metadata. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");