/*
 * kdebug.c - ktap probing core implementation
 *
 * This file is part of ktap by Jovi Zhangwei.
 *
 * Copyright (C) 2012-2013 Jovi Zhangwei <jovi.zhangwei@gmail.com>.
 *
 * ktap is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ktap is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/version.h>
#include <linux/ftrace_event.h>
#include "../include/ktap_types.h"
#include "ktap.h"
#include "kp_obj.h"
#include "kp_str.h"
#include "kp_transport.h"
#include "kp_vm.h"

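/*
 * Run a probe handler closure on its own ktap thread state so the
 * main thread's stack is left untouched while the event is handled.
 */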
static void ktap_call_probe_closure(ktap_state *mainthread, ktap_closure *cl,
                                    struct ktap_event *e)
{
        ktap_state *ks;
        ktap_value *func;

        ks = kp_newthread(mainthread);
        set_closure(ks->top, cl);
        func = ks->top;
        incr_top(ks);

        ks->current_event = e;

        kp_call(ks, func, 0);

        ks->current_event = NULL;
        kp_exitthread(ks);
}

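/*
 * Render the current event through its ftrace "trace" output handler
 * and append the resulting text to @seq.
 */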
void kp_event_tostring(ktap_state *ks, struct trace_seq *seq)
{
        struct ktap_event *e = ks->current_event;
        struct trace_iterator *iter;
        struct trace_event *ev;
        enum print_line_t ret = TRACE_TYPE_NO_CONSUME;

        /* Simulate the iterator */

        /*
         * Use a temporary percpu buffer as the trace_iterator;
         * we cannot reuse the temporary buffer used by printf.
         */
        iter = kp_percpu_data(ks, KTAP_PERCPU_DATA_BUFFER2);

        trace_seq_init(&iter->seq);
        iter->ent = e->entry;

        ev = &(e->call->event);
        if (ev)
                ret = ev->funcs->trace(iter, 0, ev);

        if (ret != TRACE_TYPE_NO_CONSUME) {
                struct trace_seq *s = &iter->seq;
                int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

                s->buffer[len] = '\0';
                _trace_seq_puts(seq, s->buffer);
        }
}

/* This definition must be kept in sync with kernel/trace/trace.h */
struct ftrace_event_field {
        struct list_head        link;
        const char              *name;
        const char              *type;
        int                     filter_type;
        int                     offset;
        int                     size;
        int                     is_signed;
};

static struct list_head *ktap_get_fields(struct ftrace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

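/*
 * Convert one raw trace field into a ktap value: 4- and 8-byte fields
 * become numbers, "char" arrays become strings; anything else is
 * returned as nil.
 */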
static void get_field_value(ktap_state *ks, struct ktap_event *e,
                            struct ftrace_event_field *field, ktap_value *ra)
{
        void *value = (unsigned char *)e->entry + field->offset;

        if (field->size == 4) {
                int n = *(int *)value;
                set_number(ra, n);
                return;
        } else if (field->size == 8) {
                long n = *(long *)value;
                set_number(ra, n);
                return;
        }

        if (!strncmp(field->type, "char", 4)) {
                set_string(ra, kp_tstring_new(ks, (char *)value));
                return;
        }

        /* unhandled field type/size: don't leave *ra holding a stale value */
        set_nil(ra);
}

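/*
 * Look up the n-th field of the current event and convert it with
 * get_field_value(); the result is nil when no matching field is found.
 */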
void kp_event_getarg(ktap_state *ks, ktap_value *ra, int n)
{
        struct ktap_event *e = ks->current_event;
        int index = n;
        struct ftrace_event_field *field;
        struct list_head *head;

        /* This is slow and not safe; fix it in the future. */
        head = ktap_get_fields(e->call);
        list_for_each_entry_reverse(field, head, link) {
                if (--index == 0) {
                        get_field_value(ks, e, field, ra);
                        return;
                }
        }

        set_nil(ra);
}

/*
 * Callback function for the perf event subsystem.
 *
 * To keep ktap reentrant, do not disable irqs in the callback,
 * the same as perf and ftrace do. For reentrancy we need percpu
 * data that is isolated per context (irq/softirq/nmi/process).
 *
 * The recursion check here mainly exists to avoid corrupting the
 * ktap_state from the timer closure callback. For tracepoint
 * recursion, the perf core already handles it.
 *
 * Note the tracepoint handler is called with rcu_read_lock held.
 */
static void ktap_overflow_callback(struct perf_event *event,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
{
        struct ktap_probe_event *ktap_pevent;
        struct ktap_event e;
        ktap_state *ks;
        int rctx;

        ktap_pevent = event->overflow_handler_context;
        ks = ktap_pevent->ks;

        if (unlikely(ks->stop))
                return;

        rctx = get_recursion_context(ks);
        if (rctx < 0)
                return;

        KTAP_STATS(ks)->events_hits += 1;

        /* a profiling perf event doesn't have a valid associated tp_event */
        if (event->tp_event) {
                e.call = event->tp_event;
                e.entry = data->raw->data;
                e.entry_size = data->raw->size;
        }
        e.pevent = ktap_pevent;
        e.regs = regs;

        ktap_call_probe_closure(ks, ktap_pevent->cl, &e);

        put_recursion_context(ks, rctx);
}

static void perf_destructor(struct ktap_probe_event *ktap_pevent)
{
        perf_event_release_kernel(ktap_pevent->perf);
}

static int (*kp_ftrace_profile_set_filter)(struct perf_event *event,
                                           int event_id, char *filter_str);

/*
 * Generic perf event registration function,
 * used by tracepoints/kprobe/uprobe/profile-timer/hw_breakpoint.
 */
void kp_perf_event_register(ktap_state *ks, struct perf_event_attr *attr,
                            struct task_struct *task, char *filter,
                            ktap_closure *cl)
{
        struct ktap_probe_event *ktap_pevent;
        struct kmem_cache *pevent_cache = G(ks)->pevent_cache;
        struct perf_event *event;
        int cpu, ret;

        kp_verbose_printf(ks, "enable perf event id: %d, filter: %s "
                              "pid: %d\n", (int)attr->config, filter,
                              task ? task_tgid_vnr(task) : -1);

        /*
         * Don't start tracing until ktap_wait. The reasons are:
         * 1) some events may hit before the filter is applied;
         * 2) it is simpler to manage the tracing thread;
         * 3) it avoids racing with the main thread.
         *
         * Another way to do this is to set attr.disabled to 1 and call
         * perf_event_enable after the filter is applied; however,
         * perf_event_enable was not exported in kernels older than 3.3,
         * so we dropped that approach.
         */
        ks->stop = 1;

        for_each_cpu(cpu, G(ks)->cpumask) {
                ktap_pevent = kmem_cache_zalloc(pevent_cache, GFP_KERNEL);
                if (!ktap_pevent)
                        return;

                ktap_pevent->ks = ks;
                ktap_pevent->cl = cl;
                event = perf_event_create_kernel_counter(attr, cpu, task,
                                                         ktap_overflow_callback,
                                                         ktap_pevent);
                if (IS_ERR(event)) {
                        int err = PTR_ERR(event);
                        kp_error(ks, "unable to register perf event %d on cpu %d, "
                                     "err: %d\n", (int)attr->config, cpu, err);
                        kp_free(ks, ktap_pevent);
                        return;
                }

                ktap_pevent->perf = event;
                INIT_LIST_HEAD(&ktap_pevent->list);
                list_add_tail(&ktap_pevent->list, &G(ks)->probe_events_head);

                if (!filter)
                        continue;

                ret = kp_ftrace_profile_set_filter(event, attr->config, filter);
                if (ret) {
                        kp_error(ks, "unable to set filter %s for event id %d, "
                                     "ret: %d\n", filter, (int)attr->config, ret);
                        perf_destructor(ktap_pevent);
                        list_del(&ktap_pevent->list);
                        kp_free(ks, ktap_pevent);
                        return;
                }
        }
}

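/*
 * Release every registered perf event, wait for in-flight callbacks to
 * finish, then free the per-event bookkeeping structures.
 */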
static void end_probes(struct ktap_state *ks)
{
        struct ktap_probe_event *ktap_pevent;
        struct list_head *tmp, *pos;
        struct list_head *head = &G(ks)->probe_events_head;

        list_for_each(pos, head) {
                ktap_pevent = container_of(pos, struct ktap_probe_event,
                                           list);
                perf_destructor(ktap_pevent);
        }
        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        list_for_each_safe(pos, tmp, head) {
                ktap_pevent = container_of(pos, struct ktap_probe_event,
                                           list);
                list_del(&ktap_pevent->list);
                kp_free(ks, ktap_pevent);
        }
}

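/*
 * kdebug.probe_by_id(eventdef_addr, handler)
 *
 * The first argument is the userspace address of a ktap_eventdef_info
 * describing tracepoint ids and an optional filter string; the second
 * is the closure to run when any of those events fires.
 */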
static int ktap_lib_probe_by_id(ktap_state *ks)
{
        ktap_closure *cl;
        struct task_struct *task = G(ks)->trace_task;
        ktap_eventdef_info evdef_info;
        char *filter = NULL;
        int *id_arr;
        int ret, i;

        /* the first argument is a userspace address of a ktap_eventdef_info */
        kp_arg_check(ks, 1, KTAP_TNUMBER);
        kp_arg_check(ks, 2, KTAP_TFUNCTION);

        ret = copy_from_user(&evdef_info, (void *)nvalue(kp_arg(ks, 1)),
                             sizeof(evdef_info));
        if (ret)
                return -1;

        if (evdef_info.filter) {
                int len;

                len = strlen_user(evdef_info.filter);
                if (!len || len > 0x1000)
                        return -1;

                filter = kmalloc(len + 1, GFP_KERNEL);
                if (!filter)
                        return -1;

                if (strncpy_from_user(filter, evdef_info.filter, len) < 0) {
                        kfree(filter);
                        return -1;
                }
        }

        id_arr = kmalloc(evdef_info.nr * sizeof(int), GFP_KERNEL);
        if (!id_arr) {
                kfree(filter);
                return -1;
        }

        ret = copy_from_user(id_arr, evdef_info.id_arr,
                             evdef_info.nr * sizeof(int));
        if (ret) {
                kfree(filter);
                kfree(id_arr);
                return -1;
        }

        cl = clvalue(kp_arg(ks, 2));

        for (i = 0; i < evdef_info.nr; i++) {
                struct perf_event_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.type = PERF_TYPE_TRACEPOINT;
                attr.config = id_arr[i];
                attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                   PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD;
                attr.sample_period = 1;
                attr.size = sizeof(attr);
                attr.disabled = 0;

                kp_perf_event_register(ks, &attr, task, filter, cl);
        }

        kfree(filter);
        kfree(id_arr);
        return 0;
}

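/*
 * kdebug.probe_end(handler)
 *
 * Record a closure to be called once when tracing ends.
 */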
static int ktap_lib_probe_end(ktap_state *ks)
{
        kp_arg_check(ks, 1, KTAP_TFUNCTION);

        G(ks)->trace_end_closure = clvalue(kp_arg(ks, 1));
        return 0;
}

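/*
 * kdebug.traceoff()
 *
 * Tear down all probes immediately and run the trace_end closure,
 * if one was registered.
 */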
static int ktap_lib_traceoff(ktap_state *ks)
{
        end_probes(ks);

        /* call trace_end_closure after the probes have ended */
        if (G(ks)->trace_end_closure) {
                set_closure(ks->top, G(ks)->trace_end_closure);
                incr_top(ks);
                kp_call(ks, ks->top - 1, 0);
                G(ks)->trace_end_closure = NULL;
        }

        return 0;
}

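/*
 * Called when the ktap script exits: tear down all probes, run the
 * trace_end closure (unless an error occurred), and destroy the
 * probe event cache.
 */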
void kp_probe_exit(ktap_state *ks)
{
        if (!G(ks)->trace_enabled)
                return;

        end_probes(ks);

        /* call trace_end_closure after the probes have ended */
        if (!G(ks)->error && G(ks)->trace_end_closure) {
                set_closure(ks->top, G(ks)->trace_end_closure);
                incr_top(ks);
                kp_call(ks, ks->top - 1, 0);
                G(ks)->trace_end_closure = NULL;
        }

        kmem_cache_destroy(G(ks)->pevent_cache);
        G(ks)->trace_enabled = 0;
}

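/*
 * Create the kmem cache used for ktap_probe_event allocations and
 * mark tracing as enabled.
 */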
int kp_probe_init(ktap_state *ks)
{
        G(ks)->pevent_cache = KMEM_CACHE(ktap_probe_event, SLAB_PANIC);
        G(ks)->trace_enabled = 1;
        return 0;
}

static const ktap_Reg kdebuglib_funcs[] = {
        {"probe_by_id", ktap_lib_probe_by_id},
        {"probe_end", ktap_lib_probe_end},
        {"traceoff", ktap_lib_traceoff},
        {NULL}
};

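/*
 * ftrace_profile_set_filter() is not exported to modules, so resolve
 * it at runtime through kallsyms before registering the library.
 */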
void kp_init_kdebuglib(ktap_state *ks)
{
        kp_ftrace_profile_set_filter =
                (void *)kallsyms_lookup_name("ftrace_profile_set_filter");
        if (!kp_ftrace_profile_set_filter) {
                kp_error(ks, "ktap: cannot look up ftrace_profile_set_filter "
                                "in kallsyms\n");
                return;
        }

        kp_register_lib(ks, "kdebug", kdebuglib_funcs);
}