53935259f70196db7da81377ec369ff10f9a00c9
[platform/kernel/linux-starfive.git] / kernel / trace / trace_syscalls.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <trace/syscall.h>
3 #include <trace/events/syscalls.h>
4 #include <linux/syscalls.h>
5 #include <linux/slab.h>
6 #include <linux/kernel.h>
7 #include <linux/module.h>       /* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
8 #include <linux/ftrace.h>
9 #include <linux/perf_event.h>
10 #include <asm/syscall.h>
11
12 #include "trace_output.h"
13 #include "trace.h"
14
15 static DEFINE_MUTEX(syscall_trace_lock);
16
17 static int syscall_enter_register(struct trace_event_call *event,
18                                  enum trace_reg type, void *data);
19 static int syscall_exit_register(struct trace_event_call *event,
20                                  enum trace_reg type, void *data);
21
/*
 * Return the dynamically-built field list for a syscall-entry event.
 * The list is stored in the per-syscall metadata hung off call->data
 * rather than in the event class, since each syscall has its own args.
 */
22 static struct list_head *
23 syscall_get_enter_fields(struct trace_event_call *call)
24 {
25         struct syscall_metadata *entry = call->data;
26
27         return &entry->enter_fields;
28 }
29
30 extern struct syscall_metadata *__start_syscalls_metadata[];
31 extern struct syscall_metadata *__stop_syscalls_metadata[];
32
33 static struct syscall_metadata **syscalls_metadata;
34
/*
 * Default symbol matcher, used unless the arch overrides it.
 * Compares a kallsyms symbol against a syscall metadata name.
 */
35 #ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
36 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
37 {
38         /*
39          * Only compare after the "sys" prefix. Archs that use
40          * syscall wrappers may have syscalls symbols aliases prefixed
41          * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
42          * mismatch.
43          */
44         return !strcmp(sym + 3, name + 3);
45 }
46 #endif
47
48 #ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
49 /*
50  * Some architectures that allow for 32bit applications
51  * to run on a 64bit kernel, do not map the syscalls for
52  * the 32bit tasks the same as they do for 64bit tasks.
53  *
54  *     *cough*x86*cough*
55  *
56  * In such a case, instead of reporting the wrong syscalls,
57  * simply ignore them.
58  *
59  * For an arch to ignore the compat syscalls it needs to
60  * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
61  * define the function arch_trace_is_compat_syscall() to let
62  * the tracing system know that it should ignore it.
63  */
/* See the comment block above: compat (32-on-64) syscalls are ignored
 * on archs that define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS. */
64 static int
65 trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
66 {
67         if (unlikely(arch_trace_is_compat_syscall(regs)))
68                 return -1;      /* -1 == "do not trace this one" */
69
70         return syscall_get_nr(task, regs);
71 }
72 #else
73 /* No compat filtering needed: just read the syscall number from regs. */
74 static inline int
75 trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
76 {
77         return syscall_get_nr(task, regs);
78 }
79 #endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
79
/*
 * Map a syscall entry address to its syscall_metadata by resolving the
 * address to a symbol name (kallsyms) and scanning the metadata section
 * emitted by the SYSCALL_DEFINE macros.  Returns NULL for unimplemented
 * syscalls (sys_ni_syscall) or when no metadata matches.
 */
80 static __init struct syscall_metadata *
81 find_syscall_meta(unsigned long syscall)
82 {
83         struct syscall_metadata **start;
84         struct syscall_metadata **stop;
85         char str[KSYM_SYMBOL_LEN];
86
87
88         start = __start_syscalls_metadata;
89         stop = __stop_syscalls_metadata;
90         kallsyms_lookup(syscall, NULL, NULL, NULL, str);
91
92         if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
93                 return NULL;
94
95         /* Linear scan over the linker-built metadata array. */
96         for ( ; start < stop; start++) {
97                 if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
98                         return *start;
99         }
100         return NULL;
101 }
101
/*
 * Look up the metadata for syscall number @nr.  Returns NULL if the
 * table was never allocated (init_ftrace_syscalls() failed) or @nr is
 * out of range; callers must handle NULL.
 */
102 static struct syscall_metadata *syscall_nr_to_meta(int nr)
103 {
104         if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
105                 return NULL;
106
107         return syscalls_metadata[nr];
108 }
109
/*
 * Public helper: return the name of syscall number @syscall, or NULL
 * if no metadata is mapped for it.
 */
110 const char *get_syscall_name(int syscall)
111 {
112         struct syscall_metadata *entry;
113
114         entry = syscall_nr_to_meta(syscall);
115         if (!entry)
116                 return NULL;
117
118         return entry->name;
119 }
120
/*
 * Output one syscall-entry event to the trace seq as
 * "name(arg: val, ...)".  With the "verbose" trace flag set, each value
 * is prefixed by its C type.  On any lookup/overflow failure we still
 * emit the trailing newline so the trace output stays line-aligned.
 */
121 static enum print_line_t
122 print_syscall_enter(struct trace_iterator *iter, int flags,
123                     struct trace_event *event)
124 {
125         struct trace_array *tr = iter->tr;
126         struct trace_seq *s = &iter->seq;
127         struct trace_entry *ent = iter->ent;
128         struct syscall_trace_enter *trace;
129         struct syscall_metadata *entry;
130         int i, syscall;
131
132         trace = (typeof(trace))ent;
133         syscall = trace->nr;
134         entry = syscall_nr_to_meta(syscall);
135
136         if (!entry)
137                 goto end;
138
139         /* Recorded event type must match the enter event we resolved. */
140         if (entry->enter_event->event.type != ent->type) {
141                 WARN_ON_ONCE(1);
142                 goto end;
143         }
144
145         trace_seq_printf(s, "%s(", entry->name);
146
147         for (i = 0; i < entry->nb_args; i++) {
148
149                 if (trace_seq_has_overflowed(s))
150                         goto end;
151
152                 /* parameter types */
153                 if (tr->trace_flags & TRACE_ITER_VERBOSE)
154                         trace_seq_printf(s, "%s ", entry->types[i]);
155
156                 /* parameter values */
157                 trace_seq_printf(s, "%s: %lx%s", entry->args[i],
158                                  trace->args[i],
159                                  i == entry->nb_args - 1 ? "" : ", ");
160         }
161
162         trace_seq_putc(s, ')');
163 end:
164         trace_seq_putc(s, '\n');
165
166         return trace_handle_return(s);
167 }
167
/*
 * Output one syscall-exit event as "name -> 0xRET".  A type mismatch
 * returns TRACE_TYPE_UNHANDLED so another handler may claim the entry.
 */
168 static enum print_line_t
169 print_syscall_exit(struct trace_iterator *iter, int flags,
170                    struct trace_event *event)
171 {
172         struct trace_seq *s = &iter->seq;
173         struct trace_entry *ent = iter->ent;
174         struct syscall_trace_exit *trace;
175         int syscall;
176         struct syscall_metadata *entry;
177
178         trace = (typeof(trace))ent;
179         syscall = trace->nr;
180         entry = syscall_nr_to_meta(syscall);
181
182         if (!entry) {
183                 /* Unknown syscall: keep output line-aligned anyway. */
184                 trace_seq_putc(s, '\n');
185                 goto out;
186         }
187
188         if (entry->exit_event->event.type != ent->type) {
189                 WARN_ON_ONCE(1);
190                 return TRACE_TYPE_UNHANDLED;
191         }
192
193         trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
194                                 trace->ret);
195
196  out:
197         return trace_handle_return(s);
198 }
198
199 extern char *__bad_type_size(void);
200
/* Initializer for one static trace_event_field entry, deriving size,
 * alignment and signedness from the C type. */
201 #define SYSCALL_FIELD(_type, _name) {                                   \
202         .type = #_type, .name = #_name,                                 \
203         .size = sizeof(_type), .align = __alignof__(_type),             \
204         .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }
205
/*
 * Build the print_fmt string for a syscall-entry event:
 *   "arg0: 0x%08lx, arg1: 0x%08lx", ((unsigned long)(REC->arg0)), ...
 * Called twice: first with len == 0 to size the buffer, then with the
 * allocated buffer to actually write it (snprintf two-pass idiom).
 * Returns the formatted length (excluding the NUL terminator).
 */
206 static int __init
207 __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
208 {
209         int i;
210         int pos = 0;
211
212         /* When len=0, we just calculate the needed length */
213 #define LEN_OR_ZERO (len ? len - pos : 0)
214
215         pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
216         for (i = 0; i < entry->nb_args; i++) {
217                 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
218                                 entry->args[i], sizeof(unsigned long),
219                                 i == entry->nb_args - 1 ? "" : ", ");
220         }
221         pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
222
223         /* Argument list matching the format: one REC-> access per arg. */
224         for (i = 0; i < entry->nb_args; i++) {
225                 pos += snprintf(buf + pos, LEN_OR_ZERO,
226                                 ", ((unsigned long)(REC->%s))", entry->args[i]);
227         }
228
229 #undef LEN_OR_ZERO
230
231         /* return the length of print_fmt */
232         return pos;
233 }
233
/*
 * Install call->print_fmt.  Exit events share one static string; enter
 * events get a kmalloc'd per-syscall string (freed by
 * free_syscall_print_fmt()).  Returns 0 or -ENOMEM.
 */
234 static int __init set_syscall_print_fmt(struct trace_event_call *call)
235 {
236         char *print_fmt;
237         int len;
238         struct syscall_metadata *entry = call->data;
239
240         if (entry->enter_event != call) {
241                 call->print_fmt = "\"0x%lx\", REC->ret";
242                 return 0;
243         }
244
245         /* First: called with 0 length to calculate the needed length */
246         len = __set_enter_print_fmt(entry, NULL, 0);
247
248         print_fmt = kmalloc(len + 1, GFP_KERNEL);
249         if (!print_fmt)
250                 return -ENOMEM;
251
252         /* Second: actually write the @print_fmt */
253         __set_enter_print_fmt(entry, print_fmt, len + 1);
254         call->print_fmt = print_fmt;
255
256         return 0;
257 }
258
/*
 * Undo set_syscall_print_fmt(): only enter events own a kmalloc'd
 * print_fmt; the exit-event string is static and must not be freed.
 */
259 static void __init free_syscall_print_fmt(struct trace_event_call *call)
260 {
261         struct syscall_metadata *entry = call->data;
262
263         if (entry->enter_event == call)
264                 kfree(call->print_fmt);
265 }
266
/*
 * Define one ftrace field per syscall argument (each stored as an
 * unsigned long after the fixed header) for a syscall-entry event.
 * Returns 0 on success or the first trace_define_field() error.
 *
 * Fix: @ret must be initialized to 0 — syscalls with no arguments
 * (nb_args == 0) skip the loop entirely and would otherwise return
 * an uninitialized value (undefined behavior).
 */
267 static int __init syscall_enter_define_fields(struct trace_event_call *call)
268 {
269         struct syscall_trace_enter trace;
270         struct syscall_metadata *meta = call->data;
271         int offset = offsetof(typeof(trace), args);
272         int ret = 0;
273         int i;
274
275         for (i = 0; i < meta->nb_args; i++) {
276                 ret = trace_define_field(call, meta->types[i],
277                                          meta->args[i], offset,
278                                          sizeof(unsigned long), 0,
279                                          FILTER_OTHER);
280                 if (ret)
281                         break;
282                 offset += sizeof(unsigned long);
283         }
284
285         return ret;
286 }
286
/*
 * sys_enter tracepoint probe for ftrace: record the syscall number and
 * its arguments into @tr's ring buffer if the per-syscall event file is
 * enabled and not soft-disabled by a trigger.
 */
287 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
288 {
289         struct trace_array *tr = data;
290         struct trace_event_file *trace_file;
291         struct syscall_trace_enter *entry;
292         struct syscall_metadata *sys_data;
293         struct ring_buffer_event *event;
294         struct ring_buffer *buffer;
295         unsigned long irq_flags;
296         unsigned long args[6];
297         int pc;
298         int syscall_nr;
299         int size;
300
301         syscall_nr = trace_get_syscall_nr(current, regs);
302         if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
303                 return;
304
305         /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
306         trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
307         if (!trace_file)
308                 return;
309
310         if (trace_trigger_soft_disabled(trace_file))
311                 return;
312
313         sys_data = syscall_nr_to_meta(syscall_nr);
314         if (!sys_data)
315                 return;
316
317         /* Variable-size record: header plus one unsigned long per arg. */
318         size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
319
320         local_save_flags(irq_flags);
321         pc = preempt_count();
322
323         buffer = tr->trace_buffer.buffer;
324         event = trace_buffer_lock_reserve(buffer,
325                         sys_data->enter_event->event.type, size, irq_flags, pc);
326         if (!event)
327                 return;
328
329         entry = ring_buffer_event_data(event);
330         entry->nr = syscall_nr;
331         syscall_get_arguments(current, regs, args);
332         memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
333
334         /* Commit and run any event triggers/filters attached to the file. */
335         event_trigger_unlock_commit(trace_file, buffer, event, entry,
336                                     irq_flags, pc);
337 }
336
/*
 * sys_exit tracepoint probe for ftrace: record the syscall number and
 * return value into @tr's ring buffer.  Mirrors ftrace_syscall_enter()
 * but with a fixed-size record.
 */
337 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
338 {
339         struct trace_array *tr = data;
340         struct trace_event_file *trace_file;
341         struct syscall_trace_exit *entry;
342         struct syscall_metadata *sys_data;
343         struct ring_buffer_event *event;
344         struct ring_buffer *buffer;
345         unsigned long irq_flags;
346         int pc;
347         int syscall_nr;
348
349         syscall_nr = trace_get_syscall_nr(current, regs);
350         if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
351                 return;
352
353         /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
354         trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
355         if (!trace_file)
356                 return;
357
358         if (trace_trigger_soft_disabled(trace_file))
359                 return;
360
361         sys_data = syscall_nr_to_meta(syscall_nr);
362         if (!sys_data)
363                 return;
364
365         local_save_flags(irq_flags);
366         pc = preempt_count();
367
368         buffer = tr->trace_buffer.buffer;
369         event = trace_buffer_lock_reserve(buffer,
370                         sys_data->exit_event->event.type, sizeof(*entry),
371                         irq_flags, pc);
372         if (!event)
373                 return;
374
375         entry = ring_buffer_event_data(event);
376         entry->nr = syscall_nr;
377         entry->ret = syscall_get_return_value(current, regs);
378
379         event_trigger_unlock_commit(trace_file, buffer, event, entry,
380                                     irq_flags, pc);
381 }
382
/*
 * Enable ftrace tracing of one syscall's entry for @file's trace array.
 * The first enabled syscall registers the shared sys_enter tracepoint
 * probe; a per-array refcount tracks how many are active.  Serialized
 * by syscall_trace_lock.
 */
383 static int reg_event_syscall_enter(struct trace_event_file *file,
384                                    struct trace_event_call *call)
385 {
386         struct trace_array *tr = file->tr;
387         int ret = 0;
388         int num;
389
390         num = ((struct syscall_metadata *)call->data)->syscall_nr;
391         if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
392                 return -ENOSYS;
393         mutex_lock(&syscall_trace_lock);
394         if (!tr->sys_refcount_enter)
395                 ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
396         if (!ret) {
397                 /* Publish the file pointer for the probe's RCU lookup. */
398                 rcu_assign_pointer(tr->enter_syscall_files[num], file);
399                 tr->sys_refcount_enter++;
400         }
401         mutex_unlock(&syscall_trace_lock);
402         return ret;
403 }
403
/*
 * Disable ftrace tracing of one syscall's entry; unregisters the shared
 * sys_enter probe when the last one for this trace array goes away.
 */
404 static void unreg_event_syscall_enter(struct trace_event_file *file,
405                                       struct trace_event_call *call)
406 {
407         struct trace_array *tr = file->tr;
408         int num;
409
410         num = ((struct syscall_metadata *)call->data)->syscall_nr;
411         if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
412                 return;
413         mutex_lock(&syscall_trace_lock);
414         tr->sys_refcount_enter--;
415         RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
416         if (!tr->sys_refcount_enter)
417                 unregister_trace_sys_enter(ftrace_syscall_enter, tr);
418         mutex_unlock(&syscall_trace_lock);
419 }
420
/*
 * Exit-side counterpart of reg_event_syscall_enter(): registers the
 * shared sys_exit probe on first use and publishes @file for RCU lookup.
 */
421 static int reg_event_syscall_exit(struct trace_event_file *file,
422                                   struct trace_event_call *call)
423 {
424         struct trace_array *tr = file->tr;
425         int ret = 0;
426         int num;
427
428         num = ((struct syscall_metadata *)call->data)->syscall_nr;
429         if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
430                 return -ENOSYS;
431         mutex_lock(&syscall_trace_lock);
432         if (!tr->sys_refcount_exit)
433                 ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
434         if (!ret) {
435                 rcu_assign_pointer(tr->exit_syscall_files[num], file);
436                 tr->sys_refcount_exit++;
437         }
438         mutex_unlock(&syscall_trace_lock);
439         return ret;
440 }
441
/*
 * Exit-side counterpart of unreg_event_syscall_enter(): clears the RCU
 * pointer and drops the shared sys_exit probe when the count hits zero.
 */
442 static void unreg_event_syscall_exit(struct trace_event_file *file,
443                                      struct trace_event_call *call)
444 {
445         struct trace_array *tr = file->tr;
446         int num;
447
448         num = ((struct syscall_metadata *)call->data)->syscall_nr;
449         if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
450                 return;
451         mutex_lock(&syscall_trace_lock);
452         tr->sys_refcount_exit--;
453         RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
454         if (!tr->sys_refcount_exit)
455                 unregister_trace_sys_exit(ftrace_syscall_exit, tr);
456         mutex_unlock(&syscall_trace_lock);
457 }
458
/*
 * raw_init callback for both syscall event classes: validate the
 * syscall number, build print_fmt, then register the trace event.
 * Returns the event type id (>= 0) or a negative errno.
 */
459 static int __init init_syscall_trace(struct trace_event_call *call)
460 {
461         int id;
462         int num;
463
464         num = ((struct syscall_metadata *)call->data)->syscall_nr;
465         if (num < 0 || num >= NR_syscalls) {
466                 pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
467                                 ((struct syscall_metadata *)call->data)->name);
468                 return -ENOSYS;
469         }
470
471         if (set_syscall_print_fmt(call) < 0)
472                 return -ENOMEM;
473
474         id = trace_event_raw_init(call);
475
476         if (id < 0) {
477                 /* Registration failed: release the kmalloc'd print_fmt. */
478                 free_syscall_print_fmt(call);
479                 return id;
480         }
481
482         return id;
483 }
483
/* Enter-event field layout: a fixed __syscall_nr field followed by
 * per-syscall argument fields defined at runtime via the callback. */
484 static struct trace_event_fields __refdata syscall_enter_fields_array[] = {
485         SYSCALL_FIELD(int, __syscall_nr),
486         { .type = TRACE_FUNCTION_TYPE,
487           .define_fields = syscall_enter_define_fields },
488         {}
489 };
490
/* Output callbacks wired into the generic trace event machinery. */
491 struct trace_event_functions enter_syscall_print_funcs = {
492         .trace          = print_syscall_enter,
493 };
494
495 struct trace_event_functions exit_syscall_print_funcs = {
496         .trace          = print_syscall_exit,
497 };
498
/* Event class shared by every syscall-entry event; fields are provided
 * dynamically per syscall through get_fields/fields_array. */
499 struct trace_event_class __refdata event_class_syscall_enter = {
500         .system         = "syscalls",
501         .reg            = syscall_enter_register,
502         .fields_array   = syscall_enter_fields_array,
503         .get_fields     = syscall_get_enter_fields,
504         .raw_init       = init_syscall_trace,
505 };
506
/* Event class shared by every syscall-exit event; the field layout is
 * fixed (__syscall_nr + ret), so it can be a static compound literal. */
507 struct trace_event_class __refdata event_class_syscall_exit = {
508         .system         = "syscalls",
509         .reg            = syscall_exit_register,
510         .fields_array   = (struct trace_event_fields[]){
511                 SYSCALL_FIELD(int, __syscall_nr),
512                 SYSCALL_FIELD(long, ret),
513                 {}
514         },
515         .fields         = LIST_HEAD_INIT(event_class_syscall_exit.fields),
516         .raw_init       = init_syscall_trace,
517 };
518
/* Weak default: resolve syscall @nr to its handler address via the
 * generic syscall table.  Archs with special layouts override this. */
519 unsigned long __init __weak arch_syscall_addr(int nr)
520 {
521         return (unsigned long)sys_call_table[nr];
522 }
523
/*
 * Boot-time setup: build the nr -> syscall_metadata lookup table used
 * by syscall_nr_to_meta().  Syscalls without metadata (or on allocation
 * failure, the entire facility) are simply left unmapped.
 */
524 void __init init_ftrace_syscalls(void)
525 {
526         struct syscall_metadata *meta;
527         unsigned long addr;
528         int i;
529
530         syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
531                                     GFP_KERNEL);
532         if (!syscalls_metadata) {
533                 WARN_ON(1);
534                 return;
535         }
536
537         for (i = 0; i < NR_syscalls; i++) {
538                 addr = arch_syscall_addr(i);
539                 meta = find_syscall_meta(addr);
540                 if (!meta)
541                         continue;
542
543                 meta->syscall_nr = i;
544                 syscalls_metadata[i] = meta;
545         }
546 }
547
548 #ifdef CONFIG_PERF_EVENTS
549
550 static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
551 static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
552 static int sys_perf_refcount_enter;
553 static int sys_perf_refcount_exit;
554
/*
 * Invoke any BPF programs attached to a syscall-entry perf event.  The
 * on-stack param struct mirrors the layout BPF programs expect for
 * syscall tracepoints.  Returns the trace_call_bpf() verdict (nonzero
 * means "keep the event").
 */
555 static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
556                                struct syscall_metadata *sys_data,
557                                struct syscall_trace_enter *rec)
558 {
559         struct syscall_tp_t {
560                 unsigned long long regs;
561                 unsigned long syscall_nr;
562                 unsigned long args[SYSCALL_DEFINE_MAXARGS];
563         } param;
564         int i;
565
566         /* Store the regs pointer into the first (u64-sized) member. */
567         *(struct pt_regs **)&param = regs;
568         param.syscall_nr = rec->nr;
569         /* NOTE(review): args[] slots beyond nb_args are left unwritten
570          * (uninitialized stack) — confirm whether that is acceptable. */
570         for (i = 0; i < sys_data->nb_args; i++)
571                 param.args[i] = rec->args[i];
572         return trace_call_bpf(call, &param);
573 }
572
/*
 * sys_enter tracepoint probe for perf: build a syscall-entry record in
 * the perf trace buffer, give attached BPF programs a chance to filter
 * it, then submit it to the perf event(s) listening on this CPU.
 */
573 static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
574 {
575         struct syscall_metadata *sys_data;
576         struct syscall_trace_enter *rec;
577         struct hlist_head *head;
578         unsigned long args[6];
579         bool valid_prog_array;
580         int syscall_nr;
581         int rctx;
582         int size;
583
584         syscall_nr = trace_get_syscall_nr(current, regs);
585         if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
586                 return;
587         if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
588                 return;
589
590         sys_data = syscall_nr_to_meta(syscall_nr);
591         if (!sys_data)
592                 return;
593
594         head = this_cpu_ptr(sys_data->enter_event->perf_events);
595         valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
596         if (!valid_prog_array && hlist_empty(head))
597                 return;
598
599         /* get the size after alignment with the u32 buffer size field */
600         size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
601         size = ALIGN(size + sizeof(u32), sizeof(u64));
602         size -= sizeof(u32);
603
604         rec = perf_trace_buf_alloc(size, NULL, &rctx);
605         if (!rec)
606                 return;
607
608         rec->nr = syscall_nr;
609         syscall_get_arguments(current, regs, args);
610         memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
611
612         /* Drop the record if BPF filtered it out or nobody is listening. */
613         if ((valid_prog_array &&
614              !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
615             hlist_empty(head)) {
616                 perf_swevent_put_recursion_context(rctx);
617                 return;
618         }
619
620         perf_trace_buf_submit(rec, size, rctx,
621                               sys_data->enter_event->event.type, 1, regs,
622                               head, NULL);
623 }
623
/*
 * Enable perf tracing for one syscall's entry.  The first enabled
 * syscall registers the global perf sys_enter probe; a global refcount
 * and a per-syscall bitmap track what is active.
 */
624 static int perf_sysenter_enable(struct trace_event_call *call)
625 {
626         int ret = 0;
627         int num;
628
629         num = ((struct syscall_metadata *)call->data)->syscall_nr;
630
631         mutex_lock(&syscall_trace_lock);
632         if (!sys_perf_refcount_enter)
633                 ret = register_trace_sys_enter(perf_syscall_enter, NULL);
634         if (ret) {
635                 pr_info("event trace: Could not activate syscall entry trace point");
636         } else {
637                 set_bit(num, enabled_perf_enter_syscalls);
638                 sys_perf_refcount_enter++;
639         }
640         mutex_unlock(&syscall_trace_lock);
641         return ret;
642 }
643
/*
 * Disable perf tracing for one syscall's entry; unregisters the global
 * probe when the last enabled syscall goes away.
 */
644 static void perf_sysenter_disable(struct trace_event_call *call)
645 {
646         int num;
647
648         num = ((struct syscall_metadata *)call->data)->syscall_nr;
649
650         mutex_lock(&syscall_trace_lock);
651         sys_perf_refcount_enter--;
652         clear_bit(num, enabled_perf_enter_syscalls);
653         if (!sys_perf_refcount_enter)
654                 unregister_trace_sys_enter(perf_syscall_enter, NULL);
655         mutex_unlock(&syscall_trace_lock);
656 }
657
/*
 * Invoke any BPF programs attached to a syscall-exit perf event; the
 * param layout mirrors what BPF programs expect for exit tracepoints.
 * Returns the trace_call_bpf() verdict (nonzero means "keep").
 */
658 static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
659                               struct syscall_trace_exit *rec)
660 {
661         struct syscall_tp_t {
662                 unsigned long long regs;
663                 unsigned long syscall_nr;
664                 unsigned long ret;
665         } param;
666
667         /* Store the regs pointer into the first (u64-sized) member. */
667         *(struct pt_regs **)&param = regs;
668         param.syscall_nr = rec->nr;
669         param.ret = rec->ret;
670         return trace_call_bpf(call, &param);
671 }
672
/*
 * sys_exit tracepoint probe for perf: record the syscall number and
 * return value, run attached BPF filters, and submit to listeners.
 * Fixed-size counterpart of perf_syscall_enter().
 */
673 static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
674 {
675         struct syscall_metadata *sys_data;
676         struct syscall_trace_exit *rec;
677         struct hlist_head *head;
678         bool valid_prog_array;
679         int syscall_nr;
680         int rctx;
681         int size;
682
683         syscall_nr = trace_get_syscall_nr(current, regs);
684         if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
685                 return;
686         if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
687                 return;
688
689         sys_data = syscall_nr_to_meta(syscall_nr);
690         if (!sys_data)
691                 return;
692
693         head = this_cpu_ptr(sys_data->exit_event->perf_events);
694         valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
695         if (!valid_prog_array && hlist_empty(head))
696                 return;
697
698         /* We can probably do that at build time */
699         size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
700         size -= sizeof(u32);
701
702         rec = perf_trace_buf_alloc(size, NULL, &rctx);
703         if (!rec)
704                 return;
705
706         rec->nr = syscall_nr;
707         rec->ret = syscall_get_return_value(current, regs);
708
709         /* Drop the record if BPF filtered it out or nobody is listening. */
709         if ((valid_prog_array &&
710              !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
711             hlist_empty(head)) {
712                 perf_swevent_put_recursion_context(rctx);
713                 return;
714         }
715
716         perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
717                               1, regs, head, NULL);
718 }
719
/*
 * Exit-side counterpart of perf_sysenter_enable(): registers the global
 * perf sys_exit probe on first use.
 */
720 static int perf_sysexit_enable(struct trace_event_call *call)
721 {
722         int ret = 0;
723         int num;
724
725         num = ((struct syscall_metadata *)call->data)->syscall_nr;
726
727         mutex_lock(&syscall_trace_lock);
728         if (!sys_perf_refcount_exit)
729                 ret = register_trace_sys_exit(perf_syscall_exit, NULL);
730         if (ret) {
731                 pr_info("event trace: Could not activate syscall exit trace point");
732         } else {
733                 set_bit(num, enabled_perf_exit_syscalls);
734                 sys_perf_refcount_exit++;
735         }
736         mutex_unlock(&syscall_trace_lock);
737         return ret;
738 }
739
/*
 * Exit-side counterpart of perf_sysenter_disable(): drops the global
 * probe when the last enabled syscall goes away.
 */
740 static void perf_sysexit_disable(struct trace_event_call *call)
741 {
742         int num;
743
744         num = ((struct syscall_metadata *)call->data)->syscall_nr;
745
746         mutex_lock(&syscall_trace_lock);
747         sys_perf_refcount_exit--;
748         clear_bit(num, enabled_perf_exit_syscalls);
749         if (!sys_perf_refcount_exit)
750                 unregister_trace_sys_exit(perf_syscall_exit, NULL);
751         mutex_unlock(&syscall_trace_lock);
752 }
753
754 #endif /* CONFIG_PERF_EVENTS */
755
/*
 * Class .reg callback for syscall-entry events: dispatch ftrace and
 * perf enable/disable requests.  @data is the trace_event_file for the
 * ftrace cases; the perf open/close/add/del ops need no action here.
 */
756 static int syscall_enter_register(struct trace_event_call *event,
757                                  enum trace_reg type, void *data)
758 {
759         struct trace_event_file *file = data;
760
761         switch (type) {
762         case TRACE_REG_REGISTER:
763                 return reg_event_syscall_enter(file, event);
764         case TRACE_REG_UNREGISTER:
765                 unreg_event_syscall_enter(file, event);
766                 return 0;
767
768 #ifdef CONFIG_PERF_EVENTS
769         case TRACE_REG_PERF_REGISTER:
770                 return perf_sysenter_enable(event);
771         case TRACE_REG_PERF_UNREGISTER:
772                 perf_sysenter_disable(event);
773                 return 0;
774         case TRACE_REG_PERF_OPEN:
775         case TRACE_REG_PERF_CLOSE:
776         case TRACE_REG_PERF_ADD:
777         case TRACE_REG_PERF_DEL:
778                 return 0;
779 #endif
780         }
781         return 0;
782 }
783
/*
 * Class .reg callback for syscall-exit events; mirrors
 * syscall_enter_register() with the exit-side helpers.
 */
784 static int syscall_exit_register(struct trace_event_call *event,
785                                  enum trace_reg type, void *data)
786 {
787         struct trace_event_file *file = data;
788
789         switch (type) {
790         case TRACE_REG_REGISTER:
791                 return reg_event_syscall_exit(file, event);
792         case TRACE_REG_UNREGISTER:
793                 unreg_event_syscall_exit(file, event);
794                 return 0;
795
796 #ifdef CONFIG_PERF_EVENTS
797         case TRACE_REG_PERF_REGISTER:
798                 return perf_sysexit_enable(event);
799         case TRACE_REG_PERF_UNREGISTER:
800                 perf_sysexit_disable(event);
801                 return 0;
802         case TRACE_REG_PERF_OPEN:
803         case TRACE_REG_PERF_CLOSE:
804         case TRACE_REG_PERF_ADD:
805         case TRACE_REG_PERF_DEL:
806                 return 0;
807 #endif
808         }
809         return 0;
810 }