#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

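/*
 * syscall_trace_lock protects the enable/disable refcounts and the
 * enabled-syscall bitmaps used by the registration helpers below.
 */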
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

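/*
 * __start_syscalls_metadata/__stop_syscalls_metadata are linker-provided
 * bounds of the section that collects a syscall_metadata pointer for every
 * traced SYSCALL_DEFINE*()-style definition; syscalls_metadata below is the
 * NR_syscalls-sized table, indexed by syscall number, built from it at boot.
 */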
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32bit applications
 * to run on a 64bit kernel do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 * In such a case, instead of reporting the wrong syscalls,
 * just ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore them.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

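/*
 * Resolve a syscall handler address to its metadata: look up the symbol
 * name with kallsyms and match it against every entry between the section
 * bounds. sys_ni_syscall (the "not implemented" stub) is skipped so that
 * unused table slots do not all map to the same metadata entry.
 */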
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}

	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

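/*
 * Text output for a sys_enter event, e.g. (illustrative)
 * "sys_read(fd: 3, buf: 7fffa3c0, count: 400)"; with the verbose trace
 * option the argument types are printed before each value as well.
 */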
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

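/*
 * Text output for a sys_exit event: "<name> -> 0x<return value>". If the
 * syscall number has no metadata, only a newline is emitted so the line
 * is still terminated.
 */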
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

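/*
 * __bad_type_size() is declared but intentionally never defined: if the
 * type handed to SYSCALL_FIELD() does not match the size of the struct
 * member, the (otherwise compile-time dead) call survives and the build
 * fails at link time. When the sizes match, the macro expands to the
 * name/offset/size/sign arguments expected by trace_define_field().
 */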
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

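/*
 * Build the "print fmt" string exported for an enter event. Called twice:
 * once with a NULL buffer and len == 0 to size the string, then again to
 * fill the freshly allocated buffer (see set_syscall_print_fmt() below).
 */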
static int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

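/*
 * Probe attached to the raw sys_enter tracepoint: records the syscall
 * number and its arguments into the ring buffer of the trace_array that
 * registered it, provided tracing of this syscall is enabled there.
 */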
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event,
						   irq_flags, pc);
}

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event,
						   irq_flags, pc);
}

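/*
 * Enable/disable helpers for the ftrace path. The first enabled syscall
 * enter (or exit) event of a trace_array registers the shared tracepoint
 * probe; the per-syscall bit in the enabled bitmap is what turns an
 * individual event on. The last disable unregisters the probe again.
 */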
static int reg_event_syscall_enter(struct ftrace_event_file *file,
				   struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		set_bit(num, tr->enabled_enter_syscalls);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_file *file,
				      struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	clear_bit(num, tr->enabled_enter_syscalls);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct ftrace_event_file *file,
				  struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		set_bit(num, tr->enabled_exit_syscalls);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_file *file,
				     struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	clear_bit(num, tr->enabled_exit_syscalls);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
			 ((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

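/*
 * Output callbacks and event classes shared by every syscall event; the
 * per-syscall ftrace_event_call instances generated for the
 * SYSCALL_DEFINE*() wrappers point at these.
 */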
struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

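/*
 * Default, __weak translation from syscall number to handler address; an
 * architecture whose sys_call_table entries are not plain function
 * pointers can override it.
 */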
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

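/*
 * perf support for the syscall tracepoint events. Unlike the ftrace path
 * above, the enabled bitmaps and refcounts here are global rather than per
 * trace_array, and records are written through the per-CPU perf trace
 * buffers.
 */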
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point\n");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible today, but stay paranoid about the future.
	 * How could this check be moved out of the runtime path?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point\n");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

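/*
 * ->reg() callbacks of the two event classes: dispatch ftrace and perf
 * (un)registration requests to the helpers above.
 */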
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}