1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 #include "trace_output.h"
58 * On boot up, the ring buffer is set to the minimum size, so that
59 * we do not waste memory on systems that are not using tracing.
61 bool ring_buffer_expanded;
63 #ifdef CONFIG_FTRACE_STARTUP_TEST
65 * We need to change this state when a selftest is running.
66 * A selftest will look into the ring buffer to count the
67 * entries inserted during the selftest, although concurrent
68 * insertions into the ring buffer, such as trace_printk(), could occur
69 * at the same time, giving false positive or negative results.
71 static bool __read_mostly tracing_selftest_running;
74 * If boot-time tracing (including tracers/events set up via the kernel cmdline)
75 * is running, we do not want to run the selftests.
77 bool __read_mostly tracing_selftest_disabled;
79 void __init disable_tracing_selftest(const char *reason)
81 if (!tracing_selftest_disabled) {
82 tracing_selftest_disabled = true;
83 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 #define tracing_selftest_running 0
88 #define tracing_selftest_disabled 0
91 /* Pipe tracepoints to printk */
92 static struct trace_iterator *tracepoint_print_iter;
93 int tracepoint_printk;
94 static bool tracepoint_printk_stop_on_boot __initdata;
95 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
97 /* For tracers that don't implement custom flags */
98 static struct tracer_opt dummy_tracer_opt[] = {
103 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
109 * To prevent the comm cache from being overwritten when no
110 * tracing is active, only save the comm when a trace event occurred.
113 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
116 * Kill all tracing for good (never come back).
117 * It is initialized to 1 but will turn to zero if the initialization
118 * of the tracer is successful. But that is the only place that sets this back to zero.
121 static int tracing_disabled = 1;
123 cpumask_var_t __read_mostly tracing_buffer_mask;
126 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
128 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
129 * is set, then ftrace_dump is called. This will output the contents
130 * of the ftrace buffers to the console. This is very useful for
131 * capturing traces that lead to crashes and outputting them to a serial console.
134 * It is off by default, but you can enable it either by specifying
135 * "ftrace_dump_on_oops" on the kernel command line, or by setting
136 * /proc/sys/kernel/ftrace_dump_on_oops
137 * Set it to 1 if you want to dump the buffers of all CPUs
138 * Set it to 2 if you want to dump the buffer of the CPU that triggered the oops
141 enum ftrace_dump_mode ftrace_dump_on_oops;
143 /* When set, tracing will stop when a WARN*() is hit */
144 int __disable_trace_on_warning;
146 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
147 /* Map of enums to their values, for "eval_map" file */
148 struct trace_eval_map_head {
150 unsigned long length;
153 union trace_eval_map_item;
155 struct trace_eval_map_tail {
157 * "end" is first and points to NULL as it must be different
158 * from "mod" or "eval_string"
160 union trace_eval_map_item *next;
161 const char *end; /* points to NULL */
164 static DEFINE_MUTEX(trace_eval_mutex);
167 * The trace_eval_maps are saved in an array with two extra elements,
168 * one at the beginning, and one at the end. The beginning item contains
169 * the count of the saved maps (head.length), and the module they
170 * belong to if not built in (head.mod). The ending item contains a
171 * pointer to the next array of saved eval_map items.
173 union trace_eval_map_item {
174 struct trace_eval_map map;
175 struct trace_eval_map_head head;
176 struct trace_eval_map_tail tail;
179 static union trace_eval_map_item *trace_eval_maps;
180 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
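/*
 * Illustrative layout (not part of this file): with the head/map/tail scheme
 * described above, a module "foo" (made-up name) that saved two eval maps
 * would be stored as
 *
 *   trace_eval_maps --> [ head: length = 2, mod = foo        ]
 *                       [ map:  saved eval map #1            ]
 *                       [ map:  saved eval map #2            ]
 *                       [ tail: next -> next saved array     ]
 *
 * where each bracketed row is one union trace_eval_map_item.
 */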
182 int tracing_set_tracer(struct trace_array *tr, const char *buf);
183 static void ftrace_trace_userstack(struct trace_array *tr,
184 struct trace_buffer *buffer,
185 unsigned int trace_ctx);
187 #define MAX_TRACER_SIZE 100
188 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
189 static char *default_bootup_tracer;
191 static bool allocate_snapshot;
192 static bool snapshot_at_boot;
194 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_instance_index;
197 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
198 static int boot_snapshot_index;
200 static int __init set_cmdline_ftrace(char *str)
202 strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
203 default_bootup_tracer = bootup_tracer_buf;
204 /* We are using ftrace early, expand it */
205 ring_buffer_expanded = true;
208 __setup("ftrace=", set_cmdline_ftrace);
210 static int __init set_ftrace_dump_on_oops(char *str)
212 if (*str++ != '=' || !*str || !strcmp("1", str)) {
213 ftrace_dump_on_oops = DUMP_ALL;
217 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
218 ftrace_dump_on_oops = DUMP_ORIG;
224 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
226 static int __init stop_trace_on_warning(char *str)
228 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
229 __disable_trace_on_warning = 1;
232 __setup("traceoff_on_warning", stop_trace_on_warning);
234 static int __init boot_alloc_snapshot(char *str)
236 char *slot = boot_snapshot_info + boot_snapshot_index;
237 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
242 if (strlen(str) >= left)
245 ret = snprintf(slot, left, "%s\t", str);
246 boot_snapshot_index += ret;
248 allocate_snapshot = true;
249 /* We also need the main ring buffer expanded */
250 ring_buffer_expanded = true;
254 __setup("alloc_snapshot", boot_alloc_snapshot);
257 static int __init boot_snapshot(char *str)
259 snapshot_at_boot = true;
260 boot_alloc_snapshot(str);
263 __setup("ftrace_boot_snapshot", boot_snapshot);
266 static int __init boot_instance(char *str)
268 char *slot = boot_instance_info + boot_instance_index;
269 int left = sizeof(boot_instance_info) - boot_instance_index;
272 if (strlen(str) >= left)
275 ret = snprintf(slot, left, "%s\t", str);
276 boot_instance_index += ret;
280 __setup("trace_instance=", boot_instance);
283 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
285 static int __init set_trace_boot_options(char *str)
287 strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
290 __setup("trace_options=", set_trace_boot_options);
292 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
293 static char *trace_boot_clock __initdata;
295 static int __init set_trace_boot_clock(char *str)
297 strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
298 trace_boot_clock = trace_boot_clock_buf;
301 __setup("trace_clock=", set_trace_boot_clock);
303 static int __init set_tracepoint_printk(char *str)
305 /* Ignore the "tp_printk_stop_on_boot" param */
309 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
310 tracepoint_printk = 1;
313 __setup("tp_printk", set_tracepoint_printk);
315 static int __init set_tracepoint_printk_stop(char *str)
317 tracepoint_printk_stop_on_boot = true;
320 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
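/*
 * Illustrative example (not part of this file): the boot parameters handled
 * above can be combined on the kernel command line, e.g.
 *
 *   ftrace=function_graph trace_options=sym-addr trace_clock=global
 *   traceoff_on_warning ftrace_dump_on_oops=orig_cpu tp_printk
 *   trace_instance=foo alloc_snapshot
 *
 * where "foo" is just a made-up instance name.
 */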
322 unsigned long long ns2usecs(u64 nsec)
330 trace_process_export(struct trace_export *export,
331 struct ring_buffer_event *event, int flag)
333 struct trace_entry *entry;
334 unsigned int size = 0;
336 if (export->flags & flag) {
337 entry = ring_buffer_event_data(event);
338 size = ring_buffer_event_length(event);
339 export->write(export, entry, size);
343 static DEFINE_MUTEX(ftrace_export_lock);
345 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
347 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
348 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
349 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
351 static inline void ftrace_exports_enable(struct trace_export *export)
353 if (export->flags & TRACE_EXPORT_FUNCTION)
354 static_branch_inc(&trace_function_exports_enabled);
356 if (export->flags & TRACE_EXPORT_EVENT)
357 static_branch_inc(&trace_event_exports_enabled);
359 if (export->flags & TRACE_EXPORT_MARKER)
360 static_branch_inc(&trace_marker_exports_enabled);
363 static inline void ftrace_exports_disable(struct trace_export *export)
365 if (export->flags & TRACE_EXPORT_FUNCTION)
366 static_branch_dec(&trace_function_exports_enabled);
368 if (export->flags & TRACE_EXPORT_EVENT)
369 static_branch_dec(&trace_event_exports_enabled);
371 if (export->flags & TRACE_EXPORT_MARKER)
372 static_branch_dec(&trace_marker_exports_enabled);
375 static void ftrace_exports(struct ring_buffer_event *event, int flag)
377 struct trace_export *export;
379 preempt_disable_notrace();
381 export = rcu_dereference_raw_check(ftrace_exports_list);
383 trace_process_export(export, event, flag);
384 export = rcu_dereference_raw_check(export->next);
387 preempt_enable_notrace();
391 add_trace_export(struct trace_export **list, struct trace_export *export)
393 rcu_assign_pointer(export->next, *list);
395 * We are adding export to the list, but another
396 * CPU might be walking that list. We need to make sure
397 * the export->next pointer is valid before another CPU sees
398 * the export pointer included in the list.
400 rcu_assign_pointer(*list, export);
404 rm_trace_export(struct trace_export **list, struct trace_export *export)
406 struct trace_export **p;
408 for (p = list; *p != NULL; p = &(*p)->next)
415 rcu_assign_pointer(*p, (*p)->next);
421 add_ftrace_export(struct trace_export **list, struct trace_export *export)
423 ftrace_exports_enable(export);
425 add_trace_export(list, export);
429 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
433 ret = rm_trace_export(list, export);
434 ftrace_exports_disable(export);
439 int register_ftrace_export(struct trace_export *export)
441 if (WARN_ON_ONCE(!export->write))
444 mutex_lock(&ftrace_export_lock);
446 add_ftrace_export(&ftrace_exports_list, export);
448 mutex_unlock(&ftrace_export_lock);
452 EXPORT_SYMBOL_GPL(register_ftrace_export);
454 int unregister_ftrace_export(struct trace_export *export)
458 mutex_lock(&ftrace_export_lock);
460 ret = rm_ftrace_export(&ftrace_exports_list, export);
462 mutex_unlock(&ftrace_export_lock);
466 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
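/*
 * Illustrative sketch (not part of this file): a minimal trace_export
 * client. The callback and variable names are made up; only struct
 * trace_export, the TRACE_EXPORT_* flags and the register/unregister
 * helpers above are real.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* Forward the raw binary trace entry somewhere out of the kernel. */
	pr_debug("exporting %u bytes of trace data\n", size);
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_EVENT | TRACE_EXPORT_MARKER,
};

static int example_export_start(void)
{
	return register_ftrace_export(&example_export);
}

static void example_export_stop(void)
{
	unregister_ftrace_export(&example_export);
}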
468 /* trace_flags holds trace_options default values */
469 #define TRACE_DEFAULT_FLAGS \
470 (FUNCTION_DEFAULT_FLAGS | \
471 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
472 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
473 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
474 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
477 /* trace_options that are only supported by global_trace */
478 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
479 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
481 /* trace_flags that are default zero for instances */
482 #define ZEROED_TRACE_FLAGS \
483 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
486 * The global_trace is the descriptor that holds the top-level tracing
487 * buffers for the live tracing.
489 static struct trace_array global_trace = {
490 .trace_flags = TRACE_DEFAULT_FLAGS,
493 LIST_HEAD(ftrace_trace_arrays);
495 int trace_array_get(struct trace_array *this_tr)
497 struct trace_array *tr;
500 mutex_lock(&trace_types_lock);
501 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
508 mutex_unlock(&trace_types_lock);
513 static void __trace_array_put(struct trace_array *this_tr)
515 WARN_ON(!this_tr->ref);
520 * trace_array_put - Decrement the reference counter for this trace array.
521 * @this_tr : pointer to the trace array
523 * NOTE: Use this when we no longer need the trace array returned by
524 * trace_array_get_by_name(). This ensures the trace array can later be destroyed.
528 void trace_array_put(struct trace_array *this_tr)
533 mutex_lock(&trace_types_lock);
534 __trace_array_put(this_tr);
535 mutex_unlock(&trace_types_lock);
537 EXPORT_SYMBOL_GPL(trace_array_put);
539 int tracing_check_open_get_tr(struct trace_array *tr)
543 ret = security_locked_down(LOCKDOWN_TRACEFS);
547 if (tracing_disabled)
550 if (tr && trace_array_get(tr) < 0)
556 int call_filter_check_discard(struct trace_event_call *call, void *rec,
557 struct trace_buffer *buffer,
558 struct ring_buffer_event *event)
560 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
561 !filter_match_preds(call->filter, rec)) {
562 __trace_event_discard_commit(buffer, event);
570 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
571 * @filtered_pids: The list of pids to check
572 * @search_pid: The PID to find in @filtered_pids
574 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
577 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
579 return trace_pid_list_is_set(filtered_pids, search_pid);
583 * trace_ignore_this_task - should a task be ignored for tracing
584 * @filtered_pids: The list of pids to check
585 * @filtered_no_pids: The list of pids not to be traced
586 * @task: The task that should be ignored if not filtered
588 * Checks if @task should be traced or not from @filtered_pids.
589 * Returns true if @task should *NOT* be traced.
590 * Returns false if @task should be traced.
593 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
594 struct trace_pid_list *filtered_no_pids,
595 struct task_struct *task)
598 * If filtered_no_pids is not empty, and the task's pid is listed
599 * in filtered_no_pids, then return true.
600 * Otherwise, if filtered_pids is empty, that means we can
601 * trace all tasks. If it has content, then only trace pids
602 * within filtered_pids.
605 return (filtered_pids &&
606 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
608 trace_find_filtered_pid(filtered_no_pids, task->pid));
612 * trace_filter_add_remove_task - Add or remove a task from a pid_list
613 * @pid_list: The list to modify
614 * @self: The current task for fork or NULL for exit
615 * @task: The task to add or remove
617 * When adding a task, if @self is defined, the task is only added if @self
618 * is also included in @pid_list. This happens on fork, and tasks should
619 * only be added when the parent is listed. If @self is NULL, then the
620 * @task pid will be removed from the list, which would happen on the exit of a task.
623 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
624 struct task_struct *self,
625 struct task_struct *task)
630 /* For forks, we only add if the forking task is listed */
632 if (!trace_find_filtered_pid(pid_list, self->pid))
636 /* "self" is set for forks, and NULL for exits */
638 trace_pid_list_set(pid_list, task->pid);
640 trace_pid_list_clear(pid_list, task->pid);
644 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
645 * @pid_list: The pid list to show
646 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
647 * @pos: The position of the file
649 * This is used by the seq_file "next" operation to iterate the pids
650 * listed in a trace_pid_list structure.
652 * Returns the pid+1 as we want to display pid of zero, but NULL would
653 * stop the iteration.
655 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
657 long pid = (unsigned long)v;
662 /* pid already is +1 of the actual previous bit */
663 if (trace_pid_list_next(pid_list, pid, &next) < 0)
668 /* Return pid + 1 to allow zero to be represented */
669 return (void *)(pid + 1);
673 * trace_pid_start - Used for seq_file to start reading pid lists
674 * @pid_list: The pid list to show
675 * @pos: The position of the file
677 * This is used by seq_file "start" operation to start the iteration
680 * Returns the pid+1 as we want to display pid of zero, but NULL would
681 * stop the iteration.
683 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
689 if (trace_pid_list_first(pid_list, &first) < 0)
694 /* Return pid + 1 so that zero can be the exit value */
695 for (pid++; pid && l < *pos;
696 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
702 * trace_pid_show - show the current pid in seq_file processing
703 * @m: The seq_file structure to write into
704 * @v: A void pointer of the pid (+1) value to display
706 * Can be directly used by seq_file operations to display the current pid value.
709 int trace_pid_show(struct seq_file *m, void *v)
711 unsigned long pid = (unsigned long)v - 1;
713 seq_printf(m, "%lu\n", pid);
717 /* 128 should be much more than enough */
718 #define PID_BUF_SIZE 127
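/*
 * Illustrative sketch (not part of this file): how the trace_pid_start(),
 * trace_pid_next() and trace_pid_show() helpers above are typically wired
 * into seq_file operations. The wrapper names are made up, and fetching the
 * pid list from m->private is a simplifying assumption; real users look it
 * up from their own tracefs file data.
 */
static void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;	/* hypothetical */

	return trace_pid_start(pid_list, pos);
}

static void *example_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;	/* hypothetical */

	return trace_pid_next(pid_list, v, pos);
}

static void example_pid_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_seq_start,
	.next	= example_pid_seq_next,
	.stop	= example_pid_seq_stop,
	.show	= trace_pid_show,
};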
720 int trace_pid_write(struct trace_pid_list *filtered_pids,
721 struct trace_pid_list **new_pid_list,
722 const char __user *ubuf, size_t cnt)
724 struct trace_pid_list *pid_list;
725 struct trace_parser parser;
733 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
737 * Always recreate a new array. The write is an all-or-nothing
738 * operation. Always create a new array when adding new pids by
739 * the user. If the operation fails, then the current list is not modified.
742 pid_list = trace_pid_list_alloc();
744 trace_parser_put(&parser);
749 /* copy the current bits to the new max */
750 ret = trace_pid_list_first(filtered_pids, &pid);
752 trace_pid_list_set(pid_list, pid);
753 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
763 ret = trace_get_user(&parser, ubuf, cnt, &pos);
771 if (!trace_parser_loaded(&parser))
775 if (kstrtoul(parser.buffer, 0, &val))
780 if (trace_pid_list_set(pid_list, pid) < 0) {
786 trace_parser_clear(&parser);
789 trace_parser_put(&parser);
792 trace_pid_list_free(pid_list);
797 /* Cleared the list of pids */
798 trace_pid_list_free(pid_list);
802 *new_pid_list = pid_list;
807 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
811 /* Early boot up does not have a buffer yet */
813 return trace_clock_local();
815 ts = ring_buffer_time_stamp(buf->buffer);
816 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
821 u64 ftrace_now(int cpu)
823 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
827 * tracing_is_enabled - Show if global_trace has been enabled
829 * Shows if the global trace has been enabled or not. It uses the
830 * mirror flag "buffer_disabled" so it can be used in fast paths such as by
831 * the irqsoff tracer. But it may be inaccurate due to races. If you
832 * need to know the accurate state, use tracing_is_on() which is a little
833 * slower, but accurate.
835 int tracing_is_enabled(void)
838 * For quick access (irqsoff uses this in fast path), just
839 * return the mirror variable of the state of the ring buffer.
840 * It's a little racy, but we don't really care.
843 return !global_trace.buffer_disabled;
847 * trace_buf_size is the size in bytes that is allocated
848 * for a buffer. Note, the number of bytes is always rounded to page size.
851 * This number is purposely set to a low value of 16384.
852 * If a dump on oops happens, it will be much appreciated
853 * not to have to wait for all that output. Anyway, this can be
854 * configured at boot time and at run time.
856 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
858 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
860 /* trace_types holds a linked list of available tracers. */
861 static struct tracer *trace_types __read_mostly;
864 * trace_types_lock is used to protect the trace_types list.
866 DEFINE_MUTEX(trace_types_lock);
869 * serialize the access of the ring buffer
871 * The ring buffer serializes readers, but that is only low-level protection.
872 * The validity of the events (which are returned by ring_buffer_peek(), etc.)
873 * is not protected by the ring buffer.
875 * The content of events may become garbage if we allow other processes to
876 * consume these events concurrently:
877 * A) the page of the consumed events may become a normal page
878 *    (not a reader page) in the ring buffer, and this page will be rewritten
879 *    by the event producer.
880 * B) the page of the consumed events may become a page for splice_read,
881 *    and this page will be returned to the system.
883 * These primitives allow multi-process access to different CPU ring buffers concurrently.
886 * These primitives don't distinguish between read-only and read-consume access.
887 * Multiple read-only accesses are also serialized.
891 static DECLARE_RWSEM(all_cpu_access_lock);
892 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
894 static inline void trace_access_lock(int cpu)
896 if (cpu == RING_BUFFER_ALL_CPUS) {
897 /* gain it for accessing the whole ring buffer. */
898 down_write(&all_cpu_access_lock);
900 /* gain it for accessing a cpu ring buffer. */
902 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
903 down_read(&all_cpu_access_lock);
905 /* Secondly block other access to this @cpu ring buffer. */
906 mutex_lock(&per_cpu(cpu_access_lock, cpu));
910 static inline void trace_access_unlock(int cpu)
912 if (cpu == RING_BUFFER_ALL_CPUS) {
913 up_write(&all_cpu_access_lock);
915 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
916 up_read(&all_cpu_access_lock);
920 static inline void trace_access_lock_init(void)
924 for_each_possible_cpu(cpu)
925 mutex_init(&per_cpu(cpu_access_lock, cpu));
930 static DEFINE_MUTEX(access_lock);
932 static inline void trace_access_lock(int cpu)
935 mutex_lock(&access_lock);
938 static inline void trace_access_unlock(int cpu)
941 mutex_unlock(&access_lock);
944 static inline void trace_access_lock_init(void)
950 #ifdef CONFIG_STACKTRACE
951 static void __ftrace_trace_stack(struct trace_buffer *buffer,
952 unsigned int trace_ctx,
953 int skip, struct pt_regs *regs);
954 static inline void ftrace_trace_stack(struct trace_array *tr,
955 struct trace_buffer *buffer,
956 unsigned int trace_ctx,
957 int skip, struct pt_regs *regs);
960 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
961 unsigned int trace_ctx,
962 int skip, struct pt_regs *regs)
965 static inline void ftrace_trace_stack(struct trace_array *tr,
966 struct trace_buffer *buffer,
967 unsigned long trace_ctx,
968 int skip, struct pt_regs *regs)
974 static __always_inline void
975 trace_event_setup(struct ring_buffer_event *event,
976 int type, unsigned int trace_ctx)
978 struct trace_entry *ent = ring_buffer_event_data(event);
980 tracing_generic_entry_update(ent, type, trace_ctx);
983 static __always_inline struct ring_buffer_event *
984 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
987 unsigned int trace_ctx)
989 struct ring_buffer_event *event;
991 event = ring_buffer_lock_reserve(buffer, len);
993 trace_event_setup(event, type, trace_ctx);
998 void tracer_tracing_on(struct trace_array *tr)
1000 if (tr->array_buffer.buffer)
1001 ring_buffer_record_on(tr->array_buffer.buffer);
1003 * This flag is looked at when buffers haven't been allocated
1004 * yet, or by some tracers (like irqsoff) that just want to
1005 * know if the ring buffer has been disabled, but can handle
1006 * races where it gets disabled while we still do a record.
1007 * As the check is in the fast path of the tracers, it is more
1008 * important to be fast than accurate.
1010 tr->buffer_disabled = 0;
1011 /* Make the flag seen by readers */
1016 * tracing_on - enable tracing buffers
1018 * This function enables tracing buffers that may have been
1019 * disabled with tracing_off.
1021 void tracing_on(void)
1023 tracer_tracing_on(&global_trace);
1025 EXPORT_SYMBOL_GPL(tracing_on);
1028 static __always_inline void
1029 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1031 __this_cpu_write(trace_taskinfo_save, true);
1033 /* If this is the temp buffer, we need to commit fully */
1034 if (this_cpu_read(trace_buffered_event) == event) {
1035 /* Length is in event->array[0] */
1036 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1037 /* Release the temp buffer */
1038 this_cpu_dec(trace_buffered_event_cnt);
1039 /* ring_buffer_unlock_commit() enables preemption */
1040 preempt_enable_notrace();
1042 ring_buffer_unlock_commit(buffer);
1045 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1046 const char *str, int size)
1048 struct ring_buffer_event *event;
1049 struct trace_buffer *buffer;
1050 struct print_entry *entry;
1051 unsigned int trace_ctx;
1054 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1057 if (unlikely(tracing_selftest_running && tr == &global_trace))
1060 if (unlikely(tracing_disabled))
1063 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1065 trace_ctx = tracing_gen_ctx();
1066 buffer = tr->array_buffer.buffer;
1067 ring_buffer_nest_start(buffer);
1068 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1075 entry = ring_buffer_event_data(event);
1078 memcpy(&entry->buf, str, size);
1080 /* Add a newline if necessary */
1081 if (entry->buf[size - 1] != '\n') {
1082 entry->buf[size] = '\n';
1083 entry->buf[size + 1] = '\0';
1085 entry->buf[size] = '\0';
1087 __buffer_unlock_commit(buffer, event);
1088 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1090 ring_buffer_nest_end(buffer);
1093 EXPORT_SYMBOL_GPL(__trace_array_puts);
1096 * __trace_puts - write a constant string into the trace buffer.
1097 * @ip: The address of the caller
1098 * @str: The constant string to write
1099 * @size: The size of the string.
1101 int __trace_puts(unsigned long ip, const char *str, int size)
1103 return __trace_array_puts(&global_trace, ip, str, size);
1105 EXPORT_SYMBOL_GPL(__trace_puts);
1108 * __trace_bputs - write the pointer to a constant string into the trace buffer
1109 * @ip: The address of the caller
1110 * @str: The constant string to write to the buffer
1112 int __trace_bputs(unsigned long ip, const char *str)
1114 struct ring_buffer_event *event;
1115 struct trace_buffer *buffer;
1116 struct bputs_entry *entry;
1117 unsigned int trace_ctx;
1118 int size = sizeof(struct bputs_entry);
1121 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1124 if (unlikely(tracing_selftest_running || tracing_disabled))
1127 trace_ctx = tracing_gen_ctx();
1128 buffer = global_trace.array_buffer.buffer;
1130 ring_buffer_nest_start(buffer);
1131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1136 entry = ring_buffer_event_data(event);
1140 __buffer_unlock_commit(buffer, event);
1141 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1145 ring_buffer_nest_end(buffer);
1148 EXPORT_SYMBOL_GPL(__trace_bputs);
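/*
 * Note (illustrative, not part of this file): callers normally use the
 * trace_puts() macro rather than these helpers directly; it picks
 * __trace_bputs() when the string is a build-time constant (only the
 * pointer is recorded) and falls back to __trace_puts() otherwise, e.g.
 *
 *	trace_puts("reached the slow path\n");
 */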
1150 #ifdef CONFIG_TRACER_SNAPSHOT
1151 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1154 struct tracer *tracer = tr->current_trace;
1155 unsigned long flags;
1158 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1159 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1163 if (!tr->allocated_snapshot) {
1164 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1165 trace_array_puts(tr, "*** stopping trace here! ***\n");
1166 tracer_tracing_off(tr);
1170 /* Note, snapshot can not be used when the tracer uses it */
1171 if (tracer->use_max_tr) {
1172 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1173 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1177 local_irq_save(flags);
1178 update_max_tr(tr, current, smp_processor_id(), cond_data);
1179 local_irq_restore(flags);
1182 void tracing_snapshot_instance(struct trace_array *tr)
1184 tracing_snapshot_instance_cond(tr, NULL);
1188 * tracing_snapshot - take a snapshot of the current buffer.
1190 * This causes a swap between the snapshot buffer and the current live
1191 * tracing buffer. You can use this to take snapshots of the live
1192 * trace when some condition is triggered, but continue to trace.
1194 * Note, make sure to allocate the snapshot either with
1195 * tracing_snapshot_alloc(), or manually
1196 * with: echo 1 > /sys/kernel/tracing/snapshot
1198 * If the snapshot buffer is not allocated, it will stop tracing.
1199 * Basically making a permanent snapshot.
1201 void tracing_snapshot(void)
1203 struct trace_array *tr = &global_trace;
1205 tracing_snapshot_instance(tr);
1207 EXPORT_SYMBOL_GPL(tracing_snapshot);
1210 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1211 * @tr: The tracing instance to snapshot
1212 * @cond_data: The data to be tested conditionally, and possibly saved
1214 * This is the same as tracing_snapshot() except that the snapshot is
1215 * conditional - the snapshot will only happen if the
1216 * cond_snapshot.update() implementation receiving the cond_data
1217 * returns true, which means that the trace array's cond_snapshot
1218 * update() operation used the cond_data to determine whether the
1219 * snapshot should be taken, and if it was, presumably saved it along
1220 * with the snapshot.
1222 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1224 tracing_snapshot_instance_cond(tr, cond_data);
1226 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1229 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1230 * @tr: The tracing instance
1232 * When the user enables a conditional snapshot using
1233 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1234 * with the snapshot. This accessor is used to retrieve it.
1236 * Should not be called from cond_snapshot.update(), since it takes
1237 * the tr->max_lock lock, which the code calling
1238 * cond_snapshot.update() has already done.
1240 * Returns the cond_data associated with the trace array's snapshot.
1242 void *tracing_cond_snapshot_data(struct trace_array *tr)
1244 void *cond_data = NULL;
1246 local_irq_disable();
1247 arch_spin_lock(&tr->max_lock);
1249 if (tr->cond_snapshot)
1250 cond_data = tr->cond_snapshot->cond_data;
1252 arch_spin_unlock(&tr->max_lock);
1257 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1259 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1260 struct array_buffer *size_buf, int cpu_id);
1261 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1263 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1267 if (!tr->allocated_snapshot) {
1269 /* allocate spare buffer */
1270 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1271 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1275 tr->allocated_snapshot = true;
1281 static void free_snapshot(struct trace_array *tr)
1284 * We don't free the ring buffer; instead, we resize it because
1285 * the max_tr ring buffer has some state (e.g. ring->clock) and
1286 * we want to preserve it.
1288 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1289 set_buffer_entries(&tr->max_buffer, 1);
1290 tracing_reset_online_cpus(&tr->max_buffer);
1291 tr->allocated_snapshot = false;
1295 * tracing_alloc_snapshot - allocate snapshot buffer.
1297 * This only allocates the snapshot buffer if it isn't already
1298 * allocated - it doesn't also take a snapshot.
1300 * This is meant to be used in cases where the snapshot buffer needs
1301 * to be set up for events that can't sleep but need to be able to
1302 * trigger a snapshot.
1304 int tracing_alloc_snapshot(void)
1306 struct trace_array *tr = &global_trace;
1309 ret = tracing_alloc_snapshot_instance(tr);
1314 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1317 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1319 * This is similar to tracing_snapshot(), but it will allocate the
1320 * snapshot buffer if it isn't already allocated. Use this only
1321 * where it is safe to sleep, as the allocation may sleep.
1323 * This causes a swap between the snapshot buffer and the current live
1324 * tracing buffer. You can use this to take snapshots of the live
1325 * trace when some condition is triggered, but continue to trace.
1327 void tracing_snapshot_alloc(void)
1331 ret = tracing_alloc_snapshot();
1337 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
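/*
 * Illustrative sketch (not part of this file): a typical pattern is to
 * allocate the spare buffer once from process context and then take
 * snapshots from wherever the interesting condition fires. The function
 * names below are made up.
 */
static int __init example_snapshot_setup(void)
{
	/* May sleep; returns 0 on success or a negative errno */
	return tracing_alloc_snapshot();
}

static void example_interesting_condition(void)
{
	/* Safe in atomic context once the spare buffer exists */
	tracing_snapshot();
}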
1340 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1341 * @tr: The tracing instance
1342 * @cond_data: User data to associate with the snapshot
1343 * @update: Implementation of the cond_snapshot update function
1345 * Check whether the conditional snapshot for the given instance has
1346 * already been enabled, or if the current tracer is already using a
1347 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1348 * save the cond_data and update function inside.
1350 * Returns 0 if successful, error otherwise.
1352 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1353 cond_update_fn_t update)
1355 struct cond_snapshot *cond_snapshot;
1358 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1362 cond_snapshot->cond_data = cond_data;
1363 cond_snapshot->update = update;
1365 mutex_lock(&trace_types_lock);
1367 ret = tracing_alloc_snapshot_instance(tr);
1371 if (tr->current_trace->use_max_tr) {
1377 * The cond_snapshot can only change to NULL without the
1378 * trace_types_lock. We don't care if we race with it going
1379 * to NULL, but we want to make sure that it's not set to
1380 * something other than NULL when we get here, which we can
1381 * do safely with only holding the trace_types_lock and not
1382 * having to take the max_lock.
1384 if (tr->cond_snapshot) {
1389 local_irq_disable();
1390 arch_spin_lock(&tr->max_lock);
1391 tr->cond_snapshot = cond_snapshot;
1392 arch_spin_unlock(&tr->max_lock);
1395 mutex_unlock(&trace_types_lock);
1400 mutex_unlock(&trace_types_lock);
1401 kfree(cond_snapshot);
1404 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
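/*
 * Illustrative sketch (not part of this file): a conditional snapshot that
 * only fires when a made-up counter passes a threshold carried in the
 * cond_data. The struct and function names are hypothetical; only
 * cond_update_fn_t and tracing_snapshot_cond_enable() are real.
 */
struct example_cond {
	unsigned long threshold;
	unsigned long count;
};

static bool example_cond_update(struct trace_array *tr, void *cond_data)
{
	/* cond_data is whatever the caller handed to tracing_snapshot_cond() */
	struct example_cond *cond = cond_data;

	/* Returning true lets the snapshot actually swap the buffers */
	return ++cond->count >= cond->threshold;
}

static int example_enable_cond_snapshot(struct trace_array *tr,
					struct example_cond *cond)
{
	return tracing_snapshot_cond_enable(tr, cond, example_cond_update);
}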
1407 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1408 * @tr: The tracing instance
1410 * Check whether the conditional snapshot for the given instance is
1411 * enabled; if so, free the cond_snapshot associated with it,
1412 * otherwise return -EINVAL.
1414 * Returns 0 if successful, error otherwise.
1416 int tracing_snapshot_cond_disable(struct trace_array *tr)
1420 local_irq_disable();
1421 arch_spin_lock(&tr->max_lock);
1423 if (!tr->cond_snapshot)
1426 kfree(tr->cond_snapshot);
1427 tr->cond_snapshot = NULL;
1430 arch_spin_unlock(&tr->max_lock);
1435 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1437 void tracing_snapshot(void)
1439 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1441 EXPORT_SYMBOL_GPL(tracing_snapshot);
1442 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1444 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1446 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1447 int tracing_alloc_snapshot(void)
1449 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1452 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1453 void tracing_snapshot_alloc(void)
1458 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1459 void *tracing_cond_snapshot_data(struct trace_array *tr)
1463 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1464 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1468 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1469 int tracing_snapshot_cond_disable(struct trace_array *tr)
1473 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1474 #define free_snapshot(tr) do { } while (0)
1475 #endif /* CONFIG_TRACER_SNAPSHOT */
1477 void tracer_tracing_off(struct trace_array *tr)
1479 if (tr->array_buffer.buffer)
1480 ring_buffer_record_off(tr->array_buffer.buffer);
1482 * This flag is looked at when buffers haven't been allocated
1483 * yet, or by some tracers (like irqsoff) that just want to
1484 * know if the ring buffer has been disabled, but can handle
1485 * races where it gets disabled while we still do a record.
1486 * As the check is in the fast path of the tracers, it is more
1487 * important to be fast than accurate.
1489 tr->buffer_disabled = 1;
1490 /* Make the flag seen by readers */
1495 * tracing_off - turn off tracing buffers
1497 * This function stops the tracing buffers from recording data.
1498 * It does not disable any overhead the tracers themselves may
1499 * be causing. This function simply causes all recording to
1500 * the ring buffers to fail.
1502 void tracing_off(void)
1504 tracer_tracing_off(&global_trace);
1506 EXPORT_SYMBOL_GPL(tracing_off);
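/*
 * Illustrative sketch (not part of this file): the classic use of
 * tracing_off() is to freeze the ring buffer the moment a suspicious
 * condition is detected, so the events leading up to it survive for later
 * inspection through the "trace" file. The check below is made up.
 */
static void example_validate_state(int state)
{
	if (WARN_ON_ONCE(state < 0))
		tracing_off();	/* keep the history that led here */
}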
1508 void disable_trace_on_warning(void)
1510 if (__disable_trace_on_warning) {
1511 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1512 "Disabling tracing due to warning\n");
1518 * tracer_tracing_is_on - show the real state of the ring buffer
1519 * @tr : the trace array to check
1521 * Shows the real state of the ring buffer: whether it is enabled or not.
1523 bool tracer_tracing_is_on(struct trace_array *tr)
1525 if (tr->array_buffer.buffer)
1526 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1527 return !tr->buffer_disabled;
1531 * tracing_is_on - show state of ring buffers enabled
1533 int tracing_is_on(void)
1535 return tracer_tracing_is_on(&global_trace);
1537 EXPORT_SYMBOL_GPL(tracing_is_on);
1539 static int __init set_buf_size(char *str)
1541 unsigned long buf_size;
1545 buf_size = memparse(str, &str);
1547 * nr_entries cannot be zero and the startup
1548 * tests require some buffer space. Therefore
1549 * ensure we have at least 4096 bytes of buffer.
1551 trace_buf_size = max(4096UL, buf_size);
1554 __setup("trace_buf_size=", set_buf_size);
1556 static int __init set_tracing_thresh(char *str)
1558 unsigned long threshold;
1563 ret = kstrtoul(str, 0, &threshold);
1566 tracing_thresh = threshold * 1000;
1569 __setup("tracing_thresh=", set_tracing_thresh);
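/*
 * Illustrative boot usage (not part of this file): "trace_buf_size=16384k"
 * sets the per-CPU buffer size (memparse() accepts K/M/G suffixes), and
 * "tracing_thresh=100" gives the latency tracers a minimum threshold of
 * 100 microseconds (stored internally in nanoseconds).
 */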
1571 unsigned long nsecs_to_usecs(unsigned long nsecs)
1573 return nsecs / 1000;
1577 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1578 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1579 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1580 * of strings in the order that the evals (enum) were defined.
1585 /* These must match the bit positions in trace_iterator_flags */
1586 static const char *trace_options[] = {
1594 int in_ns; /* is this clock in nanoseconds? */
1595 } trace_clocks[] = {
1596 { trace_clock_local, "local", 1 },
1597 { trace_clock_global, "global", 1 },
1598 { trace_clock_counter, "counter", 0 },
1599 { trace_clock_jiffies, "uptime", 0 },
1600 { trace_clock, "perf", 1 },
1601 { ktime_get_mono_fast_ns, "mono", 1 },
1602 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1603 { ktime_get_boot_fast_ns, "boot", 1 },
1604 { ktime_get_tai_fast_ns, "tai", 1 },
1608 bool trace_clock_in_ns(struct trace_array *tr)
1610 if (trace_clocks[tr->clock_id].in_ns)
1617 * trace_parser_get_init - gets the buffer for trace parser
1619 int trace_parser_get_init(struct trace_parser *parser, int size)
1621 memset(parser, 0, sizeof(*parser));
1623 parser->buffer = kmalloc(size, GFP_KERNEL);
1624 if (!parser->buffer)
1627 parser->size = size;
1632 * trace_parser_put - frees the buffer for trace parser
1634 void trace_parser_put(struct trace_parser *parser)
1636 kfree(parser->buffer);
1637 parser->buffer = NULL;
1641 * trace_get_user - reads the user input string separated by space
1642 * (matched by isspace(ch))
1644 * For each string found the 'struct trace_parser' is updated,
1645 * and the function returns.
1647 * Returns number of bytes read.
1649 * See kernel/trace/trace.h for 'struct trace_parser' details.
1651 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1652 size_t cnt, loff_t *ppos)
1659 trace_parser_clear(parser);
1661 ret = get_user(ch, ubuf++);
1669 * The parser is not finished with the last write,
1670 * continue reading the user input without skipping spaces.
1672 if (!parser->cont) {
1673 /* skip white space */
1674 while (cnt && isspace(ch)) {
1675 ret = get_user(ch, ubuf++);
1684 /* only spaces were written */
1685 if (isspace(ch) || !ch) {
1692 /* read the non-space input */
1693 while (cnt && !isspace(ch) && ch) {
1694 if (parser->idx < parser->size - 1)
1695 parser->buffer[parser->idx++] = ch;
1700 ret = get_user(ch, ubuf++);
1707 /* We either got finished input or we have to wait for another call. */
1708 if (isspace(ch) || !ch) {
1709 parser->buffer[parser->idx] = 0;
1710 parser->cont = false;
1711 } else if (parser->idx < parser->size - 1) {
1712 parser->cont = true;
1713 parser->buffer[parser->idx++] = ch;
1714 /* Make sure the parsed string always terminates with '\0'. */
1715 parser->buffer[parser->idx] = 0;
1728 /* TODO add a seq_buf_to_buffer() */
1729 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1733 if (trace_seq_used(s) <= s->seq.readpos)
1736 len = trace_seq_used(s) - s->seq.readpos;
1739 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1741 s->seq.readpos += cnt;
1745 unsigned long __read_mostly tracing_thresh;
1747 #ifdef CONFIG_TRACER_MAX_TRACE
1748 static const struct file_operations tracing_max_lat_fops;
1750 #ifdef LATENCY_FS_NOTIFY
1752 static struct workqueue_struct *fsnotify_wq;
1754 static void latency_fsnotify_workfn(struct work_struct *work)
1756 struct trace_array *tr = container_of(work, struct trace_array,
1758 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1761 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1763 struct trace_array *tr = container_of(iwork, struct trace_array,
1765 queue_work(fsnotify_wq, &tr->fsnotify_work);
1768 static void trace_create_maxlat_file(struct trace_array *tr,
1769 struct dentry *d_tracer)
1771 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1772 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1773 tr->d_max_latency = trace_create_file("tracing_max_latency",
1776 &tracing_max_lat_fops);
1779 __init static int latency_fsnotify_init(void)
1781 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1782 WQ_UNBOUND | WQ_HIGHPRI, 0);
1784 pr_err("Unable to allocate tr_max_lat_wq\n");
1790 late_initcall_sync(latency_fsnotify_init);
1792 void latency_fsnotify(struct trace_array *tr)
1797 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1798 * possible that we are called from __schedule() or do_idle(), which
1799 * could cause a deadlock.
1801 irq_work_queue(&tr->fsnotify_irqwork);
1804 #else /* !LATENCY_FS_NOTIFY */
1806 #define trace_create_maxlat_file(tr, d_tracer) \
1807 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1808 d_tracer, tr, &tracing_max_lat_fops)
1813 * Copy the new maximum trace into the separate maximum-trace
1814 * structure. (this way the maximum trace is permanently saved,
1815 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1818 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1820 struct array_buffer *trace_buf = &tr->array_buffer;
1821 struct array_buffer *max_buf = &tr->max_buffer;
1822 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1823 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1826 max_buf->time_start = data->preempt_timestamp;
1828 max_data->saved_latency = tr->max_latency;
1829 max_data->critical_start = data->critical_start;
1830 max_data->critical_end = data->critical_end;
1832 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1833 max_data->pid = tsk->pid;
1835 * If tsk == current, then use current_uid(), as that does not use
1836 * RCU. The irq tracer can be called out of RCU scope.
1839 max_data->uid = current_uid();
1841 max_data->uid = task_uid(tsk);
1843 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1844 max_data->policy = tsk->policy;
1845 max_data->rt_priority = tsk->rt_priority;
1847 /* record this task's comm */
1848 tracing_record_cmdline(tsk);
1849 latency_fsnotify(tr);
1853 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1855 * @tsk: the task with the latency
1856 * @cpu: The cpu that initiated the trace.
1857 * @cond_data: User data associated with a conditional snapshot
1859 * Flip the buffers between the @tr and the max_tr and record information
1860 * about which task was the cause of this latency.
1863 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1869 WARN_ON_ONCE(!irqs_disabled());
1871 if (!tr->allocated_snapshot) {
1872 /* Only the nop tracer should hit this when disabling */
1873 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1877 arch_spin_lock(&tr->max_lock);
1879 /* Inherit the recordable setting from array_buffer */
1880 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1881 ring_buffer_record_on(tr->max_buffer.buffer);
1883 ring_buffer_record_off(tr->max_buffer.buffer);
1885 #ifdef CONFIG_TRACER_SNAPSHOT
1886 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1887 arch_spin_unlock(&tr->max_lock);
1891 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1893 __update_max_tr(tr, tsk, cpu);
1895 arch_spin_unlock(&tr->max_lock);
1897 /* Any waiters on the old snapshot buffer need to wake up */
1898 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1902 * update_max_tr_single - only copy one trace over, and reset the rest
1904 * @tsk: task with the latency
1905 * @cpu: the cpu of the buffer to copy.
1907 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1910 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1917 WARN_ON_ONCE(!irqs_disabled());
1918 if (!tr->allocated_snapshot) {
1919 /* Only the nop tracer should hit this when disabling */
1920 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1924 arch_spin_lock(&tr->max_lock);
1926 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1928 if (ret == -EBUSY) {
1930 * We failed to swap the buffer due to a commit taking
1931 * place on this CPU. We fail to record, but we reset
1932 * the max trace buffer (no one writes directly to it)
1933 * and flag that it failed.
1934 * Another reason is that a resize is in progress.
1936 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1937 "Failed to swap buffers due to commit or resize in progress\n");
1940 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1942 __update_max_tr(tr, tsk, cpu);
1943 arch_spin_unlock(&tr->max_lock);
1946 #endif /* CONFIG_TRACER_MAX_TRACE */
1948 static int wait_on_pipe(struct trace_iterator *iter, int full)
1952 /* Iterators are static, they should be filled or empty */
1953 if (trace_buffer_iter(iter, iter->cpu_file))
1956 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
1958 #ifdef CONFIG_TRACER_MAX_TRACE
1960 * Make sure this is still the snapshot buffer, as if a snapshot were
1961 * to happen, this would now be the main buffer.
1964 iter->array_buffer = &iter->tr->max_buffer;
1969 #ifdef CONFIG_FTRACE_STARTUP_TEST
1970 static bool selftests_can_run;
1972 struct trace_selftests {
1973 struct list_head list;
1974 struct tracer *type;
1977 static LIST_HEAD(postponed_selftests);
1979 static int save_selftest(struct tracer *type)
1981 struct trace_selftests *selftest;
1983 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1987 selftest->type = type;
1988 list_add(&selftest->list, &postponed_selftests);
1992 static int run_tracer_selftest(struct tracer *type)
1994 struct trace_array *tr = &global_trace;
1995 struct tracer *saved_tracer = tr->current_trace;
1998 if (!type->selftest || tracing_selftest_disabled)
2002 * If a tracer registers early in boot up (before scheduling is
2003 * initialized and such), then do not run its selftests yet.
2004 * Instead, run it a little later in the boot process.
2006 if (!selftests_can_run)
2007 return save_selftest(type);
2009 if (!tracing_is_on()) {
2010 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2016 * Run a selftest on this tracer.
2017 * Here we reset the trace buffer, and set the current
2018 * tracer to be this tracer. The tracer can then run some
2019 * internal tracing to verify that everything is in order.
2020 * If we fail, we do not register this tracer.
2022 tracing_reset_online_cpus(&tr->array_buffer);
2024 tr->current_trace = type;
2026 #ifdef CONFIG_TRACER_MAX_TRACE
2027 if (type->use_max_tr) {
2028 /* If we expanded the buffers, make sure the max is expanded too */
2029 if (ring_buffer_expanded)
2030 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2031 RING_BUFFER_ALL_CPUS);
2032 tr->allocated_snapshot = true;
2036 /* the test is responsible for initializing and enabling */
2037 pr_info("Testing tracer %s: ", type->name);
2038 ret = type->selftest(type, tr);
2039 /* the test is responsible for resetting too */
2040 tr->current_trace = saved_tracer;
2042 printk(KERN_CONT "FAILED!\n");
2043 /* Add the warning after printing 'FAILED' */
2047 /* Only reset on passing, to avoid touching corrupted buffers */
2048 tracing_reset_online_cpus(&tr->array_buffer);
2050 #ifdef CONFIG_TRACER_MAX_TRACE
2051 if (type->use_max_tr) {
2052 tr->allocated_snapshot = false;
2054 /* Shrink the max buffer again */
2055 if (ring_buffer_expanded)
2056 ring_buffer_resize(tr->max_buffer.buffer, 1,
2057 RING_BUFFER_ALL_CPUS);
2061 printk(KERN_CONT "PASSED\n");
2065 static int do_run_tracer_selftest(struct tracer *type)
2070 * Tests can take a long time, especially if they are run one after the
2071 * other, as does happen during bootup when all the tracers are
2072 * registered. This could cause the soft lockup watchdog to trigger.
2076 tracing_selftest_running = true;
2077 ret = run_tracer_selftest(type);
2078 tracing_selftest_running = false;
2083 static __init int init_trace_selftests(void)
2085 struct trace_selftests *p, *n;
2086 struct tracer *t, **last;
2089 selftests_can_run = true;
2091 mutex_lock(&trace_types_lock);
2093 if (list_empty(&postponed_selftests))
2096 pr_info("Running postponed tracer tests:\n");
2098 tracing_selftest_running = true;
2099 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2100 /* This loop can take minutes when sanitizers are enabled, so
2101 * let's make sure we allow RCU processing.
2104 ret = run_tracer_selftest(p->type);
2105 /* If the test fails, then warn and remove from available_tracers */
2107 WARN(1, "tracer: %s failed selftest, disabling\n",
2109 last = &trace_types;
2110 for (t = trace_types; t; t = t->next) {
2121 tracing_selftest_running = false;
2124 mutex_unlock(&trace_types_lock);
2128 core_initcall(init_trace_selftests);
2130 static inline int run_tracer_selftest(struct tracer *type)
2134 static inline int do_run_tracer_selftest(struct tracer *type)
2138 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2140 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2142 static void __init apply_trace_boot_options(void);
2145 * register_tracer - register a tracer with the ftrace system.
2146 * @type: the plugin for the tracer
2148 * Register a new plugin tracer.
2150 int __init register_tracer(struct tracer *type)
2156 pr_info("Tracer must have a name\n");
2160 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2161 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2165 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2166 pr_warn("Can not register tracer %s due to lockdown\n",
2171 mutex_lock(&trace_types_lock);
2173 for (t = trace_types; t; t = t->next) {
2174 if (strcmp(type->name, t->name) == 0) {
2176 pr_info("Tracer %s already registered\n",
2183 if (!type->set_flag)
2184 type->set_flag = &dummy_set_flag;
2186 /* allocate a dummy tracer_flags */
2187 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2192 type->flags->val = 0;
2193 type->flags->opts = dummy_tracer_opt;
2195 if (!type->flags->opts)
2196 type->flags->opts = dummy_tracer_opt;
2198 /* store the tracer for __set_tracer_option */
2199 type->flags->trace = type;
2201 ret = do_run_tracer_selftest(type);
2205 type->next = trace_types;
2207 add_tracer_options(&global_trace, type);
2210 mutex_unlock(&trace_types_lock);
2212 if (ret || !default_bootup_tracer)
2215 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2218 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2219 /* Do we want this tracer to start on bootup? */
2220 tracing_set_tracer(&global_trace, type->name);
2221 default_bootup_tracer = NULL;
2223 apply_trace_boot_options();
2225 /* disable other selftests, since this will break them. */
2226 disable_tracing_selftest("running a tracer");
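/*
 * Illustrative sketch (not part of this file): the smallest built-in tracer
 * a plugin could register with register_tracer(). The callback bodies and
 * the "example" name are made up; the field names match struct tracer.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}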
2232 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2234 struct trace_buffer *buffer = buf->buffer;
2239 ring_buffer_record_disable(buffer);
2241 /* Make sure all commits have finished */
2243 ring_buffer_reset_cpu(buffer, cpu);
2245 ring_buffer_record_enable(buffer);
2248 void tracing_reset_online_cpus(struct array_buffer *buf)
2250 struct trace_buffer *buffer = buf->buffer;
2255 ring_buffer_record_disable(buffer);
2257 /* Make sure all commits have finished */
2260 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2262 ring_buffer_reset_online_cpus(buffer);
2264 ring_buffer_record_enable(buffer);
2267 /* Must have trace_types_lock held */
2268 void tracing_reset_all_online_cpus_unlocked(void)
2270 struct trace_array *tr;
2272 lockdep_assert_held(&trace_types_lock);
2274 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2275 if (!tr->clear_trace)
2277 tr->clear_trace = false;
2278 tracing_reset_online_cpus(&tr->array_buffer);
2279 #ifdef CONFIG_TRACER_MAX_TRACE
2280 tracing_reset_online_cpus(&tr->max_buffer);
2285 void tracing_reset_all_online_cpus(void)
2287 mutex_lock(&trace_types_lock);
2288 tracing_reset_all_online_cpus_unlocked();
2289 mutex_unlock(&trace_types_lock);
2293 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2294 * is the tgid last observed corresponding to pid=i.
2296 static int *tgid_map;
2298 /* The maximum valid index into tgid_map. */
2299 static size_t tgid_map_max;
2301 #define SAVED_CMDLINES_DEFAULT 128
2302 #define NO_CMDLINE_MAP UINT_MAX
2304 * Preemption must be disabled before acquiring trace_cmdline_lock.
2305 * The various trace_arrays' max_lock must be acquired in a context
2306 * where interrupts are disabled.
2308 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2309 struct saved_cmdlines_buffer {
2310 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2311 unsigned *map_cmdline_to_pid;
2312 unsigned cmdline_num;
2314 char *saved_cmdlines;
2316 static struct saved_cmdlines_buffer *savedcmd;
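/*
 * Illustrative example (not part of this file): the comm cache is a
 * two-level mapping. A pid is first masked into map_pid_to_cmdline
 * (pid & (PID_MAX_DEFAULT - 1)), which yields a slot index; that slot
 * holds the comm in saved_cmdlines[] and the owning pid in
 * map_cmdline_to_pid[], so e.g. pid 1234 might resolve to "bash"
 * (pid and comm here are made up).
 */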
2318 static inline char *get_saved_cmdlines(int idx)
2320 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2323 static inline void set_cmdline(int idx, const char *cmdline)
2325 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2328 static int allocate_cmdlines_buffer(unsigned int val,
2329 struct saved_cmdlines_buffer *s)
2331 s->map_cmdline_to_pid = kmalloc_array(val,
2332 sizeof(*s->map_cmdline_to_pid),
2334 if (!s->map_cmdline_to_pid)
2337 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2338 if (!s->saved_cmdlines) {
2339 kfree(s->map_cmdline_to_pid);
2344 s->cmdline_num = val;
2345 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2346 sizeof(s->map_pid_to_cmdline));
2347 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2348 val * sizeof(*s->map_cmdline_to_pid));
2353 static int trace_create_savedcmd(void)
2357 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2361 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2371 int is_tracing_stopped(void)
2373 return global_trace.stop_count;
2376 static void tracing_start_tr(struct trace_array *tr)
2378 struct trace_buffer *buffer;
2379 unsigned long flags;
2381 if (tracing_disabled)
2384 raw_spin_lock_irqsave(&tr->start_lock, flags);
2385 if (--tr->stop_count) {
2386 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2387 /* Someone screwed up their debugging */
2393 /* Prevent the buffers from switching */
2394 arch_spin_lock(&tr->max_lock);
2396 buffer = tr->array_buffer.buffer;
2398 ring_buffer_record_enable(buffer);
2400 #ifdef CONFIG_TRACER_MAX_TRACE
2401 buffer = tr->max_buffer.buffer;
2403 ring_buffer_record_enable(buffer);
2406 arch_spin_unlock(&tr->max_lock);
2409 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2413 * tracing_start - quick start of the tracer
2415 * If tracing is enabled but was stopped by tracing_stop,
2416 * this will start the tracer back up.
2418 void tracing_start(void)
2421 return tracing_start_tr(&global_trace);
2424 static void tracing_stop_tr(struct trace_array *tr)
2426 struct trace_buffer *buffer;
2427 unsigned long flags;
2429 raw_spin_lock_irqsave(&tr->start_lock, flags);
2430 if (tr->stop_count++)
2433 /* Prevent the buffers from switching */
2434 arch_spin_lock(&tr->max_lock);
2436 buffer = tr->array_buffer.buffer;
2438 ring_buffer_record_disable(buffer);
2440 #ifdef CONFIG_TRACER_MAX_TRACE
2441 buffer = tr->max_buffer.buffer;
2443 ring_buffer_record_disable(buffer);
2446 arch_spin_unlock(&tr->max_lock);
2449 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2453 * tracing_stop - quick stop of the tracer
2455  * Lightweight way to stop tracing. Use in conjunction with
2458 void tracing_stop(void)
2460 return tracing_stop_tr(&global_trace);
2463 static int trace_save_cmdline(struct task_struct *tsk)
2467 /* treat recording of idle task as a success */
2471 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2474 * It's not the end of the world if we don't get
2475 * the lock, but we also don't want to spin
2476 * nor do we want to disable interrupts,
2477 * so if we miss here, then better luck next time.
2479	 * This is called from within the scheduler and wakeup paths, so interrupts
2480	 * had better be disabled and the run queue lock had better be held.
2482 lockdep_assert_preemption_disabled();
2483 if (!arch_spin_trylock(&trace_cmdline_lock))
2486 idx = savedcmd->map_pid_to_cmdline[tpid];
2487 if (idx == NO_CMDLINE_MAP) {
2488 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2490 savedcmd->map_pid_to_cmdline[tpid] = idx;
2491 savedcmd->cmdline_idx = idx;
2494 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2495 set_cmdline(idx, tsk->comm);
2497 arch_spin_unlock(&trace_cmdline_lock);
2502 static void __trace_find_cmdline(int pid, char comm[])
2508 strcpy(comm, "<idle>");
2512 if (WARN_ON_ONCE(pid < 0)) {
2513 strcpy(comm, "<XXX>");
2517 tpid = pid & (PID_MAX_DEFAULT - 1);
2518 map = savedcmd->map_pid_to_cmdline[tpid];
2519 if (map != NO_CMDLINE_MAP) {
2520 tpid = savedcmd->map_cmdline_to_pid[map];
2522 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2526 strcpy(comm, "<...>");
2529 void trace_find_cmdline(int pid, char comm[])
2532 arch_spin_lock(&trace_cmdline_lock);
2534 __trace_find_cmdline(pid, comm);
2536 arch_spin_unlock(&trace_cmdline_lock);
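/*
 * Minimal usage sketch for the lookup above (hypothetical caller): resolve
 * a PID recorded in a trace entry back to a command name.  "<...>" means the
 * comm was never saved or its slot has since been reused by another PID.
 */
static inline void example_resolve_comm(int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	pr_debug("pid %d last ran as \"%s\"\n", pid, comm);
}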
2540 static int *trace_find_tgid_ptr(int pid)
2543 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2544	 * if we observe a non-NULL tgid_map then we also observe the correct tgid_map_max.
2547 int *map = smp_load_acquire(&tgid_map);
2549 if (unlikely(!map || pid > tgid_map_max))
2555 int trace_find_tgid(int pid)
2557 int *ptr = trace_find_tgid_ptr(pid);
2559 return ptr ? *ptr : 0;
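/*
 * Companion sketch (hypothetical helper): trace_find_tgid() returns 0 when
 * the record-tgid option never captured this PID, so callers need their own
 * "unknown" convention.
 */
static inline int example_resolve_tgid(int pid)
{
	int tgid = trace_find_tgid(pid);

	return tgid ? tgid : -1;	/* -1 is an arbitrary "unknown" marker */
}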
2562 static int trace_save_tgid(struct task_struct *tsk)
2566 /* treat recording of idle task as a success */
2570 ptr = trace_find_tgid_ptr(tsk->pid);
2578 static bool tracing_record_taskinfo_skip(int flags)
2580 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2582 if (!__this_cpu_read(trace_taskinfo_save))
2588 * tracing_record_taskinfo - record the task info of a task
2590 * @task: task to record
2591 * @flags: TRACE_RECORD_CMDLINE for recording comm
2592 * TRACE_RECORD_TGID for recording tgid
2594 void tracing_record_taskinfo(struct task_struct *task, int flags)
2598 if (tracing_record_taskinfo_skip(flags))
2602 * Record as much task information as possible. If some fail, continue
2603 * to try to record the others.
2605 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2606 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2608 /* If recording any information failed, retry again soon. */
2612 __this_cpu_write(trace_taskinfo_save, false);
2616 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2618 * @prev: previous task during sched_switch
2619 * @next: next task during sched_switch
2620 * @flags: TRACE_RECORD_CMDLINE for recording comm
2621 * TRACE_RECORD_TGID for recording tgid
2623 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2624 struct task_struct *next, int flags)
2628 if (tracing_record_taskinfo_skip(flags))
2632 * Record as much task information as possible. If some fail, continue
2633 * to try to record the others.
2635 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2636 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2637 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2638 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2640 /* If recording any information failed, retry again soon. */
2644 __this_cpu_write(trace_taskinfo_save, false);
2647 /* Helpers to record a specific task information */
2648 void tracing_record_cmdline(struct task_struct *task)
2650 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2653 void tracing_record_tgid(struct task_struct *task)
2655 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2659 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2660 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2661 * simplifies those functions and keeps them in sync.
2663 enum print_line_t trace_handle_return(struct trace_seq *s)
2665 return trace_seq_has_overflowed(s) ?
2666 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2668 EXPORT_SYMBOL_GPL(trace_handle_return);
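/*
 * Typical shape of a caller of the helper above (sketch of an event output
 * callback, not a real event): emit the text, then let trace_handle_return()
 * turn a trace_seq overflow into TRACE_TYPE_PARTIAL_LINE.
 */
static enum print_line_t example_event_output(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example event on cpu %d\n", iter->cpu);
	return trace_handle_return(s);
}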
2670 static unsigned short migration_disable_value(void)
2672 #if defined(CONFIG_SMP)
2673 return current->migration_disabled;
2679 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2681 unsigned int trace_flags = irqs_status;
2684 pc = preempt_count();
2687 trace_flags |= TRACE_FLAG_NMI;
2688 if (pc & HARDIRQ_MASK)
2689 trace_flags |= TRACE_FLAG_HARDIRQ;
2690 if (in_serving_softirq())
2691 trace_flags |= TRACE_FLAG_SOFTIRQ;
2692 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2693 trace_flags |= TRACE_FLAG_BH_OFF;
2695 if (tif_need_resched())
2696 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2697 if (test_preempt_need_resched())
2698 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2699 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2700 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
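/*
 * Layout of the packed context word built above, with illustrative decoders
 * (sketch only): bits 0-3 hold the clamped preempt count, bits 4-7 the
 * migrate-disable depth, and the TRACE_FLAG_* bits start at bit 16.
 */
static inline unsigned int example_ctx_preempt_depth(unsigned int trace_ctx)
{
	return trace_ctx & 0xf;
}

static inline unsigned int example_ctx_migrate_disable(unsigned int trace_ctx)
{
	return (trace_ctx >> 4) & 0xf;
}

static inline bool example_ctx_in_hardirq(unsigned int trace_ctx)
{
	return trace_ctx & (TRACE_FLAG_HARDIRQ << 16);
}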
2703 struct ring_buffer_event *
2704 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2707 unsigned int trace_ctx)
2709 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2712 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2713 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2714 static int trace_buffered_event_ref;
2717 * trace_buffered_event_enable - enable buffering events
2719 * When events are being filtered, it is quicker to use a temporary
2720 * buffer to write the event data into if there's a likely chance
2721 * that it will not be committed. The discard of the ring buffer
2722 * is not as fast as committing, and is much slower than copying a commit.
2725 * When an event is to be filtered, allocate per cpu buffers to
2726 * write the event data into, and if the event is filtered and discarded
2727 * it is simply dropped; otherwise, the entire data is committed in one shot.
2730 void trace_buffered_event_enable(void)
2732 struct ring_buffer_event *event;
2736 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2738 if (trace_buffered_event_ref++)
2741 for_each_tracing_cpu(cpu) {
2742 page = alloc_pages_node(cpu_to_node(cpu),
2743 GFP_KERNEL | __GFP_NORETRY, 0);
2744 /* This is just an optimization and can handle failures */
2746 pr_err("Failed to allocate event buffer\n");
2750 event = page_address(page);
2751 memset(event, 0, sizeof(*event));
2753 per_cpu(trace_buffered_event, cpu) = event;
2756 if (cpu == smp_processor_id() &&
2757 __this_cpu_read(trace_buffered_event) !=
2758 per_cpu(trace_buffered_event, cpu))
2764 static void enable_trace_buffered_event(void *data)
2766 /* Probably not needed, but do it anyway */
2768 this_cpu_dec(trace_buffered_event_cnt);
2771 static void disable_trace_buffered_event(void *data)
2773 this_cpu_inc(trace_buffered_event_cnt);
2777 * trace_buffered_event_disable - disable buffering events
2779 * When a filter is removed, it is faster to not use the buffered
2780 * events, and to commit directly into the ring buffer. Free up
2781 * the temp buffers when there are no more users. This requires
2782 * special synchronization with current events.
2784 void trace_buffered_event_disable(void)
2788 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2790 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2793 if (--trace_buffered_event_ref)
2796 /* For each CPU, set the buffer as used. */
2797 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2800 /* Wait for all current users to finish */
2803 for_each_tracing_cpu(cpu) {
2804 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2805 per_cpu(trace_buffered_event, cpu) = NULL;
2809	 * Wait for all CPUs that potentially started checking if they can use
2810	 * their event buffer only after the previous synchronize_rcu() call and
2811	 * still read a valid pointer from trace_buffered_event. It must be
2812	 * ensured that they do not see a cleared trace_buffered_event_cnt, else they
2813	 * could wrongly decide to use the pointed-to buffer, which is now freed.
2817 /* For each CPU, relinquish the buffer */
2818 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
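/*
 * Pairing sketch for the two helpers above (hypothetical callers): a filter
 * attach path bumps the buffered-event refcount under event_mutex, and the
 * detach path drops it again so the per-CPU pages are freed with the last user.
 */
static void example_filter_attach(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();		/* allocates per-CPU scratch pages */
	mutex_unlock(&event_mutex);
}

static void example_filter_detach(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_disable();		/* frees them on the last put */
	mutex_unlock(&event_mutex);
}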
2822 static struct trace_buffer *temp_buffer;
2824 struct ring_buffer_event *
2825 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2826 struct trace_event_file *trace_file,
2827 int type, unsigned long len,
2828 unsigned int trace_ctx)
2830 struct ring_buffer_event *entry;
2831 struct trace_array *tr = trace_file->tr;
2834 *current_rb = tr->array_buffer.buffer;
2836 if (!tr->no_filter_buffering_ref &&
2837 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2838 preempt_disable_notrace();
2840 * Filtering is on, so try to use the per cpu buffer first.
2841 * This buffer will simulate a ring_buffer_event,
2842 * where the type_len is zero and the array[0] will
2843 * hold the full length.
2844		 * (see include/linux/ring_buffer.h for details on
2845 * how the ring_buffer_event is structured).
2847 * Using a temp buffer during filtering and copying it
2848 * on a matched filter is quicker than writing directly
2849 * into the ring buffer and then discarding it when
2850 * it doesn't match. That is because the discard
2851 * requires several atomic operations to get right.
2852		 * Copying on a match and doing nothing on a failed match
2853		 * is still quicker than skipping the copy on a match but having
2854		 * to discard out of the ring buffer on a failed match.
2856 if ((entry = __this_cpu_read(trace_buffered_event))) {
2857 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2859 val = this_cpu_inc_return(trace_buffered_event_cnt);
2862 * Preemption is disabled, but interrupts and NMIs
2863 * can still come in now. If that happens after
2864 * the above increment, then it will have to go
2865 * back to the old method of allocating the event
2866 * on the ring buffer, and if the filter fails, it
2867 * will have to call ring_buffer_discard_commit()
2870 * Need to also check the unlikely case that the
2871 * length is bigger than the temp buffer size.
2872 * If that happens, then the reserve is pretty much
2873 * guaranteed to fail, as the ring buffer currently
2874 * only allows events less than a page. But that may
2875 * change in the future, so let the ring buffer reserve
2876 * handle the failure in that case.
2878 if (val == 1 && likely(len <= max_len)) {
2879 trace_event_setup(entry, type, trace_ctx);
2880 entry->array[0] = len;
2881 /* Return with preemption disabled */
2884 this_cpu_dec(trace_buffered_event_cnt);
2886 /* __trace_buffer_lock_reserve() disables preemption */
2887 preempt_enable_notrace();
2890 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2893 * If tracing is off, but we have triggers enabled
2894 * we still need to look at the event data. Use the temp_buffer
2895	 * to store the trace event for the trigger to use. It is recursion
2896	 * safe and will not be recorded anywhere.
2898 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2899 *current_rb = temp_buffer;
2900 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2905 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2907 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2908 static DEFINE_MUTEX(tracepoint_printk_mutex);
2910 static void output_printk(struct trace_event_buffer *fbuffer)
2912 struct trace_event_call *event_call;
2913 struct trace_event_file *file;
2914 struct trace_event *event;
2915 unsigned long flags;
2916 struct trace_iterator *iter = tracepoint_print_iter;
2918 /* We should never get here if iter is NULL */
2919 if (WARN_ON_ONCE(!iter))
2922 event_call = fbuffer->trace_file->event_call;
2923 if (!event_call || !event_call->event.funcs ||
2924 !event_call->event.funcs->trace)
2927 file = fbuffer->trace_file;
2928 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2929 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2930 !filter_match_preds(file->filter, fbuffer->entry)))
2933 event = &fbuffer->trace_file->event_call->event;
2935 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2936 trace_seq_init(&iter->seq);
2937 iter->ent = fbuffer->entry;
2938 event_call->event.funcs->trace(iter, 0, event);
2939 trace_seq_putc(&iter->seq, 0);
2940 printk("%s", iter->seq.buffer);
2942 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2945 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2946 void *buffer, size_t *lenp,
2949 int save_tracepoint_printk;
2952 mutex_lock(&tracepoint_printk_mutex);
2953 save_tracepoint_printk = tracepoint_printk;
2955 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2958 * This will force exiting early, as tracepoint_printk
2959	 * is always zero when tracepoint_print_iter is not allocated
2961 if (!tracepoint_print_iter)
2962 tracepoint_printk = 0;
2964 if (save_tracepoint_printk == tracepoint_printk)
2967 if (tracepoint_printk)
2968 static_key_enable(&tracepoint_printk_key.key);
2970 static_key_disable(&tracepoint_printk_key.key);
2973 mutex_unlock(&tracepoint_printk_mutex);
2978 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2980 enum event_trigger_type tt = ETT_NONE;
2981 struct trace_event_file *file = fbuffer->trace_file;
2983 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2984 fbuffer->entry, &tt))
2987 if (static_key_false(&tracepoint_printk_key.key))
2988 output_printk(fbuffer);
2990 if (static_branch_unlikely(&trace_event_exports_enabled))
2991 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2993 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2994 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2998 event_triggers_post_call(file, tt);
3001 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
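/*
 * Rough shape of how the generated trace-event code drives the two entry
 * points above (a paraphrased sketch; the real glue lives in the
 * TRACE_EVENT() macros and trace_event_buffer_reserve()).
 */
static void example_emit_event(struct trace_event_file *trace_file,
			       int event_type, int data_len)
{
	struct trace_event_buffer fbuffer = { .trace_file = trace_file };

	fbuffer.trace_ctx = tracing_gen_ctx();
	fbuffer.event = trace_event_buffer_lock_reserve(&fbuffer.buffer,
							trace_file, event_type,
							data_len,
							fbuffer.trace_ctx);
	if (!fbuffer.event)
		return;

	fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	/* ... fill the event fields behind fbuffer.entry here ... */
	trace_event_buffer_commit(&fbuffer);
}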
3006 * trace_buffer_unlock_commit_regs()
3007 * trace_event_buffer_commit()
3008 * trace_event_raw_event_xxx()
3010 # define STACK_SKIP 3
3012 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
3013 struct trace_buffer *buffer,
3014 struct ring_buffer_event *event,
3015 unsigned int trace_ctx,
3016 struct pt_regs *regs)
3018 __buffer_unlock_commit(buffer, event);
3021 * If regs is not set, then skip the necessary functions.
3022 * Note, we can still get here via blktrace, wakeup tracer
3023 * and mmiotrace, but that's ok if they lose a function or
3024 * two. They are not that meaningful.
3026 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3027 ftrace_trace_userstack(tr, buffer, trace_ctx);
3031 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3034 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3035 struct ring_buffer_event *event)
3037 __buffer_unlock_commit(buffer, event);
3041 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3042 parent_ip, unsigned int trace_ctx)
3044 struct trace_event_call *call = &event_function;
3045 struct trace_buffer *buffer = tr->array_buffer.buffer;
3046 struct ring_buffer_event *event;
3047 struct ftrace_entry *entry;
3049 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3053 entry = ring_buffer_event_data(event);
3055 entry->parent_ip = parent_ip;
3057 if (!call_filter_check_discard(call, entry, buffer, event)) {
3058 if (static_branch_unlikely(&trace_function_exports_enabled))
3059 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3060 __buffer_unlock_commit(buffer, event);
3064 #ifdef CONFIG_STACKTRACE
3066 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3067 #define FTRACE_KSTACK_NESTING 4
3069 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3071 struct ftrace_stack {
3072 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3076 struct ftrace_stacks {
3077 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3080 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3081 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3083 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3084 unsigned int trace_ctx,
3085 int skip, struct pt_regs *regs)
3087 struct trace_event_call *call = &event_kernel_stack;
3088 struct ring_buffer_event *event;
3089 unsigned int size, nr_entries;
3090 struct ftrace_stack *fstack;
3091 struct stack_entry *entry;
3095	 * Add one, for this function and the call to save_stack_trace().
3096 * If regs is set, then these functions will not be in the way.
3098 #ifndef CONFIG_UNWINDER_ORC
3103 preempt_disable_notrace();
3105 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3107 /* This should never happen. If it does, yell once and skip */
3108 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3112 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3113 * interrupt will either see the value pre increment or post
3114 * increment. If the interrupt happens pre increment it will have
3115 * restored the counter when it returns. We just need a barrier to
3116 * keep gcc from moving things around.
3120 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3121 size = ARRAY_SIZE(fstack->calls);
3124 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3127 nr_entries = stack_trace_save(fstack->calls, size, skip);
3130 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3131 struct_size(entry, caller, nr_entries),
3135 entry = ring_buffer_event_data(event);
3137 entry->size = nr_entries;
3138 memcpy(&entry->caller, fstack->calls,
3139 flex_array_size(entry, caller, nr_entries));
3141 if (!call_filter_check_discard(call, entry, buffer, event))
3142 __buffer_unlock_commit(buffer, event);
3145 /* Again, don't let gcc optimize things here */
3147 __this_cpu_dec(ftrace_stack_reserve);
3148 preempt_enable_notrace();
3152 static inline void ftrace_trace_stack(struct trace_array *tr,
3153 struct trace_buffer *buffer,
3154 unsigned int trace_ctx,
3155 int skip, struct pt_regs *regs)
3157 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3160 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3163 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3166 struct trace_buffer *buffer = tr->array_buffer.buffer;
3168 if (rcu_is_watching()) {
3169 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3173 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3177 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3178 * but if the above rcu_is_watching() failed, then the NMI
3179 * triggered someplace critical, and ct_irq_enter() should
3180 * not be called from NMI.
3182 if (unlikely(in_nmi()))
3185 ct_irq_enter_irqson();
3186 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3187 ct_irq_exit_irqson();
3191 * trace_dump_stack - record a stack back trace in the trace buffer
3192 * @skip: Number of functions to skip (helper handlers)
3194 void trace_dump_stack(int skip)
3196 if (tracing_disabled || tracing_selftest_running)
3199 #ifndef CONFIG_UNWINDER_ORC
3200 /* Skip 1 to skip this function. */
3203 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3204 tracing_gen_ctx(), skip, NULL);
3206 EXPORT_SYMBOL_GPL(trace_dump_stack);
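/*
 * Example use of the export above (hypothetical debug site): drop the
 * current kernel stack into the trace buffer; skip == 0 keeps the caller
 * itself in the recorded trace.
 */
static inline void example_mark_spot(void)
{
	trace_dump_stack(0);
}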
3208 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3209 static DEFINE_PER_CPU(int, user_stack_count);
3212 ftrace_trace_userstack(struct trace_array *tr,
3213 struct trace_buffer *buffer, unsigned int trace_ctx)
3215 struct trace_event_call *call = &event_user_stack;
3216 struct ring_buffer_event *event;
3217 struct userstack_entry *entry;
3219 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3223	 * NMIs cannot handle page faults, even with fixups.
3224	 * Saving the user stack can (and often does) fault.
3226 if (unlikely(in_nmi()))
3230 * prevent recursion, since the user stack tracing may
3231 * trigger other kernel events.
3234 if (__this_cpu_read(user_stack_count))
3237 __this_cpu_inc(user_stack_count);
3239 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3240 sizeof(*entry), trace_ctx);
3242 goto out_drop_count;
3243 entry = ring_buffer_event_data(event);
3245 entry->tgid = current->tgid;
3246 memset(&entry->caller, 0, sizeof(entry->caller));
3248 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3249 if (!call_filter_check_discard(call, entry, buffer, event))
3250 __buffer_unlock_commit(buffer, event);
3253 __this_cpu_dec(user_stack_count);
3257 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3258 static void ftrace_trace_userstack(struct trace_array *tr,
3259 struct trace_buffer *buffer,
3260 unsigned int trace_ctx)
3263 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3265 #endif /* CONFIG_STACKTRACE */
3268 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3269 unsigned long long delta)
3271 entry->bottom_delta_ts = delta & U32_MAX;
3272 entry->top_delta_ts = (delta >> 32);
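/*
 * The helper above splits a 64-bit timestamp delta across two 32-bit event
 * fields; the output side recombines them roughly like this (sketch):
 */
static inline u64 example_func_repeats_delta(const struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}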
3275 void trace_last_func_repeats(struct trace_array *tr,
3276 struct trace_func_repeats *last_info,
3277 unsigned int trace_ctx)
3279 struct trace_buffer *buffer = tr->array_buffer.buffer;
3280 struct func_repeats_entry *entry;
3281 struct ring_buffer_event *event;
3284 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3285 sizeof(*entry), trace_ctx);
3289 delta = ring_buffer_event_time_stamp(buffer, event) -
3290 last_info->ts_last_call;
3292 entry = ring_buffer_event_data(event);
3293 entry->ip = last_info->ip;
3294 entry->parent_ip = last_info->parent_ip;
3295 entry->count = last_info->count;
3296 func_repeats_set_delta_ts(entry, delta);
3298 __buffer_unlock_commit(buffer, event);
3301 /* created for use with alloc_percpu */
3302 struct trace_buffer_struct {
3304 char buffer[4][TRACE_BUF_SIZE];
3307 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3310 * This allows for lockless recording. If we're nested too deeply, then
3311 * this returns NULL.
3313 static char *get_trace_buf(void)
3315 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3317 if (!trace_percpu_buffer || buffer->nesting >= 4)
3322 /* Interrupts must see nesting incremented before we use the buffer */
3324 return &buffer->buffer[buffer->nesting - 1][0];
3327 static void put_trace_buf(void)
3329 /* Don't let the decrement of nesting leak before this */
3331 this_cpu_dec(trace_percpu_buffer->nesting);
3334 static int alloc_percpu_trace_buffer(void)
3336 struct trace_buffer_struct __percpu *buffers;
3338 if (trace_percpu_buffer)
3341 buffers = alloc_percpu(struct trace_buffer_struct);
3342 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3345 trace_percpu_buffer = buffers;
3349 static int buffers_allocated;
3351 void trace_printk_init_buffers(void)
3353 if (buffers_allocated)
3356 if (alloc_percpu_trace_buffer())
3359 /* trace_printk() is for debug use only. Don't use it in production. */
3362 pr_warn("**********************************************************\n");
3363 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3365 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3367 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3368 pr_warn("** unsafe for production use. **\n");
3370 pr_warn("** If you see this message and you are not debugging **\n");
3371 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3373 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3374 pr_warn("**********************************************************\n");
3376 /* Expand the buffers to set size */
3377 tracing_update_buffers();
3379 buffers_allocated = 1;
3382 * trace_printk_init_buffers() can be called by modules.
3383 * If that happens, then we need to start cmdline recording
3384 * directly here. If the global_trace.buffer is already
3385 * allocated here, then this was called by module code.
3387 if (global_trace.array_buffer.buffer)
3388 tracing_start_cmdline_record();
3390 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3392 void trace_printk_start_comm(void)
3394 /* Start tracing comms if trace printk is set */
3395 if (!buffers_allocated)
3397 tracing_start_cmdline_record();
3400 static void trace_printk_start_stop_comm(int enabled)
3402 if (!buffers_allocated)
3406 tracing_start_cmdline_record();
3408 tracing_stop_cmdline_record();
3412 * trace_vbprintk - write binary msg to tracing buffer
3413 * @ip: The address of the caller
3414 * @fmt: The string format to write to the buffer
3415 * @args: Arguments for @fmt
3417 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3419 struct trace_event_call *call = &event_bprint;
3420 struct ring_buffer_event *event;
3421 struct trace_buffer *buffer;
3422 struct trace_array *tr = &global_trace;
3423 struct bprint_entry *entry;
3424 unsigned int trace_ctx;
3428 if (unlikely(tracing_selftest_running || tracing_disabled))
3431 /* Don't pollute graph traces with trace_vprintk internals */
3432 pause_graph_tracing();
3434 trace_ctx = tracing_gen_ctx();
3435 preempt_disable_notrace();
3437 tbuffer = get_trace_buf();
3443 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3445 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3448 size = sizeof(*entry) + sizeof(u32) * len;
3449 buffer = tr->array_buffer.buffer;
3450 ring_buffer_nest_start(buffer);
3451 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3455 entry = ring_buffer_event_data(event);
3459 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3460 if (!call_filter_check_discard(call, entry, buffer, event)) {
3461 __buffer_unlock_commit(buffer, event);
3462 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3466 ring_buffer_nest_end(buffer);
3471 preempt_enable_notrace();
3472 unpause_graph_tracing();
3476 EXPORT_SYMBOL_GPL(trace_vbprintk);
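/*
 * trace_vbprintk() is the backend reached when trace_printk() is given a
 * format with arguments; callers never invoke it directly. A typical debug
 * site looks like this (sketch):
 */
static inline void example_debug_mark(int cpu, u64 ts)
{
	trace_printk("example: reset cpu %d at %llu\n",
		     cpu, (unsigned long long)ts);
}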
3480 __trace_array_vprintk(struct trace_buffer *buffer,
3481 unsigned long ip, const char *fmt, va_list args)
3483 struct trace_event_call *call = &event_print;
3484 struct ring_buffer_event *event;
3486 struct print_entry *entry;
3487 unsigned int trace_ctx;
3490 if (tracing_disabled)
3493 /* Don't pollute graph traces with trace_vprintk internals */
3494 pause_graph_tracing();
3496 trace_ctx = tracing_gen_ctx();
3497 preempt_disable_notrace();
3500 tbuffer = get_trace_buf();
3506 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3508 size = sizeof(*entry) + len + 1;
3509 ring_buffer_nest_start(buffer);
3510 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3514 entry = ring_buffer_event_data(event);
3517 memcpy(&entry->buf, tbuffer, len + 1);
3518 if (!call_filter_check_discard(call, entry, buffer, event)) {
3519 __buffer_unlock_commit(buffer, event);
3520 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3524 ring_buffer_nest_end(buffer);
3528 preempt_enable_notrace();
3529 unpause_graph_tracing();
3535 int trace_array_vprintk(struct trace_array *tr,
3536 unsigned long ip, const char *fmt, va_list args)
3538 if (tracing_selftest_running && tr == &global_trace)
3541 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3545 * trace_array_printk - Print a message to a specific instance
3546 * @tr: The instance trace_array descriptor
3547 * @ip: The instruction pointer that this is called from.
3548 * @fmt: The format to print (printf format)
3550 * If a subsystem sets up its own instance, they have the right to
3551 * printk strings into their tracing instance buffer using this
3552 * function. Note, this function will not write into the top level
3553 * buffer (use trace_printk() for that), as writing into the top level
3554 * buffer should only have events that can be individually disabled.
3555 * trace_printk() is only used for debugging a kernel, and should not
3556 * ever be incorporated into normal use.
3558 * trace_array_printk() can be used, as it will not add noise to the
3559 * top level tracing buffer.
3561 * Note, trace_array_init_printk() must be called on @tr before this
3565 int trace_array_printk(struct trace_array *tr,
3566 unsigned long ip, const char *fmt, ...)
3574 /* This is only allowed for created instances */
3575 if (tr == &global_trace)
3578 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3582 ret = trace_array_vprintk(tr, ip, fmt, ap);
3586 EXPORT_SYMBOL_GPL(trace_array_printk);
3589 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3590 * @tr: The trace array to initialize the buffers for
3592 * As trace_array_printk() only writes into instances, they are OK to
3593 * have in the kernel (unlike trace_printk()). This needs to be called
3594 * before trace_array_printk() can be used on a trace_array.
3596 int trace_array_init_printk(struct trace_array *tr)
3601 /* This is only allowed for created instances */
3602 if (tr == &global_trace)
3605 return alloc_percpu_trace_buffer();
3607 EXPORT_SYMBOL_GPL(trace_array_init_printk);
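/*
 * Putting the two exports above together (sketch; assumes this kernel's
 * single-argument trace_array_get_by_name() and a hypothetical "example"
 * instance name):
 */
static int example_instance_printk(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("example");
	if (!tr)
		return -ENOMEM;

	if (trace_array_init_printk(tr)) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	trace_array_printk(tr, _THIS_IP_, "hello from the example instance\n");
	return 0;
}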
3610 int trace_array_printk_buf(struct trace_buffer *buffer,
3611 unsigned long ip, const char *fmt, ...)
3616 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3620 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3626 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3628 return trace_array_vprintk(&global_trace, ip, fmt, args);
3630 EXPORT_SYMBOL_GPL(trace_vprintk);
3632 static void trace_iterator_increment(struct trace_iterator *iter)
3634 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3638 ring_buffer_iter_advance(buf_iter);
3641 static struct trace_entry *
3642 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3643 unsigned long *lost_events)
3645 struct ring_buffer_event *event;
3646 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3649 event = ring_buffer_iter_peek(buf_iter, ts);
3651 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3652 (unsigned long)-1 : 0;
3654 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3659 iter->ent_size = ring_buffer_event_length(event);
3660 return ring_buffer_event_data(event);
3666 static struct trace_entry *
3667 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3668 unsigned long *missing_events, u64 *ent_ts)
3670 struct trace_buffer *buffer = iter->array_buffer->buffer;
3671 struct trace_entry *ent, *next = NULL;
3672 unsigned long lost_events = 0, next_lost = 0;
3673 int cpu_file = iter->cpu_file;
3674 u64 next_ts = 0, ts;
3680	 * If we are in a per_cpu trace file, don't bother iterating over
3681	 * all CPUs; peek at that CPU directly.
3683 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3684 if (ring_buffer_empty_cpu(buffer, cpu_file))
3686 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3688 *ent_cpu = cpu_file;
3693 for_each_tracing_cpu(cpu) {
3695 if (ring_buffer_empty_cpu(buffer, cpu))
3698 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3701 * Pick the entry with the smallest timestamp:
3703 if (ent && (!next || ts < next_ts)) {
3707 next_lost = lost_events;
3708 next_size = iter->ent_size;
3712 iter->ent_size = next_size;
3715 *ent_cpu = next_cpu;
3721 *missing_events = next_lost;
3726 #define STATIC_FMT_BUF_SIZE 128
3727 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3729 char *trace_iter_expand_format(struct trace_iterator *iter)
3734 * iter->tr is NULL when used with tp_printk, which makes
3735 * this get called where it is not safe to call krealloc().
3737 if (!iter->tr || iter->fmt == static_fmt_buf)
3740 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3743 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3750 /* Returns true if the string is safe to dereference from an event */
3751 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3754 unsigned long addr = (unsigned long)str;
3755 struct trace_event *trace_event;
3756 struct trace_event_call *event;
3758 /* Ignore strings with no length */
3762 /* OK if part of the event data */
3763 if ((addr >= (unsigned long)iter->ent) &&
3764 (addr < (unsigned long)iter->ent + iter->ent_size))
3767 /* OK if part of the temp seq buffer */
3768 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3769 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3772 /* Core rodata can not be freed */
3773 if (is_kernel_rodata(addr))
3776 if (trace_is_tracepoint_string(str))
3780 * Now this could be a module event, referencing core module
3781 * data, which is OK.
3786 trace_event = ftrace_find_event(iter->ent->type);
3790 event = container_of(trace_event, struct trace_event_call, event);
3791 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3794 /* Would rather have rodata, but this will suffice */
3795 if (within_module_core(addr, event->module))
3801 static const char *show_buffer(struct trace_seq *s)
3803 struct seq_buf *seq = &s->seq;
3805 seq_buf_terminate(seq);
3810 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3812 static int test_can_verify_check(const char *fmt, ...)
3819	 * The verifier depends on vsnprintf() modifying the va_list
3820	 * passed to it, where it is sent as a reference. Some architectures
3821	 * (like x86_32) pass it by value, which means that vsnprintf()
3822	 * does not modify the va_list passed to it, and the verifier
3823	 * would then need to be able to understand all the values that
3824	 * vsnprintf can use. If it is passed by value, then the verifier
3828 vsnprintf(buf, 16, "%d", ap);
3829 ret = va_arg(ap, int);
3835 static void test_can_verify(void)
3837 if (!test_can_verify_check("%d %d", 0, 1)) {
3838 pr_info("trace event string verifier disabled\n");
3839 static_branch_inc(&trace_no_verify);
3844 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3845 * @iter: The iterator that holds the seq buffer and the event being printed
3846 * @fmt: The format used to print the event
3847 * @ap: The va_list holding the data to print from @fmt.
3849 * This writes the data into the @iter->seq buffer using the data from
3850 * @fmt and @ap. If the format has a %s, then the source of the string
3851 * is examined to make sure it is safe to print, otherwise it will
3852 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3855 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3858 const char *p = fmt;
3862 if (WARN_ON_ONCE(!fmt))
3865 if (static_branch_unlikely(&trace_no_verify))
3868 /* Don't bother checking when doing a ftrace_dump() */
3869 if (iter->fmt == static_fmt_buf)
3878 /* We only care about %s and variants */
3879 for (i = 0; p[i]; i++) {
3880 if (i + 1 >= iter->fmt_size) {
3882			 * If we can't expand the copy buffer, just print it.
3885 if (!trace_iter_expand_format(iter))
3889 if (p[i] == '\\' && p[i+1]) {
3894 /* Need to test cases like %08.*s */
3895 for (j = 1; p[i+j]; j++) {
3896 if (isdigit(p[i+j]) ||
3899 if (p[i+j] == '*') {
3911 /* If no %s found then just print normally */
3915 /* Copy up to the %s, and print that */
3916 strncpy(iter->fmt, p, i);
3917 iter->fmt[i] = '\0';
3918 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3921 * If iter->seq is full, the above call no longer guarantees
3922 * that ap is in sync with fmt processing, and further calls
3923 * to va_arg() can return wrong positional arguments.
3925 * Ensure that ap is no longer used in this case.
3927 if (iter->seq.full) {
3933 len = va_arg(ap, int);
3935 /* The ap now points to the string data of the %s */
3936 str = va_arg(ap, const char *);
3939 * If you hit this warning, it is likely that the
3940 * trace event in question used %s on a string that
3941 * was saved at the time of the event, but may not be
3942 * around when the trace is read. Use __string(),
3943 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3944 * instead. See samples/trace_events/trace-events-sample.h
3947 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3948 "fmt: '%s' current_buffer: '%s'",
3949 fmt, show_buffer(&iter->seq))) {
3952 /* Try to safely read the string */
3954 if (len + 1 > iter->fmt_size)
3955 len = iter->fmt_size - 1;
3958 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3962 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3966 trace_seq_printf(&iter->seq, "(0x%px)", str);
3968 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3970 str = "[UNSAFE-MEMORY]";
3971 strcpy(iter->fmt, "%s");
3973 strncpy(iter->fmt, p + i, j + 1);
3974 iter->fmt[j+1] = '\0';
3977 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3979 trace_seq_printf(&iter->seq, iter->fmt, str);
3985 trace_seq_vprintf(&iter->seq, p, ap);
3988 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3990 const char *p, *new_fmt;
3993 if (WARN_ON_ONCE(!fmt))
3996 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
4000 new_fmt = q = iter->fmt;
4002 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
4003 if (!trace_iter_expand_format(iter))
4006 q += iter->fmt - new_fmt;
4007 new_fmt = iter->fmt;
4012 /* Replace %p with %px */
4016 } else if (p[0] == 'p' && !isalnum(p[1])) {
4027 #define STATIC_TEMP_BUF_SIZE 128
4028 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4030 /* Find the next real entry, without updating the iterator itself */
4031 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4032 int *ent_cpu, u64 *ent_ts)
4034 /* __find_next_entry will reset ent_size */
4035 int ent_size = iter->ent_size;
4036 struct trace_entry *entry;
4039 * If called from ftrace_dump(), then the iter->temp buffer
4040 * will be the static_temp_buf and not created from kmalloc.
4041 * If the entry size is greater than the buffer, we can
4042 * not save it. Just return NULL in that case. This is only
4043 * used to add markers when two consecutive events' time
4044 * stamps have a large delta. See trace_print_lat_context()
4046 if (iter->temp == static_temp_buf &&
4047 STATIC_TEMP_BUF_SIZE < ent_size)
4051 * The __find_next_entry() may call peek_next_entry(), which may
4052 * call ring_buffer_peek() that may make the contents of iter->ent
4053 * undefined. Need to copy iter->ent now.
4055 if (iter->ent && iter->ent != iter->temp) {
4056 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4057 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4059 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4064 iter->temp_size = iter->ent_size;
4066 memcpy(iter->temp, iter->ent, iter->ent_size);
4067 iter->ent = iter->temp;
4069 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4070 /* Put back the original ent_size */
4071 iter->ent_size = ent_size;
4076 /* Find the next real entry, and increment the iterator to the next entry */
4077 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4079 iter->ent = __find_next_entry(iter, &iter->cpu,
4080 &iter->lost_events, &iter->ts);
4083 trace_iterator_increment(iter);
4085 return iter->ent ? iter : NULL;
4088 static void trace_consume(struct trace_iterator *iter)
4090 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4091 &iter->lost_events);
4094 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4096 struct trace_iterator *iter = m->private;
4100 WARN_ON_ONCE(iter->leftover);
4104 /* can't go backwards */
4109 ent = trace_find_next_entry_inc(iter);
4113 while (ent && iter->idx < i)
4114 ent = trace_find_next_entry_inc(iter);
4121 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4123 struct ring_buffer_iter *buf_iter;
4124 unsigned long entries = 0;
4127 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4129 buf_iter = trace_buffer_iter(iter, cpu);
4133 ring_buffer_iter_reset(buf_iter);
4136 * We could have the case with the max latency tracers
4137 * that a reset never took place on a cpu. This is evident
4138 * by the timestamp being before the start of the buffer.
4140 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4141 if (ts >= iter->array_buffer->time_start)
4144 ring_buffer_iter_advance(buf_iter);
4147 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4151 * The current tracer is copied to avoid taking a global lock all around.
4154 static void *s_start(struct seq_file *m, loff_t *pos)
4156 struct trace_iterator *iter = m->private;
4157 struct trace_array *tr = iter->tr;
4158 int cpu_file = iter->cpu_file;
4163 mutex_lock(&trace_types_lock);
4164 if (unlikely(tr->current_trace != iter->trace)) {
4165 /* Close iter->trace before switching to the new current tracer */
4166 if (iter->trace->close)
4167 iter->trace->close(iter);
4168 iter->trace = tr->current_trace;
4169 /* Reopen the new current tracer */
4170 if (iter->trace->open)
4171 iter->trace->open(iter);
4173 mutex_unlock(&trace_types_lock);
4175 #ifdef CONFIG_TRACER_MAX_TRACE
4176 if (iter->snapshot && iter->trace->use_max_tr)
4177 return ERR_PTR(-EBUSY);
4180 if (*pos != iter->pos) {
4185 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4186 for_each_tracing_cpu(cpu)
4187 tracing_iter_reset(iter, cpu);
4189 tracing_iter_reset(iter, cpu_file);
4192 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4197 * If we overflowed the seq_file before, then we want
4198 * to just reuse the trace_seq buffer again.
4204 p = s_next(m, p, &l);
4208 trace_event_read_lock();
4209 trace_access_lock(cpu_file);
4213 static void s_stop(struct seq_file *m, void *p)
4215 struct trace_iterator *iter = m->private;
4217 #ifdef CONFIG_TRACER_MAX_TRACE
4218 if (iter->snapshot && iter->trace->use_max_tr)
4222 trace_access_unlock(iter->cpu_file);
4223 trace_event_read_unlock();
4227 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4228 unsigned long *entries, int cpu)
4230 unsigned long count;
4232 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4234 * If this buffer has skipped entries, then we hold all
4235 * entries for the trace and we need to ignore the
4236 * ones before the time stamp.
4238 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4239 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4240 /* total is the same as the entries */
4244 ring_buffer_overrun_cpu(buf->buffer, cpu);
4249 get_total_entries(struct array_buffer *buf,
4250 unsigned long *total, unsigned long *entries)
4258 for_each_tracing_cpu(cpu) {
4259 get_total_entries_cpu(buf, &t, &e, cpu);
4265 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4267 unsigned long total, entries;
4272 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4277 unsigned long trace_total_entries(struct trace_array *tr)
4279 unsigned long total, entries;
4284 get_total_entries(&tr->array_buffer, &total, &entries);
4289 static void print_lat_help_header(struct seq_file *m)
4291 seq_puts(m, "# _------=> CPU# \n"
4292 "# / _-----=> irqs-off/BH-disabled\n"
4293 "# | / _----=> need-resched \n"
4294 "# || / _---=> hardirq/softirq \n"
4295 "# ||| / _--=> preempt-depth \n"
4296 "# |||| / _-=> migrate-disable \n"
4297 "# ||||| / delay \n"
4298 "# cmd pid |||||| time | caller \n"
4299 "# \\ / |||||| \\ | / \n");
4302 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4304 unsigned long total;
4305 unsigned long entries;
4307 get_total_entries(buf, &total, &entries);
4308 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4309 entries, total, num_online_cpus());
4313 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4316 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4318 print_event_info(buf, m);
4320 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4321 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4324 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4327 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4328 static const char space[] = " ";
4329 int prec = tgid ? 12 : 2;
4331 print_event_info(buf, m);
4333 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4334 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4335 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4336 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4337 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4338 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4339 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4340 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4344 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4346 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4347 struct array_buffer *buf = iter->array_buffer;
4348 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4349 struct tracer *type = iter->trace;
4350 unsigned long entries;
4351 unsigned long total;
4352 const char *name = type->name;
4354 get_total_entries(buf, &total, &entries);
4356 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4358 seq_puts(m, "# -----------------------------------"
4359 "---------------------------------\n");
4360 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4361 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4362 nsecs_to_usecs(data->saved_latency),
4366 preempt_model_none() ? "server" :
4367 preempt_model_voluntary() ? "desktop" :
4368 preempt_model_full() ? "preempt" :
4369 preempt_model_rt() ? "preempt_rt" :
4371 /* These are reserved for later use */
4374 seq_printf(m, " #P:%d)\n", num_online_cpus());
4378 seq_puts(m, "# -----------------\n");
4379 seq_printf(m, "# | task: %.16s-%d "
4380 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4381 data->comm, data->pid,
4382 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4383 data->policy, data->rt_priority);
4384 seq_puts(m, "# -----------------\n");
4386 if (data->critical_start) {
4387 seq_puts(m, "# => started at: ");
4388 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4389 trace_print_seq(m, &iter->seq);
4390 seq_puts(m, "\n# => ended at: ");
4391 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4392 trace_print_seq(m, &iter->seq);
4393 seq_puts(m, "\n#\n");
4399 static void test_cpu_buff_start(struct trace_iterator *iter)
4401 struct trace_seq *s = &iter->seq;
4402 struct trace_array *tr = iter->tr;
4404 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4407 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4410 if (cpumask_available(iter->started) &&
4411 cpumask_test_cpu(iter->cpu, iter->started))
4414 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4417 if (cpumask_available(iter->started))
4418 cpumask_set_cpu(iter->cpu, iter->started);
4420 /* Don't print started cpu buffer for the first entry of the trace */
4422 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4426 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4428 struct trace_array *tr = iter->tr;
4429 struct trace_seq *s = &iter->seq;
4430 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4431 struct trace_entry *entry;
4432 struct trace_event *event;
4436 test_cpu_buff_start(iter);
4438 event = ftrace_find_event(entry->type);
4440 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4441 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4442 trace_print_lat_context(iter);
4444 trace_print_context(iter);
4447 if (trace_seq_has_overflowed(s))
4448 return TRACE_TYPE_PARTIAL_LINE;
4451 if (tr->trace_flags & TRACE_ITER_FIELDS)
4452 return print_event_fields(iter, event);
4453 return event->funcs->trace(iter, sym_flags, event);
4456 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4458 return trace_handle_return(s);
4461 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4463 struct trace_array *tr = iter->tr;
4464 struct trace_seq *s = &iter->seq;
4465 struct trace_entry *entry;
4466 struct trace_event *event;
4470 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4471 trace_seq_printf(s, "%d %d %llu ",
4472 entry->pid, iter->cpu, iter->ts);
4474 if (trace_seq_has_overflowed(s))
4475 return TRACE_TYPE_PARTIAL_LINE;
4477 event = ftrace_find_event(entry->type);
4479 return event->funcs->raw(iter, 0, event);
4481 trace_seq_printf(s, "%d ?\n", entry->type);
4483 return trace_handle_return(s);
4486 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4488 struct trace_array *tr = iter->tr;
4489 struct trace_seq *s = &iter->seq;
4490 unsigned char newline = '\n';
4491 struct trace_entry *entry;
4492 struct trace_event *event;
4496 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4497 SEQ_PUT_HEX_FIELD(s, entry->pid);
4498 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4499 SEQ_PUT_HEX_FIELD(s, iter->ts);
4500 if (trace_seq_has_overflowed(s))
4501 return TRACE_TYPE_PARTIAL_LINE;
4504 event = ftrace_find_event(entry->type);
4506 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4507 if (ret != TRACE_TYPE_HANDLED)
4511 SEQ_PUT_FIELD(s, newline);
4513 return trace_handle_return(s);
4516 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4518 struct trace_array *tr = iter->tr;
4519 struct trace_seq *s = &iter->seq;
4520 struct trace_entry *entry;
4521 struct trace_event *event;
4525 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4526 SEQ_PUT_FIELD(s, entry->pid);
4527 SEQ_PUT_FIELD(s, iter->cpu);
4528 SEQ_PUT_FIELD(s, iter->ts);
4529 if (trace_seq_has_overflowed(s))
4530 return TRACE_TYPE_PARTIAL_LINE;
4533 event = ftrace_find_event(entry->type);
4534 return event ? event->funcs->binary(iter, 0, event) :
4538 int trace_empty(struct trace_iterator *iter)
4540 struct ring_buffer_iter *buf_iter;
4543 /* If we are looking at one CPU buffer, only check that one */
4544 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4545 cpu = iter->cpu_file;
4546 buf_iter = trace_buffer_iter(iter, cpu);
4548 if (!ring_buffer_iter_empty(buf_iter))
4551 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4557 for_each_tracing_cpu(cpu) {
4558 buf_iter = trace_buffer_iter(iter, cpu);
4560 if (!ring_buffer_iter_empty(buf_iter))
4563 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4571 /* Called with trace_event_read_lock() held. */
4572 enum print_line_t print_trace_line(struct trace_iterator *iter)
4574 struct trace_array *tr = iter->tr;
4575 unsigned long trace_flags = tr->trace_flags;
4576 enum print_line_t ret;
4578 if (iter->lost_events) {
4579 if (iter->lost_events == (unsigned long)-1)
4580 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4583 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4584 iter->cpu, iter->lost_events);
4585 if (trace_seq_has_overflowed(&iter->seq))
4586 return TRACE_TYPE_PARTIAL_LINE;
4589 if (iter->trace && iter->trace->print_line) {
4590 ret = iter->trace->print_line(iter);
4591 if (ret != TRACE_TYPE_UNHANDLED)
4595 if (iter->ent->type == TRACE_BPUTS &&
4596 trace_flags & TRACE_ITER_PRINTK &&
4597 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4598 return trace_print_bputs_msg_only(iter);
4600 if (iter->ent->type == TRACE_BPRINT &&
4601 trace_flags & TRACE_ITER_PRINTK &&
4602 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4603 return trace_print_bprintk_msg_only(iter);
4605 if (iter->ent->type == TRACE_PRINT &&
4606 trace_flags & TRACE_ITER_PRINTK &&
4607 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4608 return trace_print_printk_msg_only(iter);
4610 if (trace_flags & TRACE_ITER_BIN)
4611 return print_bin_fmt(iter);
4613 if (trace_flags & TRACE_ITER_HEX)
4614 return print_hex_fmt(iter);
4616 if (trace_flags & TRACE_ITER_RAW)
4617 return print_raw_fmt(iter);
4619 return print_trace_fmt(iter);
4622 void trace_latency_header(struct seq_file *m)
4624 struct trace_iterator *iter = m->private;
4625 struct trace_array *tr = iter->tr;
4627 /* print nothing if the buffers are empty */
4628 if (trace_empty(iter))
4631 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4632 print_trace_header(m, iter);
4634 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4635 print_lat_help_header(m);
4638 void trace_default_header(struct seq_file *m)
4640 struct trace_iterator *iter = m->private;
4641 struct trace_array *tr = iter->tr;
4642 unsigned long trace_flags = tr->trace_flags;
4644 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4647 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4648 /* print nothing if the buffers are empty */
4649 if (trace_empty(iter))
4651 print_trace_header(m, iter);
4652 if (!(trace_flags & TRACE_ITER_VERBOSE))
4653 print_lat_help_header(m);
4655 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4656 if (trace_flags & TRACE_ITER_IRQ_INFO)
4657 print_func_help_header_irq(iter->array_buffer,
4660 print_func_help_header(iter->array_buffer, m,
4666 static void test_ftrace_alive(struct seq_file *m)
4668 if (!ftrace_is_dead())
4670 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4671 "# MAY BE MISSING FUNCTION EVENTS\n");
4674 #ifdef CONFIG_TRACER_MAX_TRACE
4675 static void show_snapshot_main_help(struct seq_file *m)
4677 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4678 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4679 "# Takes a snapshot of the main buffer.\n"
4680 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4681 "# (Doesn't have to be '2' works with any number that\n"
4682 "# is not a '0' or '1')\n");
4685 static void show_snapshot_percpu_help(struct seq_file *m)
4687 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4688 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4689 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4690 "# Takes a snapshot of the main buffer for this cpu.\n");
4692 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4693 "# Must use main snapshot file to allocate.\n");
4695 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4696 "# (Doesn't have to be '2' works with any number that\n"
4697 "# is not a '0' or '1')\n");
4700 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4702 if (iter->tr->allocated_snapshot)
4703 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4705 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4707 seq_puts(m, "# Snapshot commands:\n");
4708 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4709 show_snapshot_main_help(m);
4711 show_snapshot_percpu_help(m);
4714 /* Should never be called */
4715 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4718 static int s_show(struct seq_file *m, void *v)
4720 struct trace_iterator *iter = v;
4723 if (iter->ent == NULL) {
4725 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4727 test_ftrace_alive(m);
4729 if (iter->snapshot && trace_empty(iter))
4730 print_snapshot_help(m, iter);
4731 else if (iter->trace && iter->trace->print_header)
4732 iter->trace->print_header(m);
4734 trace_default_header(m);
4736 } else if (iter->leftover) {
4738 * If we filled the seq_file buffer earlier, we
4739 * want to just show it now.
4741 ret = trace_print_seq(m, &iter->seq);
4743 /* ret should this time be zero, but you never know */
4744 iter->leftover = ret;
4747 ret = print_trace_line(iter);
4748 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4750 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4752 ret = trace_print_seq(m, &iter->seq);
4754 * If we overflow the seq_file buffer, then it will
4755 * ask us for this data again at start up.
4757 * ret is 0 if seq_file write succeeded.
4760 iter->leftover = ret;
4767 * Should be used after trace_array_get(), trace_types_lock
4768 * ensures that i_cdev was already initialized.
4770 static inline int tracing_get_cpu(struct inode *inode)
4772 if (inode->i_cdev) /* See trace_create_cpu_file() */
4773 return (long)inode->i_cdev - 1;
4774 return RING_BUFFER_ALL_CPUS;
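/*
 * Sketch of the assumed encoding (see trace_create_cpu_file()): per-CPU
 * files store cpu + 1 in inode->i_cdev, so NULL still means "all CPUs"
 * while a non-NULL value N maps back to CPU N - 1, which is what the
 * "- 1" above recovers.
 */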
4777 static const struct seq_operations tracer_seq_ops = {
4785 * Note, as iter itself can be allocated and freed in different
4786 * ways, this function is only used to free its content, and not
4787  * the iterator itself. The only requirement on all the allocations
4788  * is that they zero all fields (kzalloc), as freeing works with
4789  * either allocated content or NULL.
4791 static void free_trace_iter_content(struct trace_iterator *iter)
4793 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4794 if (iter->fmt != static_fmt_buf)
4798 kfree(iter->buffer_iter);
4799 mutex_destroy(&iter->mutex);
4800 free_cpumask_var(iter->started);
4803 static struct trace_iterator *
4804 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4806 struct trace_array *tr = inode->i_private;
4807 struct trace_iterator *iter;
4810 if (tracing_disabled)
4811 return ERR_PTR(-ENODEV);
4813 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4815 return ERR_PTR(-ENOMEM);
4817 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4819 if (!iter->buffer_iter)
4823 * trace_find_next_entry() may need to save off iter->ent.
4824 	 * It will place it into the iter->temp buffer. As most
4825 	 * events are less than 128 bytes, allocate a buffer of that size.
4826 * If one is greater, then trace_find_next_entry() will
4827 * allocate a new buffer to adjust for the bigger iter->ent.
4828 * It's not critical if it fails to get allocated here.
4830 iter->temp = kmalloc(128, GFP_KERNEL);
4832 iter->temp_size = 128;
4835 	 * trace_event_printf() may need to modify the given format
4836 	 * string to replace %p with %px so that it shows the real address
4837 	 * instead of a hash value. However, that is only needed for event
4838 	 * tracing; other tracers may not need it. Defer the allocation
4839 	 * until it is needed.
4844 mutex_lock(&trace_types_lock);
4845 iter->trace = tr->current_trace;
4847 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4852 #ifdef CONFIG_TRACER_MAX_TRACE
4853 /* Currently only the top directory has a snapshot */
4854 if (tr->current_trace->print_max || snapshot)
4855 iter->array_buffer = &tr->max_buffer;
4858 iter->array_buffer = &tr->array_buffer;
4859 iter->snapshot = snapshot;
4861 iter->cpu_file = tracing_get_cpu(inode);
4862 mutex_init(&iter->mutex);
4864 /* Notify the tracer early; before we stop tracing. */
4865 if (iter->trace->open)
4866 iter->trace->open(iter);
4868 /* Annotate start of buffers if we had overruns */
4869 if (ring_buffer_overruns(iter->array_buffer->buffer))
4870 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4872 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4873 if (trace_clocks[tr->clock_id].in_ns)
4874 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4877 * If pause-on-trace is enabled, then stop the trace while
4878 * dumping, unless this is the "snapshot" file
4880 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4881 tracing_stop_tr(tr);
4883 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4884 for_each_tracing_cpu(cpu) {
4885 iter->buffer_iter[cpu] =
4886 ring_buffer_read_prepare(iter->array_buffer->buffer,
4889 ring_buffer_read_prepare_sync();
4890 for_each_tracing_cpu(cpu) {
4891 ring_buffer_read_start(iter->buffer_iter[cpu]);
4892 tracing_iter_reset(iter, cpu);
4895 cpu = iter->cpu_file;
4896 iter->buffer_iter[cpu] =
4897 ring_buffer_read_prepare(iter->array_buffer->buffer,
4899 ring_buffer_read_prepare_sync();
4900 ring_buffer_read_start(iter->buffer_iter[cpu]);
4901 tracing_iter_reset(iter, cpu);
4904 mutex_unlock(&trace_types_lock);
4909 mutex_unlock(&trace_types_lock);
4910 free_trace_iter_content(iter);
4912 seq_release_private(inode, file);
4913 return ERR_PTR(-ENOMEM);
4916 int tracing_open_generic(struct inode *inode, struct file *filp)
4920 ret = tracing_check_open_get_tr(NULL);
4924 filp->private_data = inode->i_private;
4928 bool tracing_is_disabled(void)
4930 	return (tracing_disabled) ? true : false;
4934 * Open and update trace_array ref count.
4935 * Must have the current trace_array passed to it.
4937 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4939 struct trace_array *tr = inode->i_private;
4942 ret = tracing_check_open_get_tr(tr);
4946 filp->private_data = inode->i_private;
4952 * The private pointer of the inode is the trace_event_file.
4953 * Update the tr ref count associated to it.
4955 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4957 struct trace_event_file *file = inode->i_private;
4960 ret = tracing_check_open_get_tr(file->tr);
4964 mutex_lock(&event_mutex);
4966 /* Fail if the file is marked for removal */
4967 if (file->flags & EVENT_FILE_FL_FREED) {
4968 trace_array_put(file->tr);
4971 event_file_get(file);
4974 mutex_unlock(&event_mutex);
4978 filp->private_data = inode->i_private;
4983 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4985 struct trace_event_file *file = inode->i_private;
4987 trace_array_put(file->tr);
4988 event_file_put(file);
4993 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4995 tracing_release_file_tr(inode, filp);
4996 return single_release(inode, filp);
4999 static int tracing_mark_open(struct inode *inode, struct file *filp)
5001 stream_open(inode, filp);
5002 return tracing_open_generic_tr(inode, filp);
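/*
 * Illustrative usage of the trace_marker file opened here (the marker text
 * is arbitrary): each write becomes a TRACE_PRINT entry in the ring buffer,
 * e.g.
 *
 *   echo "hello from user space" > /sys/kernel/tracing/trace_marker
 *
 * stream_open() above marks the file as a stream (non-seekable).
 */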
5005 static int tracing_release(struct inode *inode, struct file *file)
5007 struct trace_array *tr = inode->i_private;
5008 struct seq_file *m = file->private_data;
5009 struct trace_iterator *iter;
5012 if (!(file->f_mode & FMODE_READ)) {
5013 trace_array_put(tr);
5017 /* Writes do not use seq_file */
5019 mutex_lock(&trace_types_lock);
5021 for_each_tracing_cpu(cpu) {
5022 if (iter->buffer_iter[cpu])
5023 ring_buffer_read_finish(iter->buffer_iter[cpu]);
5026 if (iter->trace && iter->trace->close)
5027 iter->trace->close(iter);
5029 if (!iter->snapshot && tr->stop_count)
5030 /* reenable tracing if it was previously enabled */
5031 tracing_start_tr(tr);
5033 __trace_array_put(tr);
5035 mutex_unlock(&trace_types_lock);
5037 free_trace_iter_content(iter);
5038 seq_release_private(inode, file);
5043 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
5045 struct trace_array *tr = inode->i_private;
5047 trace_array_put(tr);
5051 static int tracing_single_release_tr(struct inode *inode, struct file *file)
5053 struct trace_array *tr = inode->i_private;
5055 trace_array_put(tr);
5057 return single_release(inode, file);
5060 static int tracing_open(struct inode *inode, struct file *file)
5062 struct trace_array *tr = inode->i_private;
5063 struct trace_iterator *iter;
5066 ret = tracing_check_open_get_tr(tr);
5070 /* If this file was open for write, then erase contents */
5071 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5072 int cpu = tracing_get_cpu(inode);
5073 struct array_buffer *trace_buf = &tr->array_buffer;
5075 #ifdef CONFIG_TRACER_MAX_TRACE
5076 if (tr->current_trace->print_max)
5077 trace_buf = &tr->max_buffer;
5080 if (cpu == RING_BUFFER_ALL_CPUS)
5081 tracing_reset_online_cpus(trace_buf);
5083 tracing_reset_cpu(trace_buf, cpu);
5086 if (file->f_mode & FMODE_READ) {
5087 iter = __tracing_open(inode, file, false);
5089 ret = PTR_ERR(iter);
5090 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5091 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5095 trace_array_put(tr);
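/*
 * Illustrative effect of the O_TRUNC path above: a plain
 *
 *   echo > /sys/kernel/tracing/trace
 *
 * opens the file for write with O_TRUNC and therefore resets the buffer
 * contents without producing any output.
 */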
5101 * Some tracers are not suitable for instance buffers.
5102 * A tracer is always available for the global array (toplevel)
5103 * or if it explicitly states that it is.
5106 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5108 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5111 /* Find the next tracer that this trace array may use */
5112 static struct tracer *
5113 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5115 while (t && !trace_ok_for_array(t, tr))
5122 t_next(struct seq_file *m, void *v, loff_t *pos)
5124 struct trace_array *tr = m->private;
5125 struct tracer *t = v;
5130 t = get_tracer_for_array(tr, t->next);
5135 static void *t_start(struct seq_file *m, loff_t *pos)
5137 struct trace_array *tr = m->private;
5141 mutex_lock(&trace_types_lock);
5143 t = get_tracer_for_array(tr, trace_types);
5144 for (; t && l < *pos; t = t_next(m, t, &l))
5150 static void t_stop(struct seq_file *m, void *p)
5152 mutex_unlock(&trace_types_lock);
5155 static int t_show(struct seq_file *m, void *v)
5157 struct tracer *t = v;
5162 seq_puts(m, t->name);
5171 static const struct seq_operations show_traces_seq_ops = {
5178 static int show_traces_open(struct inode *inode, struct file *file)
5180 struct trace_array *tr = inode->i_private;
5184 ret = tracing_check_open_get_tr(tr);
5188 ret = seq_open(file, &show_traces_seq_ops);
5190 trace_array_put(tr);
5194 m = file->private_data;
5200 static int show_traces_release(struct inode *inode, struct file *file)
5202 struct trace_array *tr = inode->i_private;
5204 trace_array_put(tr);
5205 return seq_release(inode, file);
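/*
 * Illustrative read (the exact list depends on the kernel configuration):
 *
 *   cat /sys/kernel/tracing/available_tracers
 *   function_graph function nop
 *
 * Each name printed by t_show() above may be echoed into current_tracer.
 */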
5209 tracing_write_stub(struct file *filp, const char __user *ubuf,
5210 size_t count, loff_t *ppos)
5215 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5219 if (file->f_mode & FMODE_READ)
5220 ret = seq_lseek(file, offset, whence);
5222 file->f_pos = ret = 0;
5227 static const struct file_operations tracing_fops = {
5228 .open = tracing_open,
5230 .read_iter = seq_read_iter,
5231 .splice_read = copy_splice_read,
5232 .write = tracing_write_stub,
5233 .llseek = tracing_lseek,
5234 .release = tracing_release,
5237 static const struct file_operations show_traces_fops = {
5238 .open = show_traces_open,
5240 .llseek = seq_lseek,
5241 .release = show_traces_release,
5245 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5246 size_t count, loff_t *ppos)
5248 struct trace_array *tr = file_inode(filp)->i_private;
5252 len = snprintf(NULL, 0, "%*pb\n",
5253 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5254 mask_str = kmalloc(len, GFP_KERNEL);
5258 len = snprintf(mask_str, len, "%*pb\n",
5259 cpumask_pr_args(tr->tracing_cpumask));
5264 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5272 int tracing_set_cpumask(struct trace_array *tr,
5273 cpumask_var_t tracing_cpumask_new)
5280 local_irq_disable();
5281 arch_spin_lock(&tr->max_lock);
5282 for_each_tracing_cpu(cpu) {
5284 * Increase/decrease the disabled counter if we are
5285 * about to flip a bit in the cpumask:
5287 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5288 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5289 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5290 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5291 #ifdef CONFIG_TRACER_MAX_TRACE
5292 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5295 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5296 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5297 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5298 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5299 #ifdef CONFIG_TRACER_MAX_TRACE
5300 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5304 arch_spin_unlock(&tr->max_lock);
5307 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5313 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5314 size_t count, loff_t *ppos)
5316 struct trace_array *tr = file_inode(filp)->i_private;
5317 cpumask_var_t tracing_cpumask_new;
5320 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5323 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5327 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5331 free_cpumask_var(tracing_cpumask_new);
5336 free_cpumask_var(tracing_cpumask_new);
5341 static const struct file_operations tracing_cpumask_fops = {
5342 .open = tracing_open_generic_tr,
5343 .read = tracing_cpumask_read,
5344 .write = tracing_cpumask_write,
5345 .release = tracing_release_generic_tr,
5346 .llseek = generic_file_llseek,
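/*
 * Illustrative usage: the file takes a hex CPU mask (parsed by
 * cpumask_parse_user() above), so for example
 *
 *   echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * limits tracing to CPUs 0 and 1, while reading the file prints the
 * current mask via the "%*pb" format.
 */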
5349 static int tracing_trace_options_show(struct seq_file *m, void *v)
5351 struct tracer_opt *trace_opts;
5352 struct trace_array *tr = m->private;
5356 mutex_lock(&trace_types_lock);
5357 tracer_flags = tr->current_trace->flags->val;
5358 trace_opts = tr->current_trace->flags->opts;
5360 for (i = 0; trace_options[i]; i++) {
5361 if (tr->trace_flags & (1 << i))
5362 seq_printf(m, "%s\n", trace_options[i]);
5364 seq_printf(m, "no%s\n", trace_options[i]);
5367 for (i = 0; trace_opts[i].name; i++) {
5368 if (tracer_flags & trace_opts[i].bit)
5369 seq_printf(m, "%s\n", trace_opts[i].name);
5371 seq_printf(m, "no%s\n", trace_opts[i].name);
5373 mutex_unlock(&trace_types_lock);
5378 static int __set_tracer_option(struct trace_array *tr,
5379 struct tracer_flags *tracer_flags,
5380 struct tracer_opt *opts, int neg)
5382 struct tracer *trace = tracer_flags->trace;
5385 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5390 tracer_flags->val &= ~opts->bit;
5392 tracer_flags->val |= opts->bit;
5396 /* Try to assign a tracer specific option */
5397 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5399 struct tracer *trace = tr->current_trace;
5400 struct tracer_flags *tracer_flags = trace->flags;
5401 struct tracer_opt *opts = NULL;
5404 for (i = 0; tracer_flags->opts[i].name; i++) {
5405 opts = &tracer_flags->opts[i];
5407 if (strcmp(cmp, opts->name) == 0)
5408 return __set_tracer_option(tr, trace->flags, opts, neg);
5414 /* Some tracers require overwrite to stay enabled */
5415 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5417 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5423 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5427 if ((mask == TRACE_ITER_RECORD_TGID) ||
5428 (mask == TRACE_ITER_RECORD_CMD))
5429 lockdep_assert_held(&event_mutex);
5431 /* do nothing if flag is already set */
5432 if (!!(tr->trace_flags & mask) == !!enabled)
5435 /* Give the tracer a chance to approve the change */
5436 if (tr->current_trace->flag_changed)
5437 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5441 tr->trace_flags |= mask;
5443 tr->trace_flags &= ~mask;
5445 if (mask == TRACE_ITER_RECORD_CMD)
5446 trace_event_enable_cmd_record(enabled);
5448 if (mask == TRACE_ITER_RECORD_TGID) {
5450 tgid_map_max = pid_max;
5451 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5455 * Pairs with smp_load_acquire() in
5456 * trace_find_tgid_ptr() to ensure that if it observes
5457 * the tgid_map we just allocated then it also observes
5458 * the corresponding tgid_map_max value.
5460 smp_store_release(&tgid_map, map);
5463 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5467 trace_event_enable_tgid_record(enabled);
5470 if (mask == TRACE_ITER_EVENT_FORK)
5471 trace_event_follow_fork(tr, enabled);
5473 if (mask == TRACE_ITER_FUNC_FORK)
5474 ftrace_pid_follow_fork(tr, enabled);
5476 if (mask == TRACE_ITER_OVERWRITE) {
5477 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5478 #ifdef CONFIG_TRACER_MAX_TRACE
5479 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5483 if (mask == TRACE_ITER_PRINTK) {
5484 trace_printk_start_stop_comm(enabled);
5485 trace_printk_control(enabled);
5491 int trace_set_options(struct trace_array *tr, char *option)
5496 size_t orig_len = strlen(option);
5499 cmp = strstrip(option);
5501 len = str_has_prefix(cmp, "no");
5507 mutex_lock(&event_mutex);
5508 mutex_lock(&trace_types_lock);
5510 ret = match_string(trace_options, -1, cmp);
5511 /* If no option could be set, test the specific tracer options */
5513 ret = set_tracer_option(tr, cmp, neg);
5515 ret = set_tracer_flag(tr, 1 << ret, !neg);
5517 mutex_unlock(&trace_types_lock);
5518 mutex_unlock(&event_mutex);
5521 * If the first trailing whitespace is replaced with '\0' by strstrip,
5522 * turn it back into a space.
5524 if (orig_len > strlen(option))
5525 option[strlen(option)] = ' ';
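/*
 * Illustrative usage of the option parsing above: writing an option name
 * sets the flag and prefixing it with "no" clears it, e.g.
 *
 *   echo sym-offset   > /sys/kernel/tracing/trace_options
 *   echo nosym-offset > /sys/kernel/tracing/trace_options
 *
 * Names that match no global option fall through to the current tracer's
 * private options via set_tracer_option().
 */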
5530 static void __init apply_trace_boot_options(void)
5532 char *buf = trace_boot_options_buf;
5536 option = strsep(&buf, ",");
5542 trace_set_options(&global_trace, option);
5544 /* Put back the comma to allow this to be called again */
5551 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5552 size_t cnt, loff_t *ppos)
5554 struct seq_file *m = filp->private_data;
5555 struct trace_array *tr = m->private;
5559 if (cnt >= sizeof(buf))
5562 if (copy_from_user(buf, ubuf, cnt))
5567 ret = trace_set_options(tr, buf);
5576 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5578 struct trace_array *tr = inode->i_private;
5581 ret = tracing_check_open_get_tr(tr);
5585 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5587 trace_array_put(tr);
5592 static const struct file_operations tracing_iter_fops = {
5593 .open = tracing_trace_options_open,
5595 .llseek = seq_lseek,
5596 .release = tracing_single_release_tr,
5597 .write = tracing_trace_options_write,
5600 static const char readme_msg[] =
5601 "tracing mini-HOWTO:\n\n"
5602 "# echo 0 > tracing_on : quick way to disable tracing\n"
5603 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5604 " Important files:\n"
5605 " trace\t\t\t- The static contents of the buffer\n"
5606 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5607 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5608 " current_tracer\t- function and latency tracers\n"
5609 " available_tracers\t- list of configured tracers for current_tracer\n"
5610 " error_log\t- error log for failed commands (that support it)\n"
5611 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5612 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5613 " trace_clock\t\t- change the clock used to order events\n"
5614 " local: Per cpu clock but may not be synced across CPUs\n"
5615 " global: Synced across CPUs but slows tracing down.\n"
5616 " counter: Not a clock, but just an increment\n"
5617 " uptime: Jiffy counter from time of boot\n"
5618 " perf: Same clock that perf events use\n"
5619 #ifdef CONFIG_X86_64
5620 " x86-tsc: TSC cycle counter\n"
5622 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5623 " delta: Delta difference against a buffer-wide timestamp\n"
5624 " absolute: Absolute (standalone) timestamp\n"
5625 	"\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
5626 	"\n  trace_marker_raw\t\t- Writes into this file insert binary data into the kernel buffer\n"
5627 " tracing_cpumask\t- Limit which CPUs to trace\n"
5628 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5629 "\t\t\t Remove sub-buffer with rmdir\n"
5630 " trace_options\t\t- Set format or modify how tracing happens\n"
5631 "\t\t\t Disable an option by prefixing 'no' to the\n"
5632 "\t\t\t option name\n"
5633 	"  saved_cmdlines_size\t- echo the number of comm-pid entries to cache in here\n"
5634 #ifdef CONFIG_DYNAMIC_FTRACE
5635 "\n available_filter_functions - list of functions that can be filtered on\n"
5636 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5637 "\t\t\t functions\n"
5638 "\t accepts: func_full_name or glob-matching-pattern\n"
5639 "\t modules: Can select a group via module\n"
5640 "\t Format: :mod:<module-name>\n"
5641 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5642 "\t triggers: a command to perform when function is hit\n"
5643 "\t Format: <function>:<trigger>[:count]\n"
5644 "\t trigger: traceon, traceoff\n"
5645 "\t\t enable_event:<system>:<event>\n"
5646 "\t\t disable_event:<system>:<event>\n"
5647 #ifdef CONFIG_STACKTRACE
5650 #ifdef CONFIG_TRACER_SNAPSHOT
5655 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5656 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5657 "\t The first one will disable tracing every time do_fault is hit\n"
5658 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5659 	"\t     The first time do_trap is hit and it disables tracing, the\n"
5660 "\t counter will decrement to 2. If tracing is already disabled,\n"
5661 "\t the counter will not decrement. It only decrements when the\n"
5662 "\t trigger did work\n"
5663 "\t To remove trigger without count:\n"
5664 	"\t     echo '!<function>:<trigger>' > set_ftrace_filter\n"
5665 "\t To remove trigger with a count:\n"
5666 	"\t     echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
5667 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5668 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5669 "\t modules: Can select a group via module command :mod:\n"
5670 "\t Does not accept triggers\n"
5671 #endif /* CONFIG_DYNAMIC_FTRACE */
5672 #ifdef CONFIG_FUNCTION_TRACER
5673 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5675 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5678 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5679 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5680 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5681 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5683 #ifdef CONFIG_TRACER_SNAPSHOT
5684 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5685 "\t\t\t snapshot buffer. Read the contents for more\n"
5686 "\t\t\t information\n"
5688 #ifdef CONFIG_STACK_TRACER
5689 " stack_trace\t\t- Shows the max stack trace when active\n"
5690 " stack_max_size\t- Shows current max stack size that was traced\n"
5691 "\t\t\t Write into this file to reset the max size (trigger a\n"
5692 "\t\t\t new trace)\n"
5693 #ifdef CONFIG_DYNAMIC_FTRACE
5694 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5697 #endif /* CONFIG_STACK_TRACER */
5698 #ifdef CONFIG_DYNAMIC_EVENTS
5699 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5700 "\t\t\t Write into this file to define/undefine new trace events.\n"
5702 #ifdef CONFIG_KPROBE_EVENTS
5703 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5704 "\t\t\t Write into this file to define/undefine new trace events.\n"
5706 #ifdef CONFIG_UPROBE_EVENTS
5707 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5708 "\t\t\t Write into this file to define/undefine new trace events.\n"
5710 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5711 defined(CONFIG_FPROBE_EVENTS)
5712 "\t accepts: event-definitions (one definition per line)\n"
5713 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5714 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5715 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5717 #ifdef CONFIG_FPROBE_EVENTS
5718 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5719 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5721 #ifdef CONFIG_HIST_TRIGGERS
5722 "\t s:[synthetic/]<event> <field> [<field>]\n"
5724 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5725 "\t -:[<group>/][<event>]\n"
5726 #ifdef CONFIG_KPROBE_EVENTS
5727 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5728   "\t    place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5730 #ifdef CONFIG_UPROBE_EVENTS
5731   "\t    place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5733 "\t args: <name>=fetcharg[:type]\n"
5734 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5735 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5736 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5737 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5738 "\t <argname>[->field[->field|.field...]],\n"
5740 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5743 "\t $stack<index>, $stack, $retval, $comm,\n"
5745 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5746 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5747 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5748 "\t symstr, <type>\\[<array-size>\\]\n"
5749 #ifdef CONFIG_HIST_TRIGGERS
5750 "\t field: <stype> <name>;\n"
5751 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5752 "\t [unsigned] char/int/long\n"
5754 	"\t    efield: For event probes ('e' types), the field is one of the fields\n"
5755 "\t of the <attached-group>/<attached-event>.\n"
5757 " events/\t\t- Directory containing all trace event subsystems:\n"
5758 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5759 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5760 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5762 " filter\t\t- If set, only events passing filter are traced\n"
5763 " events/<system>/<event>/\t- Directory containing control files for\n"
5765 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5766 " filter\t\t- If set, only events passing filter are traced\n"
5767 " trigger\t\t- If set, a command to perform when event is hit\n"
5768 "\t Format: <trigger>[:count][if <filter>]\n"
5769 "\t trigger: traceon, traceoff\n"
5770 "\t enable_event:<system>:<event>\n"
5771 "\t disable_event:<system>:<event>\n"
5772 #ifdef CONFIG_HIST_TRIGGERS
5773 "\t enable_hist:<system>:<event>\n"
5774 "\t disable_hist:<system>:<event>\n"
5776 #ifdef CONFIG_STACKTRACE
5779 #ifdef CONFIG_TRACER_SNAPSHOT
5782 #ifdef CONFIG_HIST_TRIGGERS
5783 "\t\t hist (see below)\n"
5785 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5786 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5787 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5788 "\t events/block/block_unplug/trigger\n"
5789 "\t The first disables tracing every time block_unplug is hit.\n"
5790 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5791 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5792 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5793 "\t Like function triggers, the counter is only decremented if it\n"
5794 "\t enabled or disabled tracing.\n"
5795 "\t To remove a trigger without a count:\n"
5796 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5797 "\t To remove a trigger with a count:\n"
5798 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5799 "\t Filters can be ignored when removing a trigger.\n"
5800 #ifdef CONFIG_HIST_TRIGGERS
5801 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5802 "\t Format: hist:keys=<field1[,field2,...]>\n"
5803 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5804 "\t [:values=<field1[,field2,...]>]\n"
5805 "\t [:sort=<field1[,field2,...]>]\n"
5806 "\t [:size=#entries]\n"
5807 "\t [:pause][:continue][:clear]\n"
5808 "\t [:name=histname1]\n"
5809 "\t [:nohitcount]\n"
5810 "\t [:<handler>.<action>]\n"
5811 "\t [if <filter>]\n\n"
5812 "\t Note, special fields can be used as well:\n"
5813 "\t common_timestamp - to record current timestamp\n"
5814 "\t common_cpu - to record the CPU the event happened on\n"
5816 "\t A hist trigger variable can be:\n"
5817 	"\t    - a reference to a field e.g. x=common_timestamp,\n"
5818 "\t - a reference to another variable e.g. y=$x,\n"
5819 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5820 	"\t    - an arithmetic expression: e.g. time_secs=common_timestamp/1000\n"
5822 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5823 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5824 "\t variable reference, field or numeric literal.\n"
5826 "\t When a matching event is hit, an entry is added to a hash\n"
5827 "\t table using the key(s) and value(s) named, and the value of a\n"
5828 "\t sum called 'hitcount' is incremented. Keys and values\n"
5829 "\t correspond to fields in the event's format description. Keys\n"
5830 "\t can be any field, or the special string 'common_stacktrace'.\n"
5831 "\t Compound keys consisting of up to two fields can be specified\n"
5832 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5833 "\t fields. Sort keys consisting of up to two fields can be\n"
5834 "\t specified using the 'sort' keyword. The sort direction can\n"
5835 "\t be modified by appending '.descending' or '.ascending' to a\n"
5836 "\t sort field. The 'size' parameter can be used to specify more\n"
5837 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5838 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5839 "\t its histogram data will be shared with other triggers of the\n"
5840 "\t same name, and trigger hits will update this common data.\n\n"
5841 "\t Reading the 'hist' file for the event will dump the hash\n"
5842 "\t table in its entirety to stdout. If there are multiple hist\n"
5843 "\t triggers attached to an event, there will be a table for each\n"
5844 "\t trigger in the output. The table displayed for a named\n"
5845 "\t trigger will be the same as any other instance having the\n"
5846 "\t same name. The default format used to display a given field\n"
5847 "\t can be modified by appending any of the following modifiers\n"
5848 "\t to the field name, as applicable:\n\n"
5849 "\t .hex display a number as a hex value\n"
5850 "\t .sym display an address as a symbol\n"
5851 "\t .sym-offset display an address as a symbol and offset\n"
5852 "\t .execname display a common_pid as a program name\n"
5853 "\t .syscall display a syscall id as a syscall name\n"
5854 "\t .log2 display log2 value rather than raw number\n"
5855 "\t .buckets=size display values in groups of size rather than raw number\n"
5856 "\t .usecs display a common_timestamp in microseconds\n"
5857 	"\t            .percent    display a number as a percentage value\n"
5858 "\t .graph display a bar-graph of a value\n\n"
5859 "\t The 'pause' parameter can be used to pause an existing hist\n"
5860 "\t trigger or to start a hist trigger but not log any events\n"
5861 "\t until told to do so. 'continue' can be used to start or\n"
5862 "\t restart a paused hist trigger.\n\n"
5863 "\t The 'clear' parameter will clear the contents of a running\n"
5864 "\t hist trigger and leave its current paused/active state\n"
5866 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5867 "\t raw hitcount in the histogram.\n\n"
5868 "\t The enable_hist and disable_hist triggers can be used to\n"
5869 "\t have one event conditionally start and stop another event's\n"
5870 "\t already-attached hist trigger. The syntax is analogous to\n"
5871 "\t the enable_event and disable_event triggers.\n\n"
5872 	"\t    Hist trigger handlers and actions are executed whenever a\n"
5873 	"\t    histogram entry is added or updated.  They take the form:\n\n"
5874 "\t <handler>.<action>\n\n"
5875 "\t The available handlers are:\n\n"
5876 "\t onmatch(matching.event) - invoke on addition or update\n"
5877 "\t onmax(var) - invoke if var exceeds current max\n"
5878 "\t onchange(var) - invoke action if var changes\n\n"
5879 "\t The available actions are:\n\n"
5880 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5881 "\t save(field,...) - save current event fields\n"
5882 #ifdef CONFIG_TRACER_SNAPSHOT
5883 "\t snapshot() - snapshot the trace buffer\n\n"
5885 #ifdef CONFIG_SYNTH_EVENTS
5886 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5887 "\t Write into this file to define/undefine new synthetic events.\n"
5888 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5894 tracing_readme_read(struct file *filp, char __user *ubuf,
5895 size_t cnt, loff_t *ppos)
5897 return simple_read_from_buffer(ubuf, cnt, ppos,
5898 readme_msg, strlen(readme_msg));
5901 static const struct file_operations tracing_readme_fops = {
5902 .open = tracing_open_generic,
5903 .read = tracing_readme_read,
5904 .llseek = generic_file_llseek,
5907 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5911 return trace_find_tgid_ptr(pid);
5914 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5918 return trace_find_tgid_ptr(pid);
5921 static void saved_tgids_stop(struct seq_file *m, void *v)
5925 static int saved_tgids_show(struct seq_file *m, void *v)
5927 int *entry = (int *)v;
5928 int pid = entry - tgid_map;
5934 seq_printf(m, "%d %d\n", pid, tgid);
5938 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5939 .start = saved_tgids_start,
5940 .stop = saved_tgids_stop,
5941 .next = saved_tgids_next,
5942 .show = saved_tgids_show,
5945 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5949 ret = tracing_check_open_get_tr(NULL);
5953 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5957 static const struct file_operations tracing_saved_tgids_fops = {
5958 .open = tracing_saved_tgids_open,
5960 .llseek = seq_lseek,
5961 .release = seq_release,
5964 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5966 unsigned int *ptr = v;
5968 if (*pos || m->count)
5973 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5975 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5984 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5990 arch_spin_lock(&trace_cmdline_lock);
5992 v = &savedcmd->map_cmdline_to_pid[0];
5994 v = saved_cmdlines_next(m, v, &l);
6002 static void saved_cmdlines_stop(struct seq_file *m, void *v)
6004 arch_spin_unlock(&trace_cmdline_lock);
6008 static int saved_cmdlines_show(struct seq_file *m, void *v)
6010 char buf[TASK_COMM_LEN];
6011 unsigned int *pid = v;
6013 __trace_find_cmdline(*pid, buf);
6014 seq_printf(m, "%d %s\n", *pid, buf);
6018 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
6019 .start = saved_cmdlines_start,
6020 .next = saved_cmdlines_next,
6021 .stop = saved_cmdlines_stop,
6022 .show = saved_cmdlines_show,
6025 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
6029 ret = tracing_check_open_get_tr(NULL);
6033 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
6036 static const struct file_operations tracing_saved_cmdlines_fops = {
6037 .open = tracing_saved_cmdlines_open,
6039 .llseek = seq_lseek,
6040 .release = seq_release,
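/*
 * Illustrative output of the seq_file above: one "pid comm" pair per
 * cached entry (the pids and comms below are only an example):
 *
 *   cat /sys/kernel/tracing/saved_cmdlines
 *   1 systemd
 *   137 kworker/0:2
 */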
6044 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
6045 size_t cnt, loff_t *ppos)
6051 arch_spin_lock(&trace_cmdline_lock);
6052 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6053 arch_spin_unlock(&trace_cmdline_lock);
6056 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6059 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
6061 kfree(s->saved_cmdlines);
6062 kfree(s->map_cmdline_to_pid);
6066 static int tracing_resize_saved_cmdlines(unsigned int val)
6068 struct saved_cmdlines_buffer *s, *savedcmd_temp;
6070 s = kmalloc(sizeof(*s), GFP_KERNEL);
6074 if (allocate_cmdlines_buffer(val, s) < 0) {
6080 arch_spin_lock(&trace_cmdline_lock);
6081 savedcmd_temp = savedcmd;
6083 arch_spin_unlock(&trace_cmdline_lock);
6085 free_saved_cmdlines_buffer(savedcmd_temp);
6091 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6092 size_t cnt, loff_t *ppos)
6097 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6101 	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
6102 if (!val || val > PID_MAX_DEFAULT)
6105 ret = tracing_resize_saved_cmdlines((unsigned int)val);
6114 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6115 .open = tracing_open_generic,
6116 .read = tracing_saved_cmdlines_size_read,
6117 .write = tracing_saved_cmdlines_size_write,
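/*
 * Illustrative usage: reading this file shows the current number of
 * cached comm entries; writing resizes the cache (1 .. PID_MAX_DEFAULT),
 * e.g.
 *
 *   echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 */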
6120 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6121 static union trace_eval_map_item *
6122 update_eval_map(union trace_eval_map_item *ptr)
6124 if (!ptr->map.eval_string) {
6125 if (ptr->tail.next) {
6126 ptr = ptr->tail.next;
6127 /* Set ptr to the next real item (skip head) */
6135 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6137 union trace_eval_map_item *ptr = v;
6140 * Paranoid! If ptr points to end, we don't want to increment past it.
6141 * This really should never happen.
6144 ptr = update_eval_map(ptr);
6145 if (WARN_ON_ONCE(!ptr))
6149 ptr = update_eval_map(ptr);
6154 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6156 union trace_eval_map_item *v;
6159 mutex_lock(&trace_eval_mutex);
6161 v = trace_eval_maps;
6165 while (v && l < *pos) {
6166 v = eval_map_next(m, v, &l);
6172 static void eval_map_stop(struct seq_file *m, void *v)
6174 mutex_unlock(&trace_eval_mutex);
6177 static int eval_map_show(struct seq_file *m, void *v)
6179 union trace_eval_map_item *ptr = v;
6181 seq_printf(m, "%s %ld (%s)\n",
6182 ptr->map.eval_string, ptr->map.eval_value,
6188 static const struct seq_operations tracing_eval_map_seq_ops = {
6189 .start = eval_map_start,
6190 .next = eval_map_next,
6191 .stop = eval_map_stop,
6192 .show = eval_map_show,
6195 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6199 ret = tracing_check_open_get_tr(NULL);
6203 return seq_open(filp, &tracing_eval_map_seq_ops);
6206 static const struct file_operations tracing_eval_map_fops = {
6207 .open = tracing_eval_map_open,
6209 .llseek = seq_lseek,
6210 .release = seq_release,
6213 static inline union trace_eval_map_item *
6214 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6216 /* Return tail of array given the head */
6217 return ptr + ptr->head.length + 1;
6221 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6224 struct trace_eval_map **stop;
6225 struct trace_eval_map **map;
6226 union trace_eval_map_item *map_array;
6227 union trace_eval_map_item *ptr;
6232 * The trace_eval_maps contains the map plus a head and tail item,
6233 * where the head holds the module and length of array, and the
6234 * tail holds a pointer to the next list.
6236 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6238 pr_warn("Unable to allocate trace eval mapping\n");
6242 mutex_lock(&trace_eval_mutex);
6244 if (!trace_eval_maps)
6245 trace_eval_maps = map_array;
6247 ptr = trace_eval_maps;
6249 ptr = trace_eval_jmp_to_tail(ptr);
6250 if (!ptr->tail.next)
6252 ptr = ptr->tail.next;
6255 ptr->tail.next = map_array;
6257 map_array->head.mod = mod;
6258 map_array->head.length = len;
6261 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6262 map_array->map = **map;
6265 memset(map_array, 0, sizeof(*map_array));
6267 mutex_unlock(&trace_eval_mutex);
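/*
 * Resulting layout of one block allocated above (illustrative):
 *
 *   map_array[0]         head: .mod = mod, .length = len
 *   map_array[1..len]    one trace_eval_map copied per entry in [start, stop)
 *   map_array[len + 1]   zeroed tail; tail.next chains to the next block
 */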
6270 static void trace_create_eval_file(struct dentry *d_tracer)
6272 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6273 NULL, &tracing_eval_map_fops);
6276 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6277 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6278 static inline void trace_insert_eval_map_file(struct module *mod,
6279 struct trace_eval_map **start, int len) { }
6280 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6282 static void trace_insert_eval_map(struct module *mod,
6283 struct trace_eval_map **start, int len)
6285 struct trace_eval_map **map;
6292 trace_event_eval_update(map, len);
6294 trace_insert_eval_map_file(mod, start, len);
6298 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6299 size_t cnt, loff_t *ppos)
6301 struct trace_array *tr = filp->private_data;
6302 char buf[MAX_TRACER_SIZE+2];
6305 mutex_lock(&trace_types_lock);
6306 r = sprintf(buf, "%s\n", tr->current_trace->name);
6307 mutex_unlock(&trace_types_lock);
6309 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6312 int tracer_init(struct tracer *t, struct trace_array *tr)
6314 tracing_reset_online_cpus(&tr->array_buffer);
6318 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6322 for_each_tracing_cpu(cpu)
6323 per_cpu_ptr(buf->data, cpu)->entries = val;
6326 static void update_buffer_entries(struct array_buffer *buf, int cpu)
6328 if (cpu == RING_BUFFER_ALL_CPUS) {
6329 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
6331 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
6335 #ifdef CONFIG_TRACER_MAX_TRACE
6336 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6337 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6338 struct array_buffer *size_buf, int cpu_id)
6342 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6343 for_each_tracing_cpu(cpu) {
6344 ret = ring_buffer_resize(trace_buf->buffer,
6345 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6348 per_cpu_ptr(trace_buf->data, cpu)->entries =
6349 per_cpu_ptr(size_buf->data, cpu)->entries;
6352 ret = ring_buffer_resize(trace_buf->buffer,
6353 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6355 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6356 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6361 #endif /* CONFIG_TRACER_MAX_TRACE */
6363 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6364 unsigned long size, int cpu)
6369 	 * If the kernel or the user changes the size of the ring buffer,
6370 	 * we use the size that was given and no longer need to expand
6371 	 * it later.
6373 ring_buffer_expanded = true;
6375 /* May be called before buffers are initialized */
6376 if (!tr->array_buffer.buffer)
6379 /* Do not allow tracing while resizing ring buffer */
6380 tracing_stop_tr(tr);
6382 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6386 #ifdef CONFIG_TRACER_MAX_TRACE
6387 if (!tr->allocated_snapshot)
6390 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6392 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6393 &tr->array_buffer, cpu);
6396 			 * AARGH! We are left with a max buffer of a
6397 			 * different size!
6398 			 * The max buffer is our "snapshot" buffer.
6399 			 * When a tracer needs a snapshot (one of the
6400 			 * latency tracers), it swaps the max buffer
6401 			 * with the saved snapshot. We succeeded in
6402 			 * updating the size of the main buffer, but failed
6403 			 * to update the size of the max buffer. And when we
6404 			 * tried to reset the main buffer to its original
6405 			 * size, we failed there too. This is very unlikely
6406 			 * to happen, but if it does, warn and kill all tracing.
6410 tracing_disabled = 1;
6415 update_buffer_entries(&tr->max_buffer, cpu);
6418 #endif /* CONFIG_TRACER_MAX_TRACE */
6420 update_buffer_entries(&tr->array_buffer, cpu);
6422 tracing_start_tr(tr);
6426 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6427 unsigned long size, int cpu_id)
6431 mutex_lock(&trace_types_lock);
6433 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6434 		/* make sure this cpu is enabled in the mask */
6435 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6441 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6446 mutex_unlock(&trace_types_lock);
6453 * tracing_update_buffers - used by tracing facility to expand ring buffers
6455  * To save memory when tracing is configured in but never used, the ring
6456  * buffers are initially set to a minimum size. Once a user starts to use
6457  * the tracing facility, they need to grow to their default size.
6458  *
6460 * This function is to be called when a tracer is about to be used.
6462 int tracing_update_buffers(void)
6466 mutex_lock(&trace_types_lock);
6467 if (!ring_buffer_expanded)
6468 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6469 RING_BUFFER_ALL_CPUS);
6470 mutex_unlock(&trace_types_lock);
6475 struct trace_option_dentry;
6478 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6481 * Used to clear out the tracer before deletion of an instance.
6482 * Must have trace_types_lock held.
6484 static void tracing_set_nop(struct trace_array *tr)
6486 if (tr->current_trace == &nop_trace)
6489 tr->current_trace->enabled--;
6491 if (tr->current_trace->reset)
6492 tr->current_trace->reset(tr);
6494 tr->current_trace = &nop_trace;
6497 static bool tracer_options_updated;
6499 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6501 /* Only enable if the directory has been created already. */
6505 /* Only create trace option files after update_tracer_options finish */
6506 	/* Only create trace option files after update_tracer_options() finishes */
6509 create_trace_option_files(tr, t);
6512 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6515 #ifdef CONFIG_TRACER_MAX_TRACE
6520 mutex_lock(&trace_types_lock);
6522 if (!ring_buffer_expanded) {
6523 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6524 RING_BUFFER_ALL_CPUS);
6530 for (t = trace_types; t; t = t->next) {
6531 if (strcmp(t->name, buf) == 0)
6538 if (t == tr->current_trace)
6541 #ifdef CONFIG_TRACER_SNAPSHOT
6542 if (t->use_max_tr) {
6543 local_irq_disable();
6544 arch_spin_lock(&tr->max_lock);
6545 if (tr->cond_snapshot)
6547 arch_spin_unlock(&tr->max_lock);
6553 /* Some tracers won't work on kernel command line */
6554 if (system_state < SYSTEM_RUNNING && t->noboot) {
6555 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6560 /* Some tracers are only allowed for the top level buffer */
6561 if (!trace_ok_for_array(t, tr)) {
6566 /* If trace pipe files are being read, we can't change the tracer */
6567 if (tr->trace_ref) {
6572 trace_branch_disable();
6574 tr->current_trace->enabled--;
6576 if (tr->current_trace->reset)
6577 tr->current_trace->reset(tr);
6579 #ifdef CONFIG_TRACER_MAX_TRACE
6580 had_max_tr = tr->current_trace->use_max_tr;
6582 /* Current trace needs to be nop_trace before synchronize_rcu */
6583 tr->current_trace = &nop_trace;
6585 if (had_max_tr && !t->use_max_tr) {
6587 		 * We need to make sure that update_max_tr() sees that
6588 		 * current_trace changed to nop_trace to keep it from
6589 		 * swapping the buffers after we resize it.
6590 		 * update_max_tr() is called with interrupts disabled,
6591 		 * so a synchronize_rcu() is sufficient.
6597 if (t->use_max_tr && !tr->allocated_snapshot) {
6598 ret = tracing_alloc_snapshot_instance(tr);
6603 tr->current_trace = &nop_trace;
6607 ret = tracer_init(t, tr);
6612 tr->current_trace = t;
6613 tr->current_trace->enabled++;
6614 trace_branch_enable(tr);
6616 mutex_unlock(&trace_types_lock);
6622 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6623 size_t cnt, loff_t *ppos)
6625 struct trace_array *tr = filp->private_data;
6626 char buf[MAX_TRACER_SIZE+1];
6633 if (cnt > MAX_TRACER_SIZE)
6634 cnt = MAX_TRACER_SIZE;
6636 if (copy_from_user(buf, ubuf, cnt))
6643 err = tracing_set_tracer(tr, name);
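/*
 * Illustrative usage of the write handler above (which tracers exist
 * depends on the kernel configuration):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   echo nop      > /sys/kernel/tracing/current_tracer
 */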
6653 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6654 size_t cnt, loff_t *ppos)
6659 r = snprintf(buf, sizeof(buf), "%ld\n",
6660 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6661 if (r > sizeof(buf))
6663 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6667 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6668 size_t cnt, loff_t *ppos)
6673 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6683 tracing_thresh_read(struct file *filp, char __user *ubuf,
6684 size_t cnt, loff_t *ppos)
6686 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6690 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6691 size_t cnt, loff_t *ppos)
6693 struct trace_array *tr = filp->private_data;
6696 mutex_lock(&trace_types_lock);
6697 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6701 if (tr->current_trace->update_thresh) {
6702 ret = tr->current_trace->update_thresh(tr);
6709 mutex_unlock(&trace_types_lock);
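/*
 * Illustrative usage: the value is exposed in microseconds (see
 * tracing_nsecs_read() and nsecs_to_usecs() above), so e.g.
 *
 *   echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * is typically used by the latency tracers as the minimum latency worth
 * recording.
 */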
6714 #ifdef CONFIG_TRACER_MAX_TRACE
6717 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6718 size_t cnt, loff_t *ppos)
6720 struct trace_array *tr = filp->private_data;
6722 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6726 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6727 size_t cnt, loff_t *ppos)
6729 struct trace_array *tr = filp->private_data;
6731 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6736 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6738 if (cpu == RING_BUFFER_ALL_CPUS) {
6739 if (cpumask_empty(tr->pipe_cpumask)) {
6740 cpumask_setall(tr->pipe_cpumask);
6743 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6744 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6750 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6752 if (cpu == RING_BUFFER_ALL_CPUS) {
6753 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6754 cpumask_clear(tr->pipe_cpumask);
6756 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6757 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
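/*
 * Invariant sketched by the two helpers above: tr->pipe_cpumask tracks
 * which trace_pipe readers are open. Opening the "all CPUs" pipe requires
 * the mask to be empty and then fills it completely, so a global reader
 * and per-CPU readers are mutually exclusive.
 */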
6761 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6763 struct trace_array *tr = inode->i_private;
6764 struct trace_iterator *iter;
6768 ret = tracing_check_open_get_tr(tr);
6772 mutex_lock(&trace_types_lock);
6773 cpu = tracing_get_cpu(inode);
6774 ret = open_pipe_on_cpu(tr, cpu);
6776 goto fail_pipe_on_cpu;
6778 /* create a buffer to store the information to pass to userspace */
6779 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6782 goto fail_alloc_iter;
6785 trace_seq_init(&iter->seq);
6786 iter->trace = tr->current_trace;
6788 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6793 /* trace pipe does not show start of buffer */
6794 cpumask_setall(iter->started);
6796 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6797 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6799 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6800 if (trace_clocks[tr->clock_id].in_ns)
6801 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6804 iter->array_buffer = &tr->array_buffer;
6805 iter->cpu_file = cpu;
6806 mutex_init(&iter->mutex);
6807 filp->private_data = iter;
6809 if (iter->trace->pipe_open)
6810 iter->trace->pipe_open(iter);
6812 nonseekable_open(inode, filp);
6816 mutex_unlock(&trace_types_lock);
6822 close_pipe_on_cpu(tr, cpu);
6824 __trace_array_put(tr);
6825 mutex_unlock(&trace_types_lock);
6829 static int tracing_release_pipe(struct inode *inode, struct file *file)
6831 struct trace_iterator *iter = file->private_data;
6832 struct trace_array *tr = inode->i_private;
6834 mutex_lock(&trace_types_lock);
6838 if (iter->trace->pipe_close)
6839 iter->trace->pipe_close(iter);
6840 close_pipe_on_cpu(tr, iter->cpu_file);
6841 mutex_unlock(&trace_types_lock);
6843 free_trace_iter_content(iter);
6846 trace_array_put(tr);
6852 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6854 struct trace_array *tr = iter->tr;
6856 /* Iterators are static, they should be filled or empty */
6857 if (trace_buffer_iter(iter, iter->cpu_file))
6858 return EPOLLIN | EPOLLRDNORM;
6860 if (tr->trace_flags & TRACE_ITER_BLOCK)
6862 * Always select as readable when in blocking mode
6864 return EPOLLIN | EPOLLRDNORM;
6866 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6867 filp, poll_table, iter->tr->buffer_percent);
6871 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6873 struct trace_iterator *iter = filp->private_data;
6875 return trace_poll(iter, filp, poll_table);
6878 /* Must be called with iter->mutex held. */
6879 static int tracing_wait_pipe(struct file *filp)
6881 struct trace_iterator *iter = filp->private_data;
6884 while (trace_empty(iter)) {
6886 if ((filp->f_flags & O_NONBLOCK)) {
6891 		 * We block until we read something and tracing is disabled.
6892 		 * We still block if tracing is disabled but we have never
6893 		 * read anything. This allows a user to cat this file, and
6894 		 * then enable tracing. But after we have read something,
6895 		 * we give an EOF when tracing is disabled again.
6897 * iter->pos will be 0 if we haven't read anything.
6899 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6902 mutex_unlock(&iter->mutex);
6904 ret = wait_on_pipe(iter, 0);
6906 mutex_lock(&iter->mutex);
6919 tracing_read_pipe(struct file *filp, char __user *ubuf,
6920 size_t cnt, loff_t *ppos)
6922 struct trace_iterator *iter = filp->private_data;
6926 	 * Avoid more than one consumer on a single file descriptor.
6927 	 * This is just a matter of trace coherency; the ring buffer itself is protected.
6930 mutex_lock(&iter->mutex);
6932 /* return any leftover data */
6933 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6937 trace_seq_init(&iter->seq);
6939 if (iter->trace->read) {
6940 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6946 sret = tracing_wait_pipe(filp);
6950 /* stop when tracing is finished */
6951 if (trace_empty(iter)) {
6956 if (cnt >= PAGE_SIZE)
6957 cnt = PAGE_SIZE - 1;
6959 /* reset all but tr, trace, and overruns */
6960 trace_iterator_reset(iter);
6961 cpumask_clear(iter->started);
6962 trace_seq_init(&iter->seq);
6964 trace_event_read_lock();
6965 trace_access_lock(iter->cpu_file);
6966 while (trace_find_next_entry_inc(iter) != NULL) {
6967 enum print_line_t ret;
6968 int save_len = iter->seq.seq.len;
6970 ret = print_trace_line(iter);
6971 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6973 			 * If one print_trace_line() fills the entire trace_seq in one shot,
6974 			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6975 			 * In this case, we need to consume it, otherwise the loop will peek at
6976 			 * this event next time, resulting in an infinite loop.
6978 if (save_len == 0) {
6980 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6981 trace_consume(iter);
6985 /* In other cases, don't print partial lines */
6986 iter->seq.seq.len = save_len;
6989 if (ret != TRACE_TYPE_NO_CONSUME)
6990 trace_consume(iter);
6992 if (trace_seq_used(&iter->seq) >= cnt)
6996 		 * Setting the full flag means we reached the trace_seq buffer
6997 		 * size and we should have left via the partial output condition
6998 		 * above. If not, one of the trace_seq_* functions is not being used properly.
7000 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
7003 trace_access_unlock(iter->cpu_file);
7004 trace_event_read_unlock();
7006 /* Now copy what we have to the user */
7007 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
7008 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
7009 trace_seq_init(&iter->seq);
7012 * If there was nothing to send to user, in spite of consuming trace
7013 * entries, go back to wait for more entries.
7019 mutex_unlock(&iter->mutex);
7024 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
7027 __free_page(spd->pages[idx]);
7031 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
7037 /* Seq buffer is page-sized, exactly what we need. */
7039 save_len = iter->seq.seq.len;
7040 ret = print_trace_line(iter);
7042 if (trace_seq_has_overflowed(&iter->seq)) {
7043 iter->seq.seq.len = save_len;
7048 * This should not be hit, because it should only
7049 * be set if the iter->seq overflowed. But check it
7050 * anyway to be safe.
7052 if (ret == TRACE_TYPE_PARTIAL_LINE) {
7053 iter->seq.seq.len = save_len;
7057 count = trace_seq_used(&iter->seq) - save_len;
7060 iter->seq.seq.len = save_len;
7064 if (ret != TRACE_TYPE_NO_CONSUME)
7065 trace_consume(iter);
7067 if (!trace_find_next_entry_inc(iter)) {
7077 static ssize_t tracing_splice_read_pipe(struct file *filp,
7079 struct pipe_inode_info *pipe,
7083 struct page *pages_def[PIPE_DEF_BUFFERS];
7084 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7085 struct trace_iterator *iter = filp->private_data;
7086 struct splice_pipe_desc spd = {
7088 .partial = partial_def,
7089 .nr_pages = 0, /* This gets updated below. */
7090 .nr_pages_max = PIPE_DEF_BUFFERS,
7091 .ops = &default_pipe_buf_ops,
7092 .spd_release = tracing_spd_release_pipe,
7098 if (splice_grow_spd(pipe, &spd))
7101 mutex_lock(&iter->mutex);
7103 if (iter->trace->splice_read) {
7104 ret = iter->trace->splice_read(iter, filp,
7105 ppos, pipe, len, flags);
7110 ret = tracing_wait_pipe(filp);
7114 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7119 trace_event_read_lock();
7120 trace_access_lock(iter->cpu_file);
7122 /* Fill as many pages as possible. */
7123 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7124 spd.pages[i] = alloc_page(GFP_KERNEL);
7128 rem = tracing_fill_pipe_page(rem, iter);
7130 /* Copy the data into the page, so we can start over. */
7131 ret = trace_seq_to_buffer(&iter->seq,
7132 page_address(spd.pages[i]),
7133 trace_seq_used(&iter->seq));
7135 __free_page(spd.pages[i]);
7138 spd.partial[i].offset = 0;
7139 spd.partial[i].len = trace_seq_used(&iter->seq);
7141 trace_seq_init(&iter->seq);
7144 trace_access_unlock(iter->cpu_file);
7145 trace_event_read_unlock();
7146 mutex_unlock(&iter->mutex);
7151 ret = splice_to_pipe(pipe, &spd);
7155 splice_shrink_spd(&spd);
7159 mutex_unlock(&iter->mutex);
7164 tracing_entries_read(struct file *filp, char __user *ubuf,
7165 size_t cnt, loff_t *ppos)
7167 struct inode *inode = file_inode(filp);
7168 struct trace_array *tr = inode->i_private;
7169 int cpu = tracing_get_cpu(inode);
7174 mutex_lock(&trace_types_lock);
7176 if (cpu == RING_BUFFER_ALL_CPUS) {
7177 int cpu, buf_size_same;
7182 /* check if all cpu sizes are same */
7183 for_each_tracing_cpu(cpu) {
7184 /* fill in the size from first enabled cpu */
7186 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7187 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7193 if (buf_size_same) {
7194 if (!ring_buffer_expanded)
7195 r = sprintf(buf, "%lu (expanded: %lu)\n",
7197 trace_buf_size >> 10);
7199 r = sprintf(buf, "%lu\n", size >> 10);
7201 r = sprintf(buf, "X\n");
7203 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7205 mutex_unlock(&trace_types_lock);
7207 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7212 tracing_entries_write(struct file *filp, const char __user *ubuf,
7213 size_t cnt, loff_t *ppos)
7215 struct inode *inode = file_inode(filp);
7216 struct trace_array *tr = inode->i_private;
7220 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7224 /* must have at least 1 entry */
7228 /* value is in KB */
7230 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
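/*
 * Illustrative usage sketch (tracefs side, not part of the code above):
 * the value written to buffer_size_kb is interpreted as kilobytes per CPU,
 * e.g. from a shell (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb	(4 MB per CPU)
 *	# cat /sys/kernel/tracing/buffer_size_kb
 *	4096
 *
 * Writing to per_cpu/cpuN/buffer_size_kb resizes only that CPU's buffer.
 */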
7240 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7241 size_t cnt, loff_t *ppos)
7243 struct trace_array *tr = filp->private_data;
7246 unsigned long size = 0, expanded_size = 0;
7248 mutex_lock(&trace_types_lock);
7249 for_each_tracing_cpu(cpu) {
7250 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7251 if (!ring_buffer_expanded)
7252 expanded_size += trace_buf_size >> 10;
7254 if (ring_buffer_expanded)
7255 r = sprintf(buf, "%lu\n", size);
7257 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7258 mutex_unlock(&trace_types_lock);
7260 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7264 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7265 size_t cnt, loff_t *ppos)
7268 * There is no need to read what the user has written; this function
7269 * exists only so that using "echo" here does not produce an error
7278 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7280 struct trace_array *tr = inode->i_private;
7282 /* disable tracing ? */
7283 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7284 tracer_tracing_off(tr);
7285 /* resize the ring buffer to 0 */
7286 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7288 trace_array_put(tr);
7294 tracing_mark_write(struct file *filp, const char __user *ubuf,
7295 size_t cnt, loff_t *fpos)
7297 struct trace_array *tr = filp->private_data;
7298 struct ring_buffer_event *event;
7299 enum event_trigger_type tt = ETT_NONE;
7300 struct trace_buffer *buffer;
7301 struct print_entry *entry;
7306 /* Used in tracing_mark_raw_write() as well */
7307 #define FAULTED_STR "<faulted>"
7308 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7310 if (tracing_disabled)
7313 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7316 if (cnt > TRACE_BUF_SIZE)
7317 cnt = TRACE_BUF_SIZE;
7319 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7321 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7323 /* If less than "<faulted>", then make sure we can still add that */
7324 if (cnt < FAULTED_SIZE)
7325 size += FAULTED_SIZE - cnt;
7327 buffer = tr->array_buffer.buffer;
7328 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7330 if (unlikely(!event))
7331 /* Ring buffer disabled, return as if not open for write */
7334 entry = ring_buffer_event_data(event);
7335 entry->ip = _THIS_IP_;
7337 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7339 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7345 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7346 /* do not add \n before testing triggers, but add \0 */
7347 entry->buf[cnt] = '\0';
7348 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7351 if (entry->buf[cnt - 1] != '\n') {
7352 entry->buf[cnt] = '\n';
7353 entry->buf[cnt + 1] = '\0';
7355 entry->buf[cnt] = '\0';
7357 if (static_branch_unlikely(&trace_marker_exports_enabled))
7358 ftrace_exports(event, TRACE_EXPORT_MARKER);
7359 __buffer_unlock_commit(buffer, event);
7362 event_triggers_post_call(tr->trace_marker_file, tt);
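/*
 * Illustrative usage sketch (user-space side): trace_marker accepts
 * free-form text and records it as a TRACE_PRINT event, e.g.:
 *
 *	# echo "hit the slow path" > /sys/kernel/tracing/trace_marker
 *
 * A trailing newline is appended when the write did not include one, and
 * writes larger than TRACE_BUF_SIZE are truncated (see the clamping of
 * cnt above).
 */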
7367 /* Limit it for now to 3K (including tag) */
7368 #define RAW_DATA_MAX_SIZE (1024*3)
7371 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7372 size_t cnt, loff_t *fpos)
7374 struct trace_array *tr = filp->private_data;
7375 struct ring_buffer_event *event;
7376 struct trace_buffer *buffer;
7377 struct raw_data_entry *entry;
7382 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7384 if (tracing_disabled)
7387 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7390 /* The marker must at least have a tag id */
7391 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7394 if (cnt > TRACE_BUF_SIZE)
7395 cnt = TRACE_BUF_SIZE;
7397 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7399 size = sizeof(*entry) + cnt;
7400 if (cnt < FAULT_SIZE_ID)
7401 size += FAULT_SIZE_ID - cnt;
7403 buffer = tr->array_buffer.buffer;
7404 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7407 /* Ring buffer disabled, return as if not open for write */
7410 entry = ring_buffer_event_data(event);
7412 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7415 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7420 __buffer_unlock_commit(buffer, event);
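/*
 * Illustrative usage sketch (user-space side, hypothetical record layout):
 * trace_marker_raw expects binary data whose first sizeof(unsigned int)
 * bytes are the tag id, followed by the payload:
 *
 *	struct { unsigned int id; char payload[8]; } rec = { 42, "rawdata" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *	write(fd, &rec, sizeof(rec));
 *
 * Writes smaller than the tag id or larger than RAW_DATA_MAX_SIZE are
 * rejected by the size checks above.
 */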
7425 static int tracing_clock_show(struct seq_file *m, void *v)
7427 struct trace_array *tr = m->private;
7430 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7432 "%s%s%s%s", i ? " " : "",
7433 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7434 i == tr->clock_id ? "]" : "");
7440 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7444 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7445 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7448 if (i == ARRAY_SIZE(trace_clocks))
7451 mutex_lock(&trace_types_lock);
7455 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7458 * The new clock may not be consistent with the previous clock.
7459 * Reset the buffer so that it does not contain incomparable timestamps.
7461 tracing_reset_online_cpus(&tr->array_buffer);
7463 #ifdef CONFIG_TRACER_MAX_TRACE
7464 if (tr->max_buffer.buffer)
7465 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7466 tracing_reset_online_cpus(&tr->max_buffer);
7469 mutex_unlock(&trace_types_lock);
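/*
 * Illustrative usage sketch: the clock can be changed in-kernel with
 * tracing_set_clock(tr, "global"), or from tracefs, where the listing
 * shows the current selection in brackets (the exact set of clocks
 * depends on the kernel):
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo mono > /sys/kernel/tracing/trace_clock
 *
 * As noted in tracing_set_clock() above, switching clocks resets the
 * ring buffer to avoid mixing incomparable timestamps.
 */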
7474 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7475 size_t cnt, loff_t *fpos)
7477 struct seq_file *m = filp->private_data;
7478 struct trace_array *tr = m->private;
7480 const char *clockstr;
7483 if (cnt >= sizeof(buf))
7486 if (copy_from_user(buf, ubuf, cnt))
7491 clockstr = strstrip(buf);
7493 ret = tracing_set_clock(tr, clockstr);
7502 static int tracing_clock_open(struct inode *inode, struct file *file)
7504 struct trace_array *tr = inode->i_private;
7507 ret = tracing_check_open_get_tr(tr);
7511 ret = single_open(file, tracing_clock_show, inode->i_private);
7513 trace_array_put(tr);
7518 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7520 struct trace_array *tr = m->private;
7522 mutex_lock(&trace_types_lock);
7524 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7525 seq_puts(m, "delta [absolute]\n");
7527 seq_puts(m, "[delta] absolute\n");
7529 mutex_unlock(&trace_types_lock);
7534 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7536 struct trace_array *tr = inode->i_private;
7539 ret = tracing_check_open_get_tr(tr);
7543 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7545 trace_array_put(tr);
7550 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7552 if (rbe == this_cpu_read(trace_buffered_event))
7553 return ring_buffer_time_stamp(buffer);
7555 return ring_buffer_event_time_stamp(buffer, rbe);
7559 * Enable or disable use of the per-CPU trace_buffered_event when possible.
7561 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7565 mutex_lock(&trace_types_lock);
7567 if (set && tr->no_filter_buffering_ref++)
7571 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7576 --tr->no_filter_buffering_ref;
7579 mutex_unlock(&trace_types_lock);
7584 struct ftrace_buffer_info {
7585 struct trace_iterator iter;
7587 unsigned int spare_cpu;
7591 #ifdef CONFIG_TRACER_SNAPSHOT
7592 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7594 struct trace_array *tr = inode->i_private;
7595 struct trace_iterator *iter;
7599 ret = tracing_check_open_get_tr(tr);
7603 if (file->f_mode & FMODE_READ) {
7604 iter = __tracing_open(inode, file, true);
7606 ret = PTR_ERR(iter);
7608 /* Writes still need the seq_file to hold the private data */
7610 m = kzalloc(sizeof(*m), GFP_KERNEL);
7613 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7621 iter->array_buffer = &tr->max_buffer;
7622 iter->cpu_file = tracing_get_cpu(inode);
7624 file->private_data = m;
7628 trace_array_put(tr);
7633 static void tracing_swap_cpu_buffer(void *tr)
7635 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7639 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7642 struct seq_file *m = filp->private_data;
7643 struct trace_iterator *iter = m->private;
7644 struct trace_array *tr = iter->tr;
7648 ret = tracing_update_buffers();
7652 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7656 mutex_lock(&trace_types_lock);
7658 if (tr->current_trace->use_max_tr) {
7663 local_irq_disable();
7664 arch_spin_lock(&tr->max_lock);
7665 if (tr->cond_snapshot)
7667 arch_spin_unlock(&tr->max_lock);
7674 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7678 if (tr->allocated_snapshot)
7682 /* Only allow per-cpu swap if the ring buffer supports it */
7683 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7684 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7689 if (tr->allocated_snapshot)
7690 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7691 &tr->array_buffer, iter->cpu_file);
7693 ret = tracing_alloc_snapshot_instance(tr);
7696 /* Now, we're going to swap */
7697 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7698 local_irq_disable();
7699 update_max_tr(tr, current, smp_processor_id(), NULL);
7702 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7707 if (tr->allocated_snapshot) {
7708 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7709 tracing_reset_online_cpus(&tr->max_buffer);
7711 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7721 mutex_unlock(&trace_types_lock);
7725 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7727 struct seq_file *m = file->private_data;
7730 ret = tracing_release(inode, file);
7732 if (file->f_mode & FMODE_READ)
7735 /* If write only, the seq_file is just a stub */
7743 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7744 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7745 size_t count, loff_t *ppos);
7746 static int tracing_buffers_release(struct inode *inode, struct file *file);
7747 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7748 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7750 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7752 struct ftrace_buffer_info *info;
7755 /* The following checks for tracefs lockdown */
7756 ret = tracing_buffers_open(inode, filp);
7760 info = filp->private_data;
7762 if (info->iter.trace->use_max_tr) {
7763 tracing_buffers_release(inode, filp);
7767 info->iter.snapshot = true;
7768 info->iter.array_buffer = &info->iter.tr->max_buffer;
7773 #endif /* CONFIG_TRACER_SNAPSHOT */
7776 static const struct file_operations tracing_thresh_fops = {
7777 .open = tracing_open_generic,
7778 .read = tracing_thresh_read,
7779 .write = tracing_thresh_write,
7780 .llseek = generic_file_llseek,
7783 #ifdef CONFIG_TRACER_MAX_TRACE
7784 static const struct file_operations tracing_max_lat_fops = {
7785 .open = tracing_open_generic_tr,
7786 .read = tracing_max_lat_read,
7787 .write = tracing_max_lat_write,
7788 .llseek = generic_file_llseek,
7789 .release = tracing_release_generic_tr,
7793 static const struct file_operations set_tracer_fops = {
7794 .open = tracing_open_generic_tr,
7795 .read = tracing_set_trace_read,
7796 .write = tracing_set_trace_write,
7797 .llseek = generic_file_llseek,
7798 .release = tracing_release_generic_tr,
7801 static const struct file_operations tracing_pipe_fops = {
7802 .open = tracing_open_pipe,
7803 .poll = tracing_poll_pipe,
7804 .read = tracing_read_pipe,
7805 .splice_read = tracing_splice_read_pipe,
7806 .release = tracing_release_pipe,
7807 .llseek = no_llseek,
7810 static const struct file_operations tracing_entries_fops = {
7811 .open = tracing_open_generic_tr,
7812 .read = tracing_entries_read,
7813 .write = tracing_entries_write,
7814 .llseek = generic_file_llseek,
7815 .release = tracing_release_generic_tr,
7818 static const struct file_operations tracing_total_entries_fops = {
7819 .open = tracing_open_generic_tr,
7820 .read = tracing_total_entries_read,
7821 .llseek = generic_file_llseek,
7822 .release = tracing_release_generic_tr,
7825 static const struct file_operations tracing_free_buffer_fops = {
7826 .open = tracing_open_generic_tr,
7827 .write = tracing_free_buffer_write,
7828 .release = tracing_free_buffer_release,
7831 static const struct file_operations tracing_mark_fops = {
7832 .open = tracing_mark_open,
7833 .write = tracing_mark_write,
7834 .release = tracing_release_generic_tr,
7837 static const struct file_operations tracing_mark_raw_fops = {
7838 .open = tracing_mark_open,
7839 .write = tracing_mark_raw_write,
7840 .release = tracing_release_generic_tr,
7843 static const struct file_operations trace_clock_fops = {
7844 .open = tracing_clock_open,
7846 .llseek = seq_lseek,
7847 .release = tracing_single_release_tr,
7848 .write = tracing_clock_write,
7851 static const struct file_operations trace_time_stamp_mode_fops = {
7852 .open = tracing_time_stamp_mode_open,
7854 .llseek = seq_lseek,
7855 .release = tracing_single_release_tr,
7858 #ifdef CONFIG_TRACER_SNAPSHOT
7859 static const struct file_operations snapshot_fops = {
7860 .open = tracing_snapshot_open,
7862 .write = tracing_snapshot_write,
7863 .llseek = tracing_lseek,
7864 .release = tracing_snapshot_release,
7867 static const struct file_operations snapshot_raw_fops = {
7868 .open = snapshot_raw_open,
7869 .read = tracing_buffers_read,
7870 .release = tracing_buffers_release,
7871 .splice_read = tracing_buffers_splice_read,
7872 .llseek = no_llseek,
7875 #endif /* CONFIG_TRACER_SNAPSHOT */
7878 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7879 * @filp: The active open file structure
7880 * @ubuf: The userspace provided buffer holding the value to be written
7881 * @cnt: The maximum number of bytes to read
7882 * @ppos: The current "file" position
7884 * This function implements the write interface for a struct trace_min_max_param.
7885 * The filp->private_data must point to a trace_min_max_param structure that
7886 * defines where to write the value, the min and the max acceptable values,
7887 * and a lock to protect the write.
7890 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7892 struct trace_min_max_param *param = filp->private_data;
7899 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7904 mutex_lock(param->lock);
7906 if (param->min && val < *param->min)
7909 if (param->max && val > *param->max)
7916 mutex_unlock(param->lock);
7925 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7926 * @filp: The active open file structure
7927 * @ubuf: The userspace provided buffer to read value into
7928 * @cnt: The maximum number of bytes to read
7929 * @ppos: The current "file" position
7931 * This function implements the read interface for a struct trace_min_max_param.
7932 * The filp->private_data must point to a trace_min_max_param struct with valid
7936 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7938 struct trace_min_max_param *param = filp->private_data;
7939 char buf[U64_STR_SIZE];
7948 if (cnt > sizeof(buf))
7951 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7953 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7956 const struct file_operations trace_min_max_fops = {
7957 .open = tracing_open_generic,
7958 .read = trace_min_max_read,
7959 .write = trace_min_max_write,
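/*
 * Illustrative setup sketch (my_lock/my_val/my_min/my_max and "parent"
 * are hypothetical): a user of trace_min_max_fops wires up a bounded
 * u64 knob roughly like this:
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 *
 * Writes outside [*min, *max] are rejected, and the optional *lock
 * serializes updates.
 */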
7962 #define TRACING_LOG_ERRS_MAX 8
7963 #define TRACING_LOG_LOC_MAX 128
7965 #define CMD_PREFIX " Command: "
7968 const char **errs; /* ptr to loc-specific array of err strings */
7969 u8 type; /* index into errs -> specific err string */
7970 u16 pos; /* caret position */
7974 struct tracing_log_err {
7975 struct list_head list;
7976 struct err_info info;
7977 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7978 char *cmd; /* what caused err */
7981 static DEFINE_MUTEX(tracing_err_log_lock);
7983 static struct tracing_log_err *alloc_tracing_log_err(int len)
7985 struct tracing_log_err *err;
7987 err = kzalloc(sizeof(*err), GFP_KERNEL);
7989 return ERR_PTR(-ENOMEM);
7991 err->cmd = kzalloc(len, GFP_KERNEL);
7994 return ERR_PTR(-ENOMEM);
8000 static void free_tracing_log_err(struct tracing_log_err *err)
8006 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
8009 struct tracing_log_err *err;
8012 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
8013 err = alloc_tracing_log_err(len);
8014 if (PTR_ERR(err) != -ENOMEM)
8015 tr->n_err_log_entries++;
8019 cmd = kzalloc(len, GFP_KERNEL);
8021 return ERR_PTR(-ENOMEM);
8022 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8025 list_del(&err->list);
8031 * err_pos - find the position of a string within a command for error careting
8032 * @cmd: The tracing command that caused the error
8033 * @str: The string to position the caret at within @cmd
8035 * Finds the position of the first occurrence of @str within @cmd. The
8036 * return value can be passed to tracing_log_err() for caret placement
8039 * Returns the index within @cmd of the first occurrence of @str or 0
8040 * if @str was not found.
8042 unsigned int err_pos(char *cmd, const char *str)
8046 if (WARN_ON(!strlen(cmd)))
8049 found = strstr(cmd, str);
8057 * tracing_log_err - write an error to the tracing error log
8058 * @tr: The associated trace array for the error (NULL for top level array)
8059 * @loc: A string describing where the error occurred
8060 * @cmd: The tracing command that caused the error
8061 * @errs: The array of loc-specific static error strings
8062 * @type: The index into errs[], which produces the specific static err string
8063 * @pos: The position the caret should be placed in the cmd
8065 * Writes an error into tracing/error_log of the form:
8067 * <loc>: error: <text>
8071 * tracing/error_log is a small log file containing the last
8072 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
8073 * unless there has been a tracing error, and the error log can be
8074 * cleared and have its memory freed by writing the empty string in
8075 * truncation mode to it i.e. echo > tracing/error_log.
8077 * NOTE: the @errs array along with the @type param are used to
8078 * produce a static error string - this string is not copied and saved
8079 * when the error is logged - only a pointer to it is saved. See
8080 * existing callers for examples of how static strings are typically
8081 * defined for use with tracing_log_err().
8083 void tracing_log_err(struct trace_array *tr,
8084 const char *loc, const char *cmd,
8085 const char **errs, u8 type, u16 pos)
8087 struct tracing_log_err *err;
8093 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8095 mutex_lock(&tracing_err_log_lock);
8096 err = get_tracing_log_err(tr, len);
8097 if (PTR_ERR(err) == -ENOMEM) {
8098 mutex_unlock(&tracing_err_log_lock);
8102 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8103 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8105 err->info.errs = errs;
8106 err->info.type = type;
8107 err->info.pos = pos;
8108 err->info.ts = local_clock();
8110 list_add_tail(&err->list, &tr->err_log);
8111 mutex_unlock(&tracing_err_log_lock);
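/*
 * Illustrative call sketch (the loc string, command and error table are
 * hypothetical): a command parser could log a caret-annotated error with
 * a static error table and err_pos(), roughly:
 *
 *	static const char *my_errs[] = { "Unknown field", "Bad operator" };
 *
 *	tracing_log_err(tr, "hist:sched:sched_switch", cmd, my_errs,
 *			0, err_pos(cmd, "bogus"));
 *
 * which would show up in tracing/error_log along the lines of:
 *
 *	[  123.456789] hist:sched:sched_switch: error: Unknown field
 *	  Command: keys=bogus
 *	                ^
 */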
8114 static void clear_tracing_err_log(struct trace_array *tr)
8116 struct tracing_log_err *err, *next;
8118 mutex_lock(&tracing_err_log_lock);
8119 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8120 list_del(&err->list);
8121 free_tracing_log_err(err);
8124 tr->n_err_log_entries = 0;
8125 mutex_unlock(&tracing_err_log_lock);
8128 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8130 struct trace_array *tr = m->private;
8132 mutex_lock(&tracing_err_log_lock);
8134 return seq_list_start(&tr->err_log, *pos);
8137 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8139 struct trace_array *tr = m->private;
8141 return seq_list_next(v, &tr->err_log, pos);
8144 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8146 mutex_unlock(&tracing_err_log_lock);
8149 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8153 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8155 for (i = 0; i < pos; i++)
8160 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8162 struct tracing_log_err *err = v;
8165 const char *err_text = err->info.errs[err->info.type];
8166 u64 sec = err->info.ts;
8169 nsec = do_div(sec, NSEC_PER_SEC);
8170 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8171 err->loc, err_text);
8172 seq_printf(m, "%s", err->cmd);
8173 tracing_err_log_show_pos(m, err->info.pos);
8179 static const struct seq_operations tracing_err_log_seq_ops = {
8180 .start = tracing_err_log_seq_start,
8181 .next = tracing_err_log_seq_next,
8182 .stop = tracing_err_log_seq_stop,
8183 .show = tracing_err_log_seq_show
8186 static int tracing_err_log_open(struct inode *inode, struct file *file)
8188 struct trace_array *tr = inode->i_private;
8191 ret = tracing_check_open_get_tr(tr);
8195 /* If this file was opened for write, then erase contents */
8196 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8197 clear_tracing_err_log(tr);
8199 if (file->f_mode & FMODE_READ) {
8200 ret = seq_open(file, &tracing_err_log_seq_ops);
8202 struct seq_file *m = file->private_data;
8205 trace_array_put(tr);
8211 static ssize_t tracing_err_log_write(struct file *file,
8212 const char __user *buffer,
8213 size_t count, loff_t *ppos)
8218 static int tracing_err_log_release(struct inode *inode, struct file *file)
8220 struct trace_array *tr = inode->i_private;
8222 trace_array_put(tr);
8224 if (file->f_mode & FMODE_READ)
8225 seq_release(inode, file);
8230 static const struct file_operations tracing_err_log_fops = {
8231 .open = tracing_err_log_open,
8232 .write = tracing_err_log_write,
8234 .llseek = tracing_lseek,
8235 .release = tracing_err_log_release,
8238 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8240 struct trace_array *tr = inode->i_private;
8241 struct ftrace_buffer_info *info;
8244 ret = tracing_check_open_get_tr(tr);
8248 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8250 trace_array_put(tr);
8254 mutex_lock(&trace_types_lock);
8257 info->iter.cpu_file = tracing_get_cpu(inode);
8258 info->iter.trace = tr->current_trace;
8259 info->iter.array_buffer = &tr->array_buffer;
8261 /* Force reading ring buffer for first read */
8262 info->read = (unsigned int)-1;
8264 filp->private_data = info;
8268 mutex_unlock(&trace_types_lock);
8270 ret = nonseekable_open(inode, filp);
8272 trace_array_put(tr);
8278 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8280 struct ftrace_buffer_info *info = filp->private_data;
8281 struct trace_iterator *iter = &info->iter;
8283 return trace_poll(iter, filp, poll_table);
8287 tracing_buffers_read(struct file *filp, char __user *ubuf,
8288 size_t count, loff_t *ppos)
8290 struct ftrace_buffer_info *info = filp->private_data;
8291 struct trace_iterator *iter = &info->iter;
8298 #ifdef CONFIG_TRACER_MAX_TRACE
8299 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8304 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8306 if (IS_ERR(info->spare)) {
8307 ret = PTR_ERR(info->spare);
8310 info->spare_cpu = iter->cpu_file;
8316 /* Do we have previous read data to read? */
8317 if (info->read < PAGE_SIZE)
8321 trace_access_lock(iter->cpu_file);
8322 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8326 trace_access_unlock(iter->cpu_file);
8329 if (trace_empty(iter)) {
8330 if ((filp->f_flags & O_NONBLOCK))
8333 ret = wait_on_pipe(iter, 0);
8344 size = PAGE_SIZE - info->read;
8348 ret = copy_to_user(ubuf, info->spare + info->read, size);
8360 static int tracing_buffers_release(struct inode *inode, struct file *file)
8362 struct ftrace_buffer_info *info = file->private_data;
8363 struct trace_iterator *iter = &info->iter;
8365 mutex_lock(&trace_types_lock);
8367 iter->tr->trace_ref--;
8369 __trace_array_put(iter->tr);
8372 /* Make sure the waiters see the new wait_index */
8375 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8378 ring_buffer_free_read_page(iter->array_buffer->buffer,
8379 info->spare_cpu, info->spare);
8382 mutex_unlock(&trace_types_lock);
8388 struct trace_buffer *buffer;
8391 refcount_t refcount;
8394 static void buffer_ref_release(struct buffer_ref *ref)
8396 if (!refcount_dec_and_test(&ref->refcount))
8398 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8402 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8403 struct pipe_buffer *buf)
8405 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8407 buffer_ref_release(ref);
8411 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8412 struct pipe_buffer *buf)
8414 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8416 if (refcount_read(&ref->refcount) > INT_MAX/2)
8419 refcount_inc(&ref->refcount);
8423 /* Pipe buffer operations for a buffer. */
8424 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8425 .release = buffer_pipe_buf_release,
8426 .get = buffer_pipe_buf_get,
8430 * Callback from splice_to_pipe(), used when we need to release some pages
8431 * at the end of the spd because we errored out while filling the pipe.
8433 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8435 struct buffer_ref *ref =
8436 (struct buffer_ref *)spd->partial[i].private;
8438 buffer_ref_release(ref);
8439 spd->partial[i].private = 0;
8443 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8444 struct pipe_inode_info *pipe, size_t len,
8447 struct ftrace_buffer_info *info = file->private_data;
8448 struct trace_iterator *iter = &info->iter;
8449 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8450 struct page *pages_def[PIPE_DEF_BUFFERS];
8451 struct splice_pipe_desc spd = {
8453 .partial = partial_def,
8454 .nr_pages_max = PIPE_DEF_BUFFERS,
8455 .ops = &buffer_pipe_buf_ops,
8456 .spd_release = buffer_spd_release,
8458 struct buffer_ref *ref;
8462 #ifdef CONFIG_TRACER_MAX_TRACE
8463 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8467 if (*ppos & (PAGE_SIZE - 1))
8470 if (len & (PAGE_SIZE - 1)) {
8471 if (len < PAGE_SIZE)
8476 if (splice_grow_spd(pipe, &spd))
8480 trace_access_lock(iter->cpu_file);
8481 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8483 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8487 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8493 refcount_set(&ref->refcount, 1);
8494 ref->buffer = iter->array_buffer->buffer;
8495 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8496 if (IS_ERR(ref->page)) {
8497 ret = PTR_ERR(ref->page);
8502 ref->cpu = iter->cpu_file;
8504 r = ring_buffer_read_page(ref->buffer, &ref->page,
8505 len, iter->cpu_file, 1);
8507 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8513 page = virt_to_page(ref->page);
8515 spd.pages[i] = page;
8516 spd.partial[i].len = PAGE_SIZE;
8517 spd.partial[i].offset = 0;
8518 spd.partial[i].private = (unsigned long)ref;
8522 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8525 trace_access_unlock(iter->cpu_file);
8528 /* did we read anything? */
8529 if (!spd.nr_pages) {
8536 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8539 wait_index = READ_ONCE(iter->wait_index);
8541 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8545 /* No need to wait after waking up when tracing is off */
8546 if (!tracer_tracing_is_on(iter->tr))
8549 /* Make sure we see the new wait_index */
8551 if (wait_index != iter->wait_index)
8557 ret = splice_to_pipe(pipe, &spd);
8559 splice_shrink_spd(&spd);
8564 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8565 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8567 struct ftrace_buffer_info *info = file->private_data;
8568 struct trace_iterator *iter = &info->iter;
8571 return -ENOIOCTLCMD;
8573 mutex_lock(&trace_types_lock);
8576 /* Make sure the waiters see the new wait_index */
8579 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8581 mutex_unlock(&trace_types_lock);
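/*
 * Illustrative usage sketch (user-space side): a reader blocked in
 * read()/splice() on trace_pipe_raw can be released by another thread
 * issuing the "wake up waiters" ioctl described above:
 *
 *	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	ioctl(fd, 0);
 */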
8585 static const struct file_operations tracing_buffers_fops = {
8586 .open = tracing_buffers_open,
8587 .read = tracing_buffers_read,
8588 .poll = tracing_buffers_poll,
8589 .release = tracing_buffers_release,
8590 .splice_read = tracing_buffers_splice_read,
8591 .unlocked_ioctl = tracing_buffers_ioctl,
8592 .llseek = no_llseek,
8596 tracing_stats_read(struct file *filp, char __user *ubuf,
8597 size_t count, loff_t *ppos)
8599 struct inode *inode = file_inode(filp);
8600 struct trace_array *tr = inode->i_private;
8601 struct array_buffer *trace_buf = &tr->array_buffer;
8602 int cpu = tracing_get_cpu(inode);
8603 struct trace_seq *s;
8605 unsigned long long t;
8606 unsigned long usec_rem;
8608 s = kmalloc(sizeof(*s), GFP_KERNEL);
8614 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8615 trace_seq_printf(s, "entries: %ld\n", cnt);
8617 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8618 trace_seq_printf(s, "overrun: %ld\n", cnt);
8620 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8621 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8623 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8624 trace_seq_printf(s, "bytes: %ld\n", cnt);
8626 if (trace_clocks[tr->clock_id].in_ns) {
8627 /* local or global for trace_clock */
8628 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8629 usec_rem = do_div(t, USEC_PER_SEC);
8630 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8633 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8634 usec_rem = do_div(t, USEC_PER_SEC);
8635 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8637 /* counter or tsc mode for trace_clock */
8638 trace_seq_printf(s, "oldest event ts: %llu\n",
8639 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8641 trace_seq_printf(s, "now ts: %llu\n",
8642 ring_buffer_time_stamp(trace_buf->buffer));
8645 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8646 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8648 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8649 trace_seq_printf(s, "read events: %ld\n", cnt);
8651 count = simple_read_from_buffer(ubuf, count, ppos,
8652 s->buffer, trace_seq_used(s));
8659 static const struct file_operations tracing_stats_fops = {
8660 .open = tracing_open_generic_tr,
8661 .read = tracing_stats_read,
8662 .llseek = generic_file_llseek,
8663 .release = tracing_release_generic_tr,
8666 #ifdef CONFIG_DYNAMIC_FTRACE
8669 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8670 size_t cnt, loff_t *ppos)
8676 /* 256 should be plenty to hold the amount needed */
8677 buf = kmalloc(256, GFP_KERNEL);
8681 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8682 ftrace_update_tot_cnt,
8683 ftrace_number_of_pages,
8684 ftrace_number_of_groups);
8686 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8691 static const struct file_operations tracing_dyn_info_fops = {
8692 .open = tracing_open_generic,
8693 .read = tracing_read_dyn_info,
8694 .llseek = generic_file_llseek,
8696 #endif /* CONFIG_DYNAMIC_FTRACE */
8698 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8700 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8701 struct trace_array *tr, struct ftrace_probe_ops *ops,
8704 tracing_snapshot_instance(tr);
8708 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8709 struct trace_array *tr, struct ftrace_probe_ops *ops,
8712 struct ftrace_func_mapper *mapper = data;
8716 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8726 tracing_snapshot_instance(tr);
8730 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8731 struct ftrace_probe_ops *ops, void *data)
8733 struct ftrace_func_mapper *mapper = data;
8736 seq_printf(m, "%ps:", (void *)ip);
8738 seq_puts(m, "snapshot");
8741 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8744 seq_printf(m, ":count=%ld\n", *count);
8746 seq_puts(m, ":unlimited\n");
8752 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8753 unsigned long ip, void *init_data, void **data)
8755 struct ftrace_func_mapper *mapper = *data;
8758 mapper = allocate_ftrace_func_mapper();
8764 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8768 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8769 unsigned long ip, void *data)
8771 struct ftrace_func_mapper *mapper = data;
8776 free_ftrace_func_mapper(mapper, NULL);
8780 ftrace_func_mapper_remove_ip(mapper, ip);
8783 static struct ftrace_probe_ops snapshot_probe_ops = {
8784 .func = ftrace_snapshot,
8785 .print = ftrace_snapshot_print,
8788 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8789 .func = ftrace_count_snapshot,
8790 .print = ftrace_snapshot_print,
8791 .init = ftrace_snapshot_init,
8792 .free = ftrace_snapshot_free,
8796 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8797 char *glob, char *cmd, char *param, int enable)
8799 struct ftrace_probe_ops *ops;
8800 void *count = (void *)-1;
8807 /* hash funcs only work with set_ftrace_filter */
8811 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8814 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8819 number = strsep(&param, ":");
8821 if (!strlen(number))
8825 * We use the callback data field (which is a pointer)
8828 ret = kstrtoul(number, 0, (unsigned long *)&count);
8833 ret = tracing_alloc_snapshot_instance(tr);
8837 ret = register_ftrace_function_probe(glob, tr, ops, count);
8840 return ret < 0 ? ret : 0;
8843 static struct ftrace_func_command ftrace_snapshot_cmd = {
8845 .func = ftrace_trace_snapshot_callback,
8848 static __init int register_snapshot_cmd(void)
8850 return register_ftrace_command(&ftrace_snapshot_cmd);
8853 static inline __init int register_snapshot_cmd(void) { return 0; }
8854 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
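/*
 * Illustrative usage sketch (tracefs side, syntax as in the ftrace
 * documentation): the "snapshot" command registered above hangs a probe
 * off set_ftrace_filter, e.g.:
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter		(every hit)
 *	# echo 'schedule:snapshot:5' > set_ftrace_filter	(first 5 hits)
 *	# echo '!schedule:snapshot' > set_ftrace_filter		(remove it)
 *
 * Each hit calls tracing_snapshot_instance() on the trace array that the
 * filter file belongs to.
 */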
8856 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8858 if (WARN_ON(!tr->dir))
8859 return ERR_PTR(-ENODEV);
8861 /* Top directory uses NULL as the parent */
8862 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8865 /* All sub buffers have a descriptor */
8869 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8871 struct dentry *d_tracer;
8874 return tr->percpu_dir;
8876 d_tracer = tracing_get_dentry(tr);
8877 if (IS_ERR(d_tracer))
8880 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8882 MEM_FAIL(!tr->percpu_dir,
8883 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8885 return tr->percpu_dir;
8888 static struct dentry *
8889 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8890 void *data, long cpu, const struct file_operations *fops)
8892 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8894 if (ret) /* See tracing_get_cpu() */
8895 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8900 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8902 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8903 struct dentry *d_cpu;
8904 char cpu_dir[30]; /* 30 characters should be more than enough */
8909 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8910 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8912 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8916 /* per cpu trace_pipe */
8917 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8918 tr, cpu, &tracing_pipe_fops);
8921 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8922 tr, cpu, &tracing_fops);
8924 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8925 tr, cpu, &tracing_buffers_fops);
8927 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8928 tr, cpu, &tracing_stats_fops);
8930 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8931 tr, cpu, &tracing_entries_fops);
8933 #ifdef CONFIG_TRACER_SNAPSHOT
8934 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8935 tr, cpu, &snapshot_fops);
8937 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8938 tr, cpu, &snapshot_raw_fops);
8942 #ifdef CONFIG_FTRACE_SELFTEST
8943 /* Let selftest have access to static functions in this file */
8944 #include "trace_selftest.c"
8948 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8951 struct trace_option_dentry *topt = filp->private_data;
8954 if (topt->flags->val & topt->opt->bit)
8959 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8963 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8966 struct trace_option_dentry *topt = filp->private_data;
8970 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8974 if (val != 0 && val != 1)
8977 if (!!(topt->flags->val & topt->opt->bit) != val) {
8978 mutex_lock(&trace_types_lock);
8979 ret = __set_tracer_option(topt->tr, topt->flags,
8981 mutex_unlock(&trace_types_lock);
8991 static int tracing_open_options(struct inode *inode, struct file *filp)
8993 struct trace_option_dentry *topt = inode->i_private;
8996 ret = tracing_check_open_get_tr(topt->tr);
9000 filp->private_data = inode->i_private;
9004 static int tracing_release_options(struct inode *inode, struct file *file)
9006 struct trace_option_dentry *topt = file->private_data;
9008 trace_array_put(topt->tr);
9012 static const struct file_operations trace_options_fops = {
9013 .open = tracing_open_options,
9014 .read = trace_options_read,
9015 .write = trace_options_write,
9016 .llseek = generic_file_llseek,
9017 .release = tracing_release_options,
9021 * In order to pass in both the trace_array descriptor as well as the index
9022 * to the flag that the trace option file represents, the trace_array
9023 * has a character array of trace_flags_index[], which holds the index
9024 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
9025 * The address of this character array is passed to the flag option file
9026 * read/write callbacks.
9028 * In order to extract both the index and the trace_array descriptor,
9029 * get_tr_index() uses the following algorithm.
9033 * As the pointer itself contains the address of the index (remember
9036 * Then to get the trace_array descriptor, by subtracting that index
9037 * from the ptr, we get to the start of the index itself.
9039 * ptr - idx == &index[0]
9041 * Then a simple container_of() from that pointer gets us to the
9042 * trace_array descriptor.
9044 static void get_tr_index(void *data, struct trace_array **ptr,
9045 unsigned int *pindex)
9047 *pindex = *(unsigned char *)data;
9049 *ptr = container_of(data - *pindex, struct trace_array,
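/*
 * Worked example of the scheme described above: if the core option file
 * for bit 3 was created with data == &tr->trace_flags_index[3], then
 * *(unsigned char *)data == 3, data - 3 == &tr->trace_flags_index[0],
 * and container_of() on that address yields the enclosing trace_array.
 */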
9054 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
9057 void *tr_index = filp->private_data;
9058 struct trace_array *tr;
9062 get_tr_index(tr_index, &tr, &index);
9064 if (tr->trace_flags & (1 << index))
9069 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
9073 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
9076 void *tr_index = filp->private_data;
9077 struct trace_array *tr;
9082 get_tr_index(tr_index, &tr, &index);
9084 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9088 if (val != 0 && val != 1)
9091 mutex_lock(&event_mutex);
9092 mutex_lock(&trace_types_lock);
9093 ret = set_tracer_flag(tr, 1 << index, val);
9094 mutex_unlock(&trace_types_lock);
9095 mutex_unlock(&event_mutex);
9105 static const struct file_operations trace_options_core_fops = {
9106 .open = tracing_open_generic,
9107 .read = trace_options_core_read,
9108 .write = trace_options_core_write,
9109 .llseek = generic_file_llseek,
9112 struct dentry *trace_create_file(const char *name,
9114 struct dentry *parent,
9116 const struct file_operations *fops)
9120 ret = tracefs_create_file(name, mode, parent, data, fops);
9122 pr_warn("Could not create tracefs '%s' entry\n", name);
9128 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9130 struct dentry *d_tracer;
9135 d_tracer = tracing_get_dentry(tr);
9136 if (IS_ERR(d_tracer))
9139 tr->options = tracefs_create_dir("options", d_tracer);
9141 pr_warn("Could not create tracefs directory 'options'\n");
9149 create_trace_option_file(struct trace_array *tr,
9150 struct trace_option_dentry *topt,
9151 struct tracer_flags *flags,
9152 struct tracer_opt *opt)
9154 struct dentry *t_options;
9156 t_options = trace_options_init_dentry(tr);
9160 topt->flags = flags;
9164 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9165 t_options, topt, &trace_options_fops);
9170 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9172 struct trace_option_dentry *topts;
9173 struct trace_options *tr_topts;
9174 struct tracer_flags *flags;
9175 struct tracer_opt *opts;
9182 flags = tracer->flags;
9184 if (!flags || !flags->opts)
9188 * If this is an instance, only create flags for tracers
9189 * the instance may have.
9191 if (!trace_ok_for_array(tracer, tr))
9194 for (i = 0; i < tr->nr_topts; i++) {
9195 /* Make sure there are no duplicate flags. */
9196 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9202 for (cnt = 0; opts[cnt].name; cnt++)
9205 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9209 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9216 tr->topts = tr_topts;
9217 tr->topts[tr->nr_topts].tracer = tracer;
9218 tr->topts[tr->nr_topts].topts = topts;
9221 for (cnt = 0; opts[cnt].name; cnt++) {
9222 create_trace_option_file(tr, &topts[cnt], flags,
9224 MEM_FAIL(topts[cnt].entry == NULL,
9225 "Failed to create trace option: %s",
9230 static struct dentry *
9231 create_trace_option_core_file(struct trace_array *tr,
9232 const char *option, long index)
9234 struct dentry *t_options;
9236 t_options = trace_options_init_dentry(tr);
9240 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9241 (void *)&tr->trace_flags_index[index],
9242 &trace_options_core_fops);
9245 static void create_trace_options_dir(struct trace_array *tr)
9247 struct dentry *t_options;
9248 bool top_level = tr == &global_trace;
9251 t_options = trace_options_init_dentry(tr);
9255 for (i = 0; trace_options[i]; i++) {
9257 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9258 create_trace_option_core_file(tr, trace_options[i], i);
9263 rb_simple_read(struct file *filp, char __user *ubuf,
9264 size_t cnt, loff_t *ppos)
9266 struct trace_array *tr = filp->private_data;
9270 r = tracer_tracing_is_on(tr);
9271 r = sprintf(buf, "%d\n", r);
9273 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9277 rb_simple_write(struct file *filp, const char __user *ubuf,
9278 size_t cnt, loff_t *ppos)
9280 struct trace_array *tr = filp->private_data;
9281 struct trace_buffer *buffer = tr->array_buffer.buffer;
9285 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9290 mutex_lock(&trace_types_lock);
9291 if (!!val == tracer_tracing_is_on(tr)) {
9292 val = 0; /* do nothing */
9294 tracer_tracing_on(tr);
9295 if (tr->current_trace->start)
9296 tr->current_trace->start(tr);
9298 tracer_tracing_off(tr);
9299 if (tr->current_trace->stop)
9300 tr->current_trace->stop(tr);
9301 /* Wake up any waiters */
9302 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9304 mutex_unlock(&trace_types_lock);
9312 static const struct file_operations rb_simple_fops = {
9313 .open = tracing_open_generic_tr,
9314 .read = rb_simple_read,
9315 .write = rb_simple_write,
9316 .release = tracing_release_generic_tr,
9317 .llseek = default_llseek,
9321 buffer_percent_read(struct file *filp, char __user *ubuf,
9322 size_t cnt, loff_t *ppos)
9324 struct trace_array *tr = filp->private_data;
9328 r = tr->buffer_percent;
9329 r = sprintf(buf, "%d\n", r);
9331 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9335 buffer_percent_write(struct file *filp, const char __user *ubuf,
9336 size_t cnt, loff_t *ppos)
9338 struct trace_array *tr = filp->private_data;
9342 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9349 tr->buffer_percent = val;
9356 static const struct file_operations buffer_percent_fops = {
9357 .open = tracing_open_generic_tr,
9358 .read = buffer_percent_read,
9359 .write = buffer_percent_write,
9360 .release = tracing_release_generic_tr,
9361 .llseek = default_llseek,
9364 static struct dentry *trace_instance_dir;
9367 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9370 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9372 enum ring_buffer_flags rb_flags;
9374 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9378 buf->buffer = ring_buffer_alloc(size, rb_flags);
9382 buf->data = alloc_percpu(struct trace_array_cpu);
9384 ring_buffer_free(buf->buffer);
9389 /* Allocate the first page for all buffers */
9390 set_buffer_entries(&tr->array_buffer,
9391 ring_buffer_size(tr->array_buffer.buffer, 0));
9396 static void free_trace_buffer(struct array_buffer *buf)
9399 ring_buffer_free(buf->buffer);
9401 free_percpu(buf->data);
9406 static int allocate_trace_buffers(struct trace_array *tr, int size)
9410 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9414 #ifdef CONFIG_TRACER_MAX_TRACE
9415 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9416 allocate_snapshot ? size : 1);
9417 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9418 free_trace_buffer(&tr->array_buffer);
9421 tr->allocated_snapshot = allocate_snapshot;
9423 allocate_snapshot = false;
9429 static void free_trace_buffers(struct trace_array *tr)
9434 free_trace_buffer(&tr->array_buffer);
9436 #ifdef CONFIG_TRACER_MAX_TRACE
9437 free_trace_buffer(&tr->max_buffer);
9441 static void init_trace_flags_index(struct trace_array *tr)
9445 /* Used by the trace options files */
9446 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9447 tr->trace_flags_index[i] = i;
9450 static void __update_tracer_options(struct trace_array *tr)
9454 for (t = trace_types; t; t = t->next)
9455 add_tracer_options(tr, t);
9458 static void update_tracer_options(struct trace_array *tr)
9460 mutex_lock(&trace_types_lock);
9461 tracer_options_updated = true;
9462 __update_tracer_options(tr);
9463 mutex_unlock(&trace_types_lock);
9466 /* Must have trace_types_lock held */
9467 struct trace_array *trace_array_find(const char *instance)
9469 struct trace_array *tr, *found = NULL;
9471 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9472 if (tr->name && strcmp(tr->name, instance) == 0) {
9481 struct trace_array *trace_array_find_get(const char *instance)
9483 struct trace_array *tr;
9485 mutex_lock(&trace_types_lock);
9486 tr = trace_array_find(instance);
9489 mutex_unlock(&trace_types_lock);
9494 static int trace_array_create_dir(struct trace_array *tr)
9498 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9502 ret = event_trace_add_tracer(tr->dir, tr);
9504 tracefs_remove(tr->dir);
9508 init_tracer_tracefs(tr, tr->dir);
9509 __update_tracer_options(tr);
9514 static struct trace_array *trace_array_create(const char *name)
9516 struct trace_array *tr;
9520 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9522 return ERR_PTR(ret);
9524 tr->name = kstrdup(name, GFP_KERNEL);
9528 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9531 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9534 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9536 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9538 raw_spin_lock_init(&tr->start_lock);
9540 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9542 tr->current_trace = &nop_trace;
9544 INIT_LIST_HEAD(&tr->systems);
9545 INIT_LIST_HEAD(&tr->events);
9546 INIT_LIST_HEAD(&tr->hist_vars);
9547 INIT_LIST_HEAD(&tr->err_log);
9549 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9552 if (ftrace_allocate_ftrace_ops(tr) < 0)
9555 ftrace_init_trace_array(tr);
9557 init_trace_flags_index(tr);
9559 if (trace_instance_dir) {
9560 ret = trace_array_create_dir(tr);
9564 __trace_early_add_events(tr);
9566 list_add(&tr->list, &ftrace_trace_arrays);
9573 ftrace_free_ftrace_ops(tr);
9574 free_trace_buffers(tr);
9575 free_cpumask_var(tr->pipe_cpumask);
9576 free_cpumask_var(tr->tracing_cpumask);
9580 return ERR_PTR(ret);
9583 static int instance_mkdir(const char *name)
9585 struct trace_array *tr;
9588 mutex_lock(&event_mutex);
9589 mutex_lock(&trace_types_lock);
9592 if (trace_array_find(name))
9595 tr = trace_array_create(name);
9597 ret = PTR_ERR_OR_ZERO(tr);
9600 mutex_unlock(&trace_types_lock);
9601 mutex_unlock(&event_mutex);
9606 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9607 * @name: The name of the trace array to be looked up/created.
9609 * Returns a pointer to the trace array with the given name, or
9610 * NULL if it cannot be created.
9612 * NOTE: This function increments the reference counter associated with the
9613 * trace array returned. This makes sure it cannot be freed while in use.
9614 * Use trace_array_put() once the trace array is no longer needed.
9615 * If the trace_array is to be freed, trace_array_destroy() needs to
9616 * be called after the trace_array_put(), or simply let user space delete
9617 * it from the tracefs instances directory. But until the
9618 * trace_array_put() is called, user space cannot delete it.
9621 struct trace_array *trace_array_get_by_name(const char *name)
9623 struct trace_array *tr;
9625 mutex_lock(&event_mutex);
9626 mutex_lock(&trace_types_lock);
9628 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9629 if (tr->name && strcmp(tr->name, name) == 0)
9633 tr = trace_array_create(name);
9641 mutex_unlock(&trace_types_lock);
9642 mutex_unlock(&event_mutex);
9645 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
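/*
 * Illustrative usage sketch ("my_driver" is hypothetical): a module that
 * wants its own tracing instance typically does:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_driver");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *
 * and, only if the instance should be removed entirely, follows the
 * trace_array_put() with trace_array_destroy(tr), as described in the
 * comment above.
 */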
9647 static int __remove_instance(struct trace_array *tr)
9651 /* Reference counter for a newly created trace array = 1. */
9652 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9655 list_del(&tr->list);
9657 /* Disable all the flags that were enabled coming in */
9658 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9659 if ((1 << i) & ZEROED_TRACE_FLAGS)
9660 set_tracer_flag(tr, 1 << i, 0);
9663 tracing_set_nop(tr);
9664 clear_ftrace_function_probes(tr);
9665 event_trace_del_tracer(tr);
9666 ftrace_clear_pids(tr);
9667 ftrace_destroy_function_files(tr);
9668 tracefs_remove(tr->dir);
9669 free_percpu(tr->last_func_repeats);
9670 free_trace_buffers(tr);
9671 clear_tracing_err_log(tr);
9673 for (i = 0; i < tr->nr_topts; i++) {
9674 kfree(tr->topts[i].topts);
9678 free_cpumask_var(tr->pipe_cpumask);
9679 free_cpumask_var(tr->tracing_cpumask);
9686 int trace_array_destroy(struct trace_array *this_tr)
9688 struct trace_array *tr;
9694 mutex_lock(&event_mutex);
9695 mutex_lock(&trace_types_lock);
9699 /* Make sure the trace array exists before destroying it. */
9700 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9701 if (tr == this_tr) {
9702 ret = __remove_instance(tr);
9707 mutex_unlock(&trace_types_lock);
9708 mutex_unlock(&event_mutex);
9712 EXPORT_SYMBOL_GPL(trace_array_destroy);
9714 static int instance_rmdir(const char *name)
9716 struct trace_array *tr;
9719 mutex_lock(&event_mutex);
9720 mutex_lock(&trace_types_lock);
9723 tr = trace_array_find(name);
9725 ret = __remove_instance(tr);
9727 mutex_unlock(&trace_types_lock);
9728 mutex_unlock(&event_mutex);
9733 static __init void create_trace_instances(struct dentry *d_tracer)
9735 struct trace_array *tr;
9737 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9740 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9743 mutex_lock(&event_mutex);
9744 mutex_lock(&trace_types_lock);
9746 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9749 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9750 "Failed to create instance directory\n"))
9754 mutex_unlock(&trace_types_lock);
9755 mutex_unlock(&event_mutex);
9759 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9761 struct trace_event_file *file;
9764 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9765 tr, &show_traces_fops);
9767 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9768 tr, &set_tracer_fops);
9770 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9771 tr, &tracing_cpumask_fops);
9773 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9774 tr, &tracing_iter_fops);
9776 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9779 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9780 tr, &tracing_pipe_fops);
9782 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9783 tr, &tracing_entries_fops);
9785 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9786 tr, &tracing_total_entries_fops);
9788 trace_create_file("free_buffer", 0200, d_tracer,
9789 tr, &tracing_free_buffer_fops);
9791 trace_create_file("trace_marker", 0220, d_tracer,
9792 tr, &tracing_mark_fops);
9794 file = __find_event_file(tr, "ftrace", "print");
9795 if (file && file->ef)
9796 eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
9797 file, &event_trigger_fops);
9798 tr->trace_marker_file = file;
9800 trace_create_file("trace_marker_raw", 0220, d_tracer,
9801 tr, &tracing_mark_raw_fops);
9803 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9806 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9807 tr, &rb_simple_fops);
9809 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9810 &trace_time_stamp_mode_fops);
9812 tr->buffer_percent = 50;
9814 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9815 tr, &buffer_percent_fops);
9817 create_trace_options_dir(tr);
9819 #ifdef CONFIG_TRACER_MAX_TRACE
9820 trace_create_maxlat_file(tr, d_tracer);
9823 if (ftrace_create_function_files(tr, d_tracer))
9824 MEM_FAIL(1, "Could not allocate function filter files");
9826 #ifdef CONFIG_TRACER_SNAPSHOT
9827 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9828 tr, &snapshot_fops);
9831 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9832 tr, &tracing_err_log_fops);
9834 for_each_tracing_cpu(cpu)
9835 tracing_init_tracefs_percpu(tr, cpu);
9837 ftrace_init_tracefs(tr, d_tracer);
9840 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9842 struct vfsmount *mnt;
9843 struct file_system_type *type;
9846 * To maintain backward compatibility for tools that mount
9847 * debugfs to get to the tracing facility, tracefs is automatically
9848 * mounted to the debugfs/tracing directory.
9850 type = get_fs_type("tracefs");
9853 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9854 put_filesystem(type);
9863 * tracing_init_dentry - initialize top level trace array
9865 * This is called when creating files or directories in the tracing
9866 * directory. It is called via fs_initcall() by any of the boot up code
9867 * and expects to return the dentry of the top level tracing directory.
9869 int tracing_init_dentry(void)
9871 struct trace_array *tr = &global_trace;
9873 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9874 pr_warn("Tracing disabled due to lockdown\n");
9878 /* The top level trace array uses NULL as parent */
9882 if (WARN_ON(!tracefs_initialized()))
9886 * As there may still be users that expect the tracing
9887 * files to exist in debugfs/tracing, we must automount
9888 * the tracefs file system there, so older tools still
9889 * work with the newer kernel.
9891 tr->dir = debugfs_create_automount("tracing", NULL,
9892 trace_automount, NULL);

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;
static struct work_struct tracerfs_init_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

static int __init trace_eval_init(void)
{
	INIT_WORK(&eval_map_work, eval_map_work_func);

	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
	if (!eval_map_wq) {
		pr_err("Unable to allocate eval_map_wq\n");
		/* Do the work here */
		eval_map_work_func(&eval_map_work);
		return -ENOMEM;
	}

	queue_work(eval_map_wq, &eval_map_work);
	return 0;
}

subsys_initcall(trace_eval_init);

static int __init trace_eval_sync(void)
{
	/* Make sure the eval map updates are finished */
	if (eval_map_wq)
		destroy_workqueue(eval_map_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);
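
/*
 * destroy_workqueue() waits for any queued eval map work to complete, so
 * by the time the __initdata objects above are released the maps have
 * already been inserted.
 */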

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			  NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			  NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);

static int trace_die_panic_handler(struct notifier_block *self,
				   unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				   unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops)
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_DONE;
}
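
/*
 * ftrace_dump_on_oops is usually enabled with the "ftrace_dump_on_oops"
 * kernel command line option or the kernel.ftrace_dump_on_oops sysctl;
 * "ftrace_dump_on_oops=orig_cpu" limits the dump to the CPU that oopsed.
 */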

/*
 * The printk buffer is limited to 1024 bytes; we really don't need it
 * that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter->temp and iter->fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
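
/*
 * Besides the oops/panic notifiers above, ftrace_dump() can be invoked
 * manually, for example via the magic SysRq key 'z' (sysrq-z), which dumps
 * all CPU buffers to the console.
 */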

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
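
/*
 * trace_parse_run_command() backs write() handlers such as the
 * kprobe_events and uprobe_events files: each newline-terminated line is
 * handed to createfn(), and anything after a '#' is treated as a comment,
 * e.g. "echo 'p:myprobe do_sys_openat2' >> /sys/kernel/tracing/kprobe_events".
 */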

#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) != NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif

__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
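
/*
 * boot_instance_info is filled from the "trace_instance=" kernel command
 * line option as a tab-separated list of "<name>[,<event> ...]" entries,
 * so each boot-time instance can come up with its own set of events.
 */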

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
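
/*
 * snapshot_at_boot is set by the "ftrace_boot_snapshot" kernel command
 * line option, which takes a snapshot of each instance that has a
 * snapshot buffer at the end of boot-up.
 */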

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
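
/*
 * Both functions above are called from start_kernel(): early_trace_init()
 * runs early so that trace_printk() becomes usable, and trace_init() runs
 * later in boot, after which trace events and tracepoints may be used.
 */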

__init static void clear_boot_tracer(void)
{
	/*
	 * The buffer holding the default boot-up tracer name is in an init
	 * section. This function is called at late_initcall time. If the
	 * boot tracer was never registered, clear the pointer, to prevent
	 * later registration from accessing the buffer that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);