1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 #include "trace_output.h"
58 * On boot up, the ring buffer is set to the minimum size, so that
59 * we do not waste memory on systems that are not using tracing.
61 bool ring_buffer_expanded;
63 #ifdef CONFIG_FTRACE_STARTUP_TEST
65 * We need to change this state when a selftest is running.
66 * A selftest will look into the ring buffer to count the
67 * entries inserted during the selftest, although some concurrent
68 * insertions into the ring buffer, such as trace_printk(), could occur
69 * at the same time, giving false positive or negative results.
71 static bool __read_mostly tracing_selftest_running;
74 * If boot-time tracing (including tracers/events set up via the kernel
75 * cmdline) is running, we do not want to run the selftests.
77 bool __read_mostly tracing_selftest_disabled;
79 void __init disable_tracing_selftest(const char *reason)
81 if (!tracing_selftest_disabled) {
82 tracing_selftest_disabled = true;
83 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 #define tracing_selftest_running 0
88 #define tracing_selftest_disabled 0
91 /* Pipe tracepoints to printk */
92 static struct trace_iterator *tracepoint_print_iter;
93 int tracepoint_printk;
94 static bool tracepoint_printk_stop_on_boot __initdata;
95 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
97 /* For tracers that don't implement custom flags */
98 static struct tracer_opt dummy_tracer_opt[] = {
103 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
109 * To prevent the comm cache from being overwritten when no
110 * tracing is active, only save the comm when a trace event occurred.
113 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
116 * Kill all tracing for good (never come back).
117 * It is initialized to 1 but will turn to zero if the initialization
118 * of the tracer is successful. That is the only place that sets it back to zero.
121 static int tracing_disabled = 1;
123 cpumask_var_t __read_mostly tracing_buffer_mask;
126 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
128 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
129 * is set, then ftrace_dump is called. This will output the contents
130 * of the ftrace buffers to the console. This is very useful for
131 * capturing traces that lead to crashes and outputting them to a
134 * It is off by default, but you can enable it either by specifying
135 * "ftrace_dump_on_oops" on the kernel command line or by setting
136 * /proc/sys/kernel/ftrace_dump_on_oops.
137 * Set it to 1 to dump the buffers of all CPUs.
138 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
141 enum ftrace_dump_mode ftrace_dump_on_oops;
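/*
 * For example (an illustrative sketch of the handlers below): booting with
 * "ftrace_dump_on_oops" or "ftrace_dump_on_oops=orig_cpu" on the kernel
 * command line selects DUMP_ALL or DUMP_ORIG respectively, and the same
 * can be changed at run time with:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	# dump all CPUs
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops	# dump only the oops CPU
 */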
143 /* When set, tracing will stop when a WARN*() is hit */
144 int __disable_trace_on_warning;
146 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
147 /* Map of enums to their values, for "eval_map" file */
148 struct trace_eval_map_head {
150 unsigned long length;
153 union trace_eval_map_item;
155 struct trace_eval_map_tail {
157 * "end" is first and points to NULL as it must be different
158 * from "mod" or "eval_string"
160 union trace_eval_map_item *next;
161 const char *end; /* points to NULL */
164 static DEFINE_MUTEX(trace_eval_mutex);
167 * The trace_eval_maps are saved in an array with two extra elements,
168 * one at the beginning, and one at the end. The beginning item contains
169 * the count of the saved maps (head.length), and the module they
170 * belong to if not built in (head.mod). The ending item contains a
171 * pointer to the next array of saved eval_map items.
173 union trace_eval_map_item {
174 struct trace_eval_map map;
175 struct trace_eval_map_head head;
176 struct trace_eval_map_tail tail;
179 static union trace_eval_map_item *trace_eval_maps;
180 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
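/*
 * A minimal sketch of how such a saved array can be walked, assuming the
 * layout described above (index 0 is the head, indexes 1..length are maps,
 * index length+1 is the tail); real walkers in this file do this while
 * holding trace_eval_mutex:
 *
 *	union trace_eval_map_item *ptr = trace_eval_maps;
 *
 *	while (ptr) {
 *		unsigned long len = ptr->head.length;
 *		unsigned long i;
 *
 *		for (i = 1; i <= len; i++)
 *			pr_info("%s = %lu\n", ptr[i].map.eval_string,
 *				ptr[i].map.eval_value);
 *
 *		ptr = ptr[len + 1].tail.next;
 *	}
 */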
182 int tracing_set_tracer(struct trace_array *tr, const char *buf);
183 static void ftrace_trace_userstack(struct trace_array *tr,
184 struct trace_buffer *buffer,
185 unsigned int trace_ctx);
187 #define MAX_TRACER_SIZE 100
188 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
189 static char *default_bootup_tracer;
191 static bool allocate_snapshot;
192 static bool snapshot_at_boot;
194 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_instance_index;
197 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
198 static int boot_snapshot_index;
200 static int __init set_cmdline_ftrace(char *str)
202 strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
203 default_bootup_tracer = bootup_tracer_buf;
204 /* We are using ftrace early, expand it */
205 ring_buffer_expanded = true;
208 __setup("ftrace=", set_cmdline_ftrace);
210 static int __init set_ftrace_dump_on_oops(char *str)
212 if (*str++ != '=' || !*str || !strcmp("1", str)) {
213 ftrace_dump_on_oops = DUMP_ALL;
217 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
218 ftrace_dump_on_oops = DUMP_ORIG;
224 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
226 static int __init stop_trace_on_warning(char *str)
228 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
229 __disable_trace_on_warning = 1;
232 __setup("traceoff_on_warning", stop_trace_on_warning);
234 static int __init boot_alloc_snapshot(char *str)
236 char *slot = boot_snapshot_info + boot_snapshot_index;
237 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
242 if (strlen(str) >= left)
245 ret = snprintf(slot, left, "%s\t", str);
246 boot_snapshot_index += ret;
248 allocate_snapshot = true;
249 /* We also need the main ring buffer expanded */
250 ring_buffer_expanded = true;
254 __setup("alloc_snapshot", boot_alloc_snapshot);
257 static int __init boot_snapshot(char *str)
259 snapshot_at_boot = true;
260 boot_alloc_snapshot(str);
263 __setup("ftrace_boot_snapshot", boot_snapshot);
266 static int __init boot_instance(char *str)
268 char *slot = boot_instance_info + boot_instance_index;
269 int left = sizeof(boot_instance_info) - boot_instance_index;
272 if (strlen(str) >= left)
275 ret = snprintf(slot, left, "%s\t", str);
276 boot_instance_index += ret;
280 __setup("trace_instance=", boot_instance);
283 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
285 static int __init set_trace_boot_options(char *str)
287 strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
290 __setup("trace_options=", set_trace_boot_options);
292 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
293 static char *trace_boot_clock __initdata;
295 static int __init set_trace_boot_clock(char *str)
297 strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
298 trace_boot_clock = trace_boot_clock_buf;
301 __setup("trace_clock=", set_trace_boot_clock);
303 static int __init set_tracepoint_printk(char *str)
305 /* Ignore the "tp_printk_stop_on_boot" param */
309 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
310 tracepoint_printk = 1;
313 __setup("tp_printk", set_tracepoint_printk);
315 static int __init set_tracepoint_printk_stop(char *str)
317 tracepoint_printk_stop_on_boot = true;
320 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
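/*
 * Putting the boot parameters above together: a kernel command line such as
 * the following (an illustrative example) starts the function tracer at
 * boot, selects the global trace clock, reserves a snapshot buffer, and
 * dumps the buffers of all CPUs on an oops:
 *
 *	ftrace=function trace_clock=global alloc_snapshot ftrace_dump_on_oops
 */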
322 unsigned long long ns2usecs(u64 nsec)
330 trace_process_export(struct trace_export *export,
331 struct ring_buffer_event *event, int flag)
333 struct trace_entry *entry;
334 unsigned int size = 0;
336 if (export->flags & flag) {
337 entry = ring_buffer_event_data(event);
338 size = ring_buffer_event_length(event);
339 export->write(export, entry, size);
343 static DEFINE_MUTEX(ftrace_export_lock);
345 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
347 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
348 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
349 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
351 static inline void ftrace_exports_enable(struct trace_export *export)
353 if (export->flags & TRACE_EXPORT_FUNCTION)
354 static_branch_inc(&trace_function_exports_enabled);
356 if (export->flags & TRACE_EXPORT_EVENT)
357 static_branch_inc(&trace_event_exports_enabled);
359 if (export->flags & TRACE_EXPORT_MARKER)
360 static_branch_inc(&trace_marker_exports_enabled);
363 static inline void ftrace_exports_disable(struct trace_export *export)
365 if (export->flags & TRACE_EXPORT_FUNCTION)
366 static_branch_dec(&trace_function_exports_enabled);
368 if (export->flags & TRACE_EXPORT_EVENT)
369 static_branch_dec(&trace_event_exports_enabled);
371 if (export->flags & TRACE_EXPORT_MARKER)
372 static_branch_dec(&trace_marker_exports_enabled);
375 static void ftrace_exports(struct ring_buffer_event *event, int flag)
377 struct trace_export *export;
379 preempt_disable_notrace();
381 export = rcu_dereference_raw_check(ftrace_exports_list);
383 trace_process_export(export, event, flag);
384 export = rcu_dereference_raw_check(export->next);
387 preempt_enable_notrace();
391 add_trace_export(struct trace_export **list, struct trace_export *export)
393 rcu_assign_pointer(export->next, *list);
395 * We are inserting the export into the list, but another
396 * CPU might be walking that list. We need to make sure
397 * the export->next pointer is valid before another CPU sees
398 * the export pointer inserted into the list.
400 rcu_assign_pointer(*list, export);
404 rm_trace_export(struct trace_export **list, struct trace_export *export)
406 struct trace_export **p;
408 for (p = list; *p != NULL; p = &(*p)->next)
415 rcu_assign_pointer(*p, (*p)->next);
421 add_ftrace_export(struct trace_export **list, struct trace_export *export)
423 ftrace_exports_enable(export);
425 add_trace_export(list, export);
429 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
433 ret = rm_trace_export(list, export);
434 ftrace_exports_disable(export);
439 int register_ftrace_export(struct trace_export *export)
441 if (WARN_ON_ONCE(!export->write))
444 mutex_lock(&ftrace_export_lock);
446 add_ftrace_export(&ftrace_exports_list, export);
448 mutex_unlock(&ftrace_export_lock);
452 EXPORT_SYMBOL_GPL(register_ftrace_export);
454 int unregister_ftrace_export(struct trace_export *export)
458 mutex_lock(&ftrace_export_lock);
460 ret = rm_ftrace_export(&ftrace_exports_list, export);
462 mutex_unlock(&ftrace_export_lock);
466 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
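/*
 * Example user of this interface (an illustrative sketch; my_export and
 * my_export_write are hypothetical): a module can mirror function trace
 * entries to another sink by supplying a ->write() callback:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		// forward "size" bytes of the raw trace entry somewhere else
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */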
468 /* trace_flags holds trace_options default values */
469 #define TRACE_DEFAULT_FLAGS \
470 (FUNCTION_DEFAULT_FLAGS | \
471 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
472 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
473 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
474 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
477 /* trace_options that are only supported by global_trace */
478 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
479 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
481 /* trace_flags that are default zero for instances */
482 #define ZEROED_TRACE_FLAGS \
483 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
486 * The global_trace is the descriptor that holds the top-level tracing
487 * buffers for the live tracing.
489 static struct trace_array global_trace = {
490 .trace_flags = TRACE_DEFAULT_FLAGS,
493 LIST_HEAD(ftrace_trace_arrays);
495 int trace_array_get(struct trace_array *this_tr)
497 struct trace_array *tr;
500 mutex_lock(&trace_types_lock);
501 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
508 mutex_unlock(&trace_types_lock);
513 static void __trace_array_put(struct trace_array *this_tr)
515 WARN_ON(!this_tr->ref);
520 * trace_array_put - Decrement the reference counter for this trace array.
521 * @this_tr : pointer to the trace array
523 * NOTE: Use this when we no longer need the trace array returned by
524 * trace_array_get_by_name(). This ensures the trace array can be later destroyed.
528 void trace_array_put(struct trace_array *this_tr)
533 mutex_lock(&trace_types_lock);
534 __trace_array_put(this_tr);
535 mutex_unlock(&trace_types_lock);
537 EXPORT_SYMBOL_GPL(trace_array_put);
539 int tracing_check_open_get_tr(struct trace_array *tr)
543 ret = security_locked_down(LOCKDOWN_TRACEFS);
547 if (tracing_disabled)
550 if (tr && trace_array_get(tr) < 0)
556 int call_filter_check_discard(struct trace_event_call *call, void *rec,
557 struct trace_buffer *buffer,
558 struct ring_buffer_event *event)
560 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
561 !filter_match_preds(call->filter, rec)) {
562 __trace_event_discard_commit(buffer, event);
570 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
571 * @filtered_pids: The list of pids to check
572 * @search_pid: The PID to find in @filtered_pids
574 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
577 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
579 return trace_pid_list_is_set(filtered_pids, search_pid);
583 * trace_ignore_this_task - should a task be ignored for tracing
584 * @filtered_pids: The list of pids to check
585 * @filtered_no_pids: The list of pids not to be traced
586 * @task: The task that should be ignored if not filtered
588 * Checks if @task should be traced or not from @filtered_pids.
589 * Returns true if @task should *NOT* be traced.
590 * Returns false if @task should be traced.
593 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
594 struct trace_pid_list *filtered_no_pids,
595 struct task_struct *task)
598 * If filtered_no_pids is not empty, and the task's pid is listed
599 * in filtered_no_pids, then return true.
600 * Otherwise, if filtered_pids is empty, that means we can
601 * trace all tasks. If it has content, then only trace pids
602 * within filtered_pids.
605 return (filtered_pids &&
606 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
608 trace_find_filtered_pid(filtered_no_pids, task->pid));
612 * trace_filter_add_remove_task - Add or remove a task from a pid_list
613 * @pid_list: The list to modify
614 * @self: The current task for fork or NULL for exit
615 * @task: The task to add or remove
617 * When adding a task, if @self is defined, the task is only added if @self
618 * is also included in @pid_list. This happens on fork, where a task should
619 * only be added when its parent is listed. If @self is NULL, then the
620 * @task pid will be removed from the list, which happens on exit.
623 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
624 struct task_struct *self,
625 struct task_struct *task)
630 /* For forks, we only add if the forking task is listed */
632 if (!trace_find_filtered_pid(pid_list, self->pid))
636 /* "self" is set for forks, and NULL for exits */
638 trace_pid_list_set(pid_list, task->pid);
640 trace_pid_list_clear(pid_list, task->pid);
644 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
645 * @pid_list: The pid list to show
646 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
647 * @pos: The position of the file
649 * This is used by the seq_file "next" operation to iterate the pids
650 * listed in a trace_pid_list structure.
652 * Returns the pid+1, as we want to be able to display a pid of zero, but
653 * returning NULL would stop the iteration.
655 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
657 long pid = (unsigned long)v;
662 /* pid already is +1 of the actual previous bit */
663 if (trace_pid_list_next(pid_list, pid, &next) < 0)
668 /* Return pid + 1 to allow zero to be represented */
669 return (void *)(pid + 1);
673 * trace_pid_start - Used for seq_file to start reading pid lists
674 * @pid_list: The pid list to show
675 * @pos: The position of the file
677 * This is used by seq_file "start" operation to start the iteration
680 * Returns the pid+1, as we want to be able to display a pid of zero, but
681 * returning NULL would stop the iteration.
683 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
689 if (trace_pid_list_first(pid_list, &first) < 0)
694 /* Return pid + 1 so that zero can be the exit value */
695 for (pid++; pid && l < *pos;
696 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
702 * trace_pid_show - show the current pid in seq_file processing
703 * @m: The seq_file structure to write into
704 * @v: A void pointer of the pid (+1) value to display
706 * Can be directly used by seq_file operations to display the current
709 int trace_pid_show(struct seq_file *m, void *v)
711 unsigned long pid = (unsigned long)v - 1;
713 seq_printf(m, "%lu\n", pid);
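/*
 * Together, trace_pid_start(), trace_pid_next() and trace_pid_show() plug
 * straight into seq_file iteration. A minimal sketch (p_start/p_next/p_stop
 * and the use of m->private are hypothetical; the real users in the tracing
 * code also take the pid list under RCU in their own callbacks):
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		struct trace_pid_list *pid_list = m->private;
 *
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		struct trace_pid_list *pid_list = m->private;
 *
 *		return trace_pid_next(pid_list, v, pos);
 *	}
 *
 *	static void p_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */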
717 /* 128 should be much more than enough */
718 #define PID_BUF_SIZE 127
720 int trace_pid_write(struct trace_pid_list *filtered_pids,
721 struct trace_pid_list **new_pid_list,
722 const char __user *ubuf, size_t cnt)
724 struct trace_pid_list *pid_list;
725 struct trace_parser parser;
733 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
737 * Always recreate a new array. The write is an all-or-nothing
738 * operation: a new array is always created when the user adds
739 * new pids. If the operation fails, the current list is not modified.
742 pid_list = trace_pid_list_alloc();
744 trace_parser_put(&parser);
749 /* copy the current bits to the new max */
750 ret = trace_pid_list_first(filtered_pids, &pid);
752 trace_pid_list_set(pid_list, pid);
753 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
763 ret = trace_get_user(&parser, ubuf, cnt, &pos);
771 if (!trace_parser_loaded(&parser))
775 if (kstrtoul(parser.buffer, 0, &val))
780 if (trace_pid_list_set(pid_list, pid) < 0) {
786 trace_parser_clear(&parser);
789 trace_parser_put(&parser);
792 trace_pid_list_free(pid_list);
797 /* Cleared the list of pids */
798 trace_pid_list_free(pid_list);
802 *new_pid_list = pid_list;
807 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
811 /* Early boot up does not have a buffer yet */
813 return trace_clock_local();
815 ts = ring_buffer_time_stamp(buf->buffer);
816 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
821 u64 ftrace_now(int cpu)
823 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
827 * tracing_is_enabled - Show if global_trace has been enabled
829 * Shows if the global trace has been enabled or not. It uses the
830 * mirror flag "buffer_disabled" so that it can be used in fast paths,
831 * such as by the irqsoff tracer. But it may be inaccurate due to races. If you
832 * need to know the accurate state, use tracing_is_on() which is a little
833 * slower, but accurate.
835 int tracing_is_enabled(void)
838 * For quick access (irqsoff uses this in fast path), just
839 * return the mirror variable of the state of the ring buffer.
840 * It's a little racy, but we don't really care.
843 return !global_trace.buffer_disabled;
847 * trace_buf_size is the size in bytes that is allocated
848 * for a buffer. Note, the number of bytes is always rounded
851 * This number is purposely set to a low value of 16384 entries:
852 * if a dump on oops happens, it is much appreciated not to have
853 * to wait for all of that output. In any case, the size is
854 * configurable at both boot time and run time.
856 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
858 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
860 /* trace_types holds a link list of available tracers. */
861 static struct tracer *trace_types __read_mostly;
864 * trace_types_lock is used to protect the trace_types list.
866 DEFINE_MUTEX(trace_types_lock);
869 * Serialize access to the ring buffer.
871 * The ring buffer serializes readers, but that is only low-level protection.
872 * The validity of the events (returned by ring_buffer_peek() etc.)
873 * is not protected by the ring buffer itself.
875 * The content of events may become garbage if we allow other processes to
876 * consume these events concurrently:
877 * A) the page of the consumed events may become a normal page
878 *    (not a reader page) in the ring buffer, and will be rewritten
879 *    by the event producer.
880 * B) the page of the consumed events may become a page for splice_read,
881 *    and will be returned to the system.
883 * These primitives allow multiple processes to access different per-CPU
886 * ring buffers concurrently. They do not distinguish read-only from
887 * read-consume access; multiple read-only accesses are also serialized.
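/*
 * A reader therefore brackets its buffer accesses with these helpers, e.g.
 * (a simplified sketch of what the read paths later in this file do):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(tr->array_buffer.buffer, cpu, &ts, &lost);
 *	trace_access_unlock(cpu);
 *
 * where cpu names the per-CPU buffer being read; passing
 * RING_BUFFER_ALL_CPUS to the lock helpers gains exclusive access instead.
 */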
891 static DECLARE_RWSEM(all_cpu_access_lock);
892 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
894 static inline void trace_access_lock(int cpu)
896 if (cpu == RING_BUFFER_ALL_CPUS) {
897 /* gain it for accessing the whole ring buffer. */
898 down_write(&all_cpu_access_lock);
900 /* gain it for accessing a cpu ring buffer. */
902 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
903 down_read(&all_cpu_access_lock);
905 /* Secondly block other access to this @cpu ring buffer. */
906 mutex_lock(&per_cpu(cpu_access_lock, cpu));
910 static inline void trace_access_unlock(int cpu)
912 if (cpu == RING_BUFFER_ALL_CPUS) {
913 up_write(&all_cpu_access_lock);
915 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
916 up_read(&all_cpu_access_lock);
920 static inline void trace_access_lock_init(void)
924 for_each_possible_cpu(cpu)
925 mutex_init(&per_cpu(cpu_access_lock, cpu));
930 static DEFINE_MUTEX(access_lock);
932 static inline void trace_access_lock(int cpu)
935 mutex_lock(&access_lock);
938 static inline void trace_access_unlock(int cpu)
941 mutex_unlock(&access_lock);
944 static inline void trace_access_lock_init(void)
950 #ifdef CONFIG_STACKTRACE
951 static void __ftrace_trace_stack(struct trace_buffer *buffer,
952 unsigned int trace_ctx,
953 int skip, struct pt_regs *regs);
954 static inline void ftrace_trace_stack(struct trace_array *tr,
955 struct trace_buffer *buffer,
956 unsigned int trace_ctx,
957 int skip, struct pt_regs *regs);
960 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
961 unsigned int trace_ctx,
962 int skip, struct pt_regs *regs)
965 static inline void ftrace_trace_stack(struct trace_array *tr,
966 struct trace_buffer *buffer,
967 unsigned long trace_ctx,
968 int skip, struct pt_regs *regs)
974 static __always_inline void
975 trace_event_setup(struct ring_buffer_event *event,
976 int type, unsigned int trace_ctx)
978 struct trace_entry *ent = ring_buffer_event_data(event);
980 tracing_generic_entry_update(ent, type, trace_ctx);
983 static __always_inline struct ring_buffer_event *
984 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
987 unsigned int trace_ctx)
989 struct ring_buffer_event *event;
991 event = ring_buffer_lock_reserve(buffer, len);
993 trace_event_setup(event, type, trace_ctx);
998 void tracer_tracing_on(struct trace_array *tr)
1000 if (tr->array_buffer.buffer)
1001 ring_buffer_record_on(tr->array_buffer.buffer);
1003 * This flag is looked at when buffers haven't been allocated
1004 * yet, or by some tracers (like irqsoff) that just want to
1005 * know if the ring buffer has been disabled and can handle
1006 * the race where it gets disabled while we still do a record.
1007 * As the check is in the fast path of the tracers, it is more
1008 * important to be fast than accurate.
1010 tr->buffer_disabled = 0;
1011 /* Make the flag seen by readers */
1016 * tracing_on - enable tracing buffers
1018 * This function enables tracing buffers that may have been
1019 * disabled with tracing_off.
1021 void tracing_on(void)
1023 tracer_tracing_on(&global_trace);
1025 EXPORT_SYMBOL_GPL(tracing_on);
1028 static __always_inline void
1029 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1031 __this_cpu_write(trace_taskinfo_save, true);
1033 /* If this is the temp buffer, we need to commit fully */
1034 if (this_cpu_read(trace_buffered_event) == event) {
1035 /* Length is in event->array[0] */
1036 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1037 /* Release the temp buffer */
1038 this_cpu_dec(trace_buffered_event_cnt);
1039 /* ring_buffer_unlock_commit() enables preemption */
1040 preempt_enable_notrace();
1042 ring_buffer_unlock_commit(buffer);
1045 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1046 const char *str, int size)
1048 struct ring_buffer_event *event;
1049 struct trace_buffer *buffer;
1050 struct print_entry *entry;
1051 unsigned int trace_ctx;
1054 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1057 if (unlikely(tracing_selftest_running && tr == &global_trace))
1060 if (unlikely(tracing_disabled))
1063 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1065 trace_ctx = tracing_gen_ctx();
1066 buffer = tr->array_buffer.buffer;
1067 ring_buffer_nest_start(buffer);
1068 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1075 entry = ring_buffer_event_data(event);
1078 memcpy(&entry->buf, str, size);
1080 /* Add a newline if necessary */
1081 if (entry->buf[size - 1] != '\n') {
1082 entry->buf[size] = '\n';
1083 entry->buf[size + 1] = '\0';
1085 entry->buf[size] = '\0';
1087 __buffer_unlock_commit(buffer, event);
1088 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1090 ring_buffer_nest_end(buffer);
1093 EXPORT_SYMBOL_GPL(__trace_array_puts);
1096 * __trace_puts - write a constant string into the trace buffer.
1097 * @ip: The address of the caller
1098 * @str: The constant string to write
1099 * @size: The size of the string.
1101 int __trace_puts(unsigned long ip, const char *str, int size)
1103 return __trace_array_puts(&global_trace, ip, str, size);
1105 EXPORT_SYMBOL_GPL(__trace_puts);
1108 * __trace_bputs - write the pointer to a constant string into trace buffer
1109 * @ip: The address of the caller
1110 * @str: The constant string to write to the buffer to
1112 int __trace_bputs(unsigned long ip, const char *str)
1114 struct ring_buffer_event *event;
1115 struct trace_buffer *buffer;
1116 struct bputs_entry *entry;
1117 unsigned int trace_ctx;
1118 int size = sizeof(struct bputs_entry);
1121 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1124 if (unlikely(tracing_selftest_running || tracing_disabled))
1127 trace_ctx = tracing_gen_ctx();
1128 buffer = global_trace.array_buffer.buffer;
1130 ring_buffer_nest_start(buffer);
1131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1136 entry = ring_buffer_event_data(event);
1140 __buffer_unlock_commit(buffer, event);
1141 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1145 ring_buffer_nest_end(buffer);
1148 EXPORT_SYMBOL_GPL(__trace_bputs);
1150 #ifdef CONFIG_TRACER_SNAPSHOT
1151 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1154 struct tracer *tracer = tr->current_trace;
1155 unsigned long flags;
1158 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1159 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1163 if (!tr->allocated_snapshot) {
1164 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1165 trace_array_puts(tr, "*** stopping trace here! ***\n");
1166 tracer_tracing_off(tr);
1170 /* Note, snapshot can not be used when the tracer uses it */
1171 if (tracer->use_max_tr) {
1172 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1173 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1177 local_irq_save(flags);
1178 update_max_tr(tr, current, smp_processor_id(), cond_data);
1179 local_irq_restore(flags);
1182 void tracing_snapshot_instance(struct trace_array *tr)
1184 tracing_snapshot_instance_cond(tr, NULL);
1188 * tracing_snapshot - take a snapshot of the current buffer.
1190 * This causes a swap between the snapshot buffer and the current live
1191 * tracing buffer. You can use this to take snapshots of the live
1192 * trace when some condition is triggered, but continue to trace.
1194 * Note, make sure to allocate the snapshot with either
1195 * a tracing_snapshot_alloc(), or by doing it manually
1196 * with: echo 1 > /sys/kernel/tracing/snapshot
1198 * If the snapshot buffer is not allocated, this will stop tracing,
1199 * basically making a permanent snapshot.
1201 void tracing_snapshot(void)
1203 struct trace_array *tr = &global_trace;
1205 tracing_snapshot_instance(tr);
1207 EXPORT_SYMBOL_GPL(tracing_snapshot);
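/*
 * A typical in-kernel usage pattern (an illustrative sketch; the condition
 * being checked is hypothetical):
 *
 *	// during initialization, where sleeping is allowed
 *	tracing_alloc_snapshot();
 *
 *	// later, in the code path being debugged
 *	if (suspicious_condition)
 *		tracing_snapshot();
 *
 * The saved trace can then be read from /sys/kernel/tracing/snapshot.
 */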
1210 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1211 * @tr: The tracing instance to snapshot
1212 * @cond_data: The data to be tested conditionally, and possibly saved
1214 * This is the same as tracing_snapshot() except that the snapshot is
1215 * conditional - the snapshot will only happen if the
1216 * cond_snapshot.update() implementation receiving the cond_data
1217 * returns true, which means that the trace array's cond_snapshot
1218 * update() operation used the cond_data to determine whether the
1219 * snapshot should be taken, and if it was, presumably saved it along
1220 * with the snapshot.
1222 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1224 tracing_snapshot_instance_cond(tr, cond_data);
1226 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1229 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1230 * @tr: The tracing instance
1232 * When the user enables a conditional snapshot using
1233 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1234 * with the snapshot. This accessor is used to retrieve it.
1236 * Should not be called from cond_snapshot.update(), since it takes
1237 * the tr->max_lock lock, which the code calling
1238 * cond_snapshot.update() has already done.
1240 * Returns the cond_data associated with the trace array's snapshot.
1242 void *tracing_cond_snapshot_data(struct trace_array *tr)
1244 void *cond_data = NULL;
1246 local_irq_disable();
1247 arch_spin_lock(&tr->max_lock);
1249 if (tr->cond_snapshot)
1250 cond_data = tr->cond_snapshot->cond_data;
1252 arch_spin_unlock(&tr->max_lock);
1257 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1259 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1260 struct array_buffer *size_buf, int cpu_id);
1261 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1263 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1267 if (!tr->allocated_snapshot) {
1269 /* allocate spare buffer */
1270 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1271 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1275 tr->allocated_snapshot = true;
1281 static void free_snapshot(struct trace_array *tr)
1284 * We don't free the ring buffer; instead, we resize it because the
1285 * max_tr ring buffer has some state (e.g. ring->clock) that we
1286 * want to preserve.
1288 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1289 set_buffer_entries(&tr->max_buffer, 1);
1290 tracing_reset_online_cpus(&tr->max_buffer);
1291 tr->allocated_snapshot = false;
1295 * tracing_alloc_snapshot - allocate snapshot buffer.
1297 * This only allocates the snapshot buffer if it isn't already
1298 * allocated - it doesn't also take a snapshot.
1300 * This is meant to be used in cases where the snapshot buffer needs
1301 * to be set up for events that can't sleep but need to be able to
1302 * trigger a snapshot.
1304 int tracing_alloc_snapshot(void)
1306 struct trace_array *tr = &global_trace;
1309 ret = tracing_alloc_snapshot_instance(tr);
1314 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1317 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1319 * This is similar to tracing_snapshot(), but it will allocate the
1320 * snapshot buffer if it isn't already allocated. Use this only
1321 * where it is safe to sleep, as the allocation may sleep.
1323 * This causes a swap between the snapshot buffer and the current live
1324 * tracing buffer. You can use this to take snapshots of the live
1325 * trace when some condition is triggered, but continue to trace.
1327 void tracing_snapshot_alloc(void)
1331 ret = tracing_alloc_snapshot();
1337 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1340 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1341 * @tr: The tracing instance
1342 * @cond_data: User data to associate with the snapshot
1343 * @update: Implementation of the cond_snapshot update function
1345 * Check whether the conditional snapshot for the given instance has
1346 * already been enabled, or if the current tracer is already using a
1347 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1348 * save the cond_data and update function inside.
1350 * Returns 0 if successful, error otherwise.
1352 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1353 cond_update_fn_t update)
1355 struct cond_snapshot *cond_snapshot;
1358 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1362 cond_snapshot->cond_data = cond_data;
1363 cond_snapshot->update = update;
1365 mutex_lock(&trace_types_lock);
1367 ret = tracing_alloc_snapshot_instance(tr);
1371 if (tr->current_trace->use_max_tr) {
1377 * The cond_snapshot can only change to NULL without the
1378 * trace_types_lock. We don't care if we race with it going
1379 * to NULL, but we want to make sure that it's not set to
1380 * something other than NULL when we get here, which we can
1381 * do safely with only holding the trace_types_lock and not
1382 * having to take the max_lock.
1384 if (tr->cond_snapshot) {
1389 local_irq_disable();
1390 arch_spin_lock(&tr->max_lock);
1391 tr->cond_snapshot = cond_snapshot;
1392 arch_spin_unlock(&tr->max_lock);
1395 mutex_unlock(&trace_types_lock);
1400 mutex_unlock(&trace_types_lock);
1401 kfree(cond_snapshot);
1404 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1407 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1408 * @tr: The tracing instance
1410 * Check whether the conditional snapshot for the given instance is
1411 * enabled; if so, free the cond_snapshot associated with it,
1412 * otherwise return -EINVAL.
1414 * Returns 0 if successful, error otherwise.
1416 int tracing_snapshot_cond_disable(struct trace_array *tr)
1420 local_irq_disable();
1421 arch_spin_lock(&tr->max_lock);
1423 if (!tr->cond_snapshot)
1426 kfree(tr->cond_snapshot);
1427 tr->cond_snapshot = NULL;
1430 arch_spin_unlock(&tr->max_lock);
1435 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
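/*
 * Putting the conditional snapshot API together (an illustrative sketch;
 * my_update and my_data are hypothetical, and tr would come from e.g.
 * trace_array_get_by_name()):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		// decide from cond_data whether this snapshot should be taken
 *		return *(int *)cond_data != 0;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_data);	// snapshots only if my_update() returns true
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */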
1437 void tracing_snapshot(void)
1439 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1441 EXPORT_SYMBOL_GPL(tracing_snapshot);
1442 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1444 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1446 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1447 int tracing_alloc_snapshot(void)
1449 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1452 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1453 void tracing_snapshot_alloc(void)
1458 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1459 void *tracing_cond_snapshot_data(struct trace_array *tr)
1463 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1464 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1468 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1469 int tracing_snapshot_cond_disable(struct trace_array *tr)
1473 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1474 #define free_snapshot(tr) do { } while (0)
1475 #endif /* CONFIG_TRACER_SNAPSHOT */
1477 void tracer_tracing_off(struct trace_array *tr)
1479 if (tr->array_buffer.buffer)
1480 ring_buffer_record_off(tr->array_buffer.buffer);
1482 * This flag is looked at when buffers haven't been allocated
1483 * yet, or by some tracers (like irqsoff) that just want to
1484 * know if the ring buffer has been disabled and can handle
1485 * the race where it gets disabled while we still do a record.
1486 * As the check is in the fast path of the tracers, it is more
1487 * important to be fast than accurate.
1489 tr->buffer_disabled = 1;
1490 /* Make the flag seen by readers */
1495 * tracing_off - turn off tracing buffers
1497 * This function stops the tracing buffers from recording data.
1498 * It does not disable any overhead the tracers themselves may
1499 * be causing. This function simply causes all recording to
1500 * the ring buffers to fail.
1502 void tracing_off(void)
1504 tracer_tracing_off(&global_trace);
1506 EXPORT_SYMBOL_GPL(tracing_off);
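/*
 * tracing_off() pairs with tracing_on() when narrowing a problem down from
 * inside the kernel (an illustrative sketch; the condition is hypothetical):
 *
 *	tracing_on();
 *	// ... code being debugged ...
 *	if (something_went_wrong)
 *		tracing_off();	// freeze the ring buffer right at the failure
 *
 * The frozen trace can then be inspected via /sys/kernel/tracing/trace.
 */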
1508 void disable_trace_on_warning(void)
1510 if (__disable_trace_on_warning) {
1511 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1512 "Disabling tracing due to warning\n");
1518 * tracer_tracing_is_on - show real state of ring buffer enabled
1519 * @tr : the trace array to know if ring buffer is enabled
1521 * Shows real state of the ring buffer if it is enabled or not.
1523 bool tracer_tracing_is_on(struct trace_array *tr)
1525 if (tr->array_buffer.buffer)
1526 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1527 return !tr->buffer_disabled;
1531 * tracing_is_on - show state of ring buffers enabled
1533 int tracing_is_on(void)
1535 return tracer_tracing_is_on(&global_trace);
1537 EXPORT_SYMBOL_GPL(tracing_is_on);
1539 static int __init set_buf_size(char *str)
1541 unsigned long buf_size;
1545 buf_size = memparse(str, &str);
1547 * nr_entries cannot be zero and the startup
1548 * tests require some buffer space. Therefore
1549 * ensure we have at least 4096 bytes of buffer.
1551 trace_buf_size = max(4096UL, buf_size);
1554 __setup("trace_buf_size=", set_buf_size);
1556 static int __init set_tracing_thresh(char *str)
1558 unsigned long threshold;
1563 ret = kstrtoul(str, 0, &threshold);
1566 tracing_thresh = threshold * 1000;
1569 __setup("tracing_thresh=", set_tracing_thresh);
1571 unsigned long nsecs_to_usecs(unsigned long nsecs)
1573 return nsecs / 1000;
1577 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1578 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1579 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1580 * of strings in the order that the evals (enum) were defined.
1585 /* These must match the bit positions in trace_iterator_flags */
1586 static const char *trace_options[] = {
1594 int in_ns; /* is this clock in nanoseconds? */
1595 } trace_clocks[] = {
1596 { trace_clock_local, "local", 1 },
1597 { trace_clock_global, "global", 1 },
1598 { trace_clock_counter, "counter", 0 },
1599 { trace_clock_jiffies, "uptime", 0 },
1600 { trace_clock, "perf", 1 },
1601 { ktime_get_mono_fast_ns, "mono", 1 },
1602 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1603 { ktime_get_boot_fast_ns, "boot", 1 },
1604 { ktime_get_tai_fast_ns, "tai", 1 },
1608 bool trace_clock_in_ns(struct trace_array *tr)
1610 if (trace_clocks[tr->clock_id].in_ns)
1617 * trace_parser_get_init - gets the buffer for trace parser
1619 int trace_parser_get_init(struct trace_parser *parser, int size)
1621 memset(parser, 0, sizeof(*parser));
1623 parser->buffer = kmalloc(size, GFP_KERNEL);
1624 if (!parser->buffer)
1627 parser->size = size;
1632 * trace_parser_put - frees the buffer for trace parser
1634 void trace_parser_put(struct trace_parser *parser)
1636 kfree(parser->buffer);
1637 parser->buffer = NULL;
1641 * trace_get_user - reads the user input string separated by space
1642 * (matched by isspace(ch))
1644 * For each string found the 'struct trace_parser' is updated,
1645 * and the function returns.
1647 * Returns number of bytes read.
1649 * See kernel/trace/trace.h for 'struct trace_parser' details.
1651 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1652 size_t cnt, loff_t *ppos)
1659 trace_parser_clear(parser);
1661 ret = get_user(ch, ubuf++);
1669 * The parser is not finished with the last write,
1670 * continue reading the user input without skipping spaces.
1672 if (!parser->cont) {
1673 /* skip white space */
1674 while (cnt && isspace(ch)) {
1675 ret = get_user(ch, ubuf++);
1684 /* only spaces were written */
1685 if (isspace(ch) || !ch) {
1692 /* read the non-space input */
1693 while (cnt && !isspace(ch) && ch) {
1694 if (parser->idx < parser->size - 1)
1695 parser->buffer[parser->idx++] = ch;
1700 ret = get_user(ch, ubuf++);
1707 /* We either got finished input or we have to wait for another call. */
1708 if (isspace(ch) || !ch) {
1709 parser->buffer[parser->idx] = 0;
1710 parser->cont = false;
1711 } else if (parser->idx < parser->size - 1) {
1712 parser->cont = true;
1713 parser->buffer[parser->idx++] = ch;
1714 /* Make sure the parsed string always terminates with '\0'. */
1715 parser->buffer[parser->idx] = 0;
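/*
 * A condensed sketch of the usual calling pattern for the parser, mirroring
 * what trace_pid_write() above does (error handling trimmed):
 *
 *	struct trace_parser parser;
 *	loff_t pos = 0;
 *	int ret;
 *
 *	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
 *		return -ENOMEM;
 *
 *	while (cnt > 0) {
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		ubuf += ret;
 *		cnt -= ret;
 *		// parser.buffer now holds one NUL-terminated token
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */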
1728 /* TODO add a seq_buf_to_buffer() */
1729 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1733 if (trace_seq_used(s) <= s->seq.readpos)
1736 len = trace_seq_used(s) - s->seq.readpos;
1739 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1741 s->seq.readpos += cnt;
1745 unsigned long __read_mostly tracing_thresh;
1747 #ifdef CONFIG_TRACER_MAX_TRACE
1748 static const struct file_operations tracing_max_lat_fops;
1750 #ifdef LATENCY_FS_NOTIFY
1752 static struct workqueue_struct *fsnotify_wq;
1754 static void latency_fsnotify_workfn(struct work_struct *work)
1756 struct trace_array *tr = container_of(work, struct trace_array,
1758 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1761 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1763 struct trace_array *tr = container_of(iwork, struct trace_array,
1765 queue_work(fsnotify_wq, &tr->fsnotify_work);
1768 static void trace_create_maxlat_file(struct trace_array *tr,
1769 struct dentry *d_tracer)
1771 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1772 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1773 tr->d_max_latency = trace_create_file("tracing_max_latency",
1775 d_tracer, &tr->max_latency,
1776 &tracing_max_lat_fops);
1779 __init static int latency_fsnotify_init(void)
1781 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1782 WQ_UNBOUND | WQ_HIGHPRI, 0);
1784 pr_err("Unable to allocate tr_max_lat_wq\n");
1790 late_initcall_sync(latency_fsnotify_init);
1792 void latency_fsnotify(struct trace_array *tr)
1797 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1798 * possible that we are called from __schedule() or do_idle(), which
1799 * could cause a deadlock.
1801 irq_work_queue(&tr->fsnotify_irqwork);
1804 #else /* !LATENCY_FS_NOTIFY */
1806 #define trace_create_maxlat_file(tr, d_tracer) \
1807 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1808 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1813 * Copy the new maximum trace into the separate maximum-trace
1814 * structure. (this way the maximum trace is permanently saved,
1815 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1818 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1820 struct array_buffer *trace_buf = &tr->array_buffer;
1821 struct array_buffer *max_buf = &tr->max_buffer;
1822 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1823 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1826 max_buf->time_start = data->preempt_timestamp;
1828 max_data->saved_latency = tr->max_latency;
1829 max_data->critical_start = data->critical_start;
1830 max_data->critical_end = data->critical_end;
1832 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1833 max_data->pid = tsk->pid;
1835 * If tsk == current, then use current_uid(), as that does not use
1836 * RCU. The irq tracer can be called out of RCU scope.
1839 max_data->uid = current_uid();
1841 max_data->uid = task_uid(tsk);
1843 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1844 max_data->policy = tsk->policy;
1845 max_data->rt_priority = tsk->rt_priority;
1847 /* record this task's comm */
1848 tracing_record_cmdline(tsk);
1849 latency_fsnotify(tr);
1853 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1855 * @tsk: the task with the latency
1856 * @cpu: The cpu that initiated the trace.
1857 * @cond_data: User data associated with a conditional snapshot
1859 * Flip the buffers between the @tr and the max_tr and record information
1860 * about which task was the cause of this latency.
1863 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1869 WARN_ON_ONCE(!irqs_disabled());
1871 if (!tr->allocated_snapshot) {
1872 /* Only the nop tracer should hit this when disabling */
1873 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1877 arch_spin_lock(&tr->max_lock);
1879 /* Inherit the recordable setting from array_buffer */
1880 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1881 ring_buffer_record_on(tr->max_buffer.buffer);
1883 ring_buffer_record_off(tr->max_buffer.buffer);
1885 #ifdef CONFIG_TRACER_SNAPSHOT
1886 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1887 arch_spin_unlock(&tr->max_lock);
1891 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1893 __update_max_tr(tr, tsk, cpu);
1895 arch_spin_unlock(&tr->max_lock);
1899 * update_max_tr_single - only copy one trace over, and reset the rest
1901 * @tsk: task with the latency
1902 * @cpu: the cpu of the buffer to copy.
1904 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1907 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1914 WARN_ON_ONCE(!irqs_disabled());
1915 if (!tr->allocated_snapshot) {
1916 /* Only the nop tracer should hit this when disabling */
1917 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1921 arch_spin_lock(&tr->max_lock);
1923 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1925 if (ret == -EBUSY) {
1927 * We failed to swap the buffer due to a commit taking
1928 * place on this CPU. We fail to record, but we reset
1929 * the max trace buffer (no one writes directly to it)
1930 * and flag that it failed.
1932 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1933 "Failed to swap buffers due to commit in progress\n");
1936 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1938 __update_max_tr(tr, tsk, cpu);
1939 arch_spin_unlock(&tr->max_lock);
1942 #endif /* CONFIG_TRACER_MAX_TRACE */
1944 static int wait_on_pipe(struct trace_iterator *iter, int full)
1946 /* Iterators are static, they should be filled or empty */
1947 if (trace_buffer_iter(iter, iter->cpu_file))
1950 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1954 #ifdef CONFIG_FTRACE_STARTUP_TEST
1955 static bool selftests_can_run;
1957 struct trace_selftests {
1958 struct list_head list;
1959 struct tracer *type;
1962 static LIST_HEAD(postponed_selftests);
1964 static int save_selftest(struct tracer *type)
1966 struct trace_selftests *selftest;
1968 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1972 selftest->type = type;
1973 list_add(&selftest->list, &postponed_selftests);
1977 static int run_tracer_selftest(struct tracer *type)
1979 struct trace_array *tr = &global_trace;
1980 struct tracer *saved_tracer = tr->current_trace;
1983 if (!type->selftest || tracing_selftest_disabled)
1987 * If a tracer registers early in boot up (before scheduling is
1988 * initialized and such), then do not run its selftests yet.
1989 * Instead, run them a little later in the boot process.
1991 if (!selftests_can_run)
1992 return save_selftest(type);
1994 if (!tracing_is_on()) {
1995 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2001 * Run a selftest on this tracer.
2002 * Here we reset the trace buffer, and set the current
2003 * tracer to be this tracer. The tracer can then run some
2004 * internal tracing to verify that everything is in order.
2005 * If we fail, we do not register this tracer.
2007 tracing_reset_online_cpus(&tr->array_buffer);
2009 tr->current_trace = type;
2011 #ifdef CONFIG_TRACER_MAX_TRACE
2012 if (type->use_max_tr) {
2013 /* If we expanded the buffers, make sure the max is expanded too */
2014 if (ring_buffer_expanded)
2015 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2016 RING_BUFFER_ALL_CPUS);
2017 tr->allocated_snapshot = true;
2021 /* the test is responsible for initializing and enabling */
2022 pr_info("Testing tracer %s: ", type->name);
2023 ret = type->selftest(type, tr);
2024 /* the test is responsible for resetting too */
2025 tr->current_trace = saved_tracer;
2027 printk(KERN_CONT "FAILED!\n");
2028 /* Add the warning after printing 'FAILED' */
2032 /* Only reset on passing, to avoid touching corrupted buffers */
2033 tracing_reset_online_cpus(&tr->array_buffer);
2035 #ifdef CONFIG_TRACER_MAX_TRACE
2036 if (type->use_max_tr) {
2037 tr->allocated_snapshot = false;
2039 /* Shrink the max buffer again */
2040 if (ring_buffer_expanded)
2041 ring_buffer_resize(tr->max_buffer.buffer, 1,
2042 RING_BUFFER_ALL_CPUS);
2046 printk(KERN_CONT "PASSED\n");
2050 static int do_run_tracer_selftest(struct tracer *type)
2055 * Tests can take a long time, especially if they are run one after the
2056 * other, as does happen during bootup when all the tracers are
2057 * registered. This could cause the soft lockup watchdog to trigger.
2061 tracing_selftest_running = true;
2062 ret = run_tracer_selftest(type);
2063 tracing_selftest_running = false;
2068 static __init int init_trace_selftests(void)
2070 struct trace_selftests *p, *n;
2071 struct tracer *t, **last;
2074 selftests_can_run = true;
2076 mutex_lock(&trace_types_lock);
2078 if (list_empty(&postponed_selftests))
2081 pr_info("Running postponed tracer tests:\n");
2083 tracing_selftest_running = true;
2084 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2085 /* This loop can take minutes when sanitizers are enabled, so
2086 * let's make sure we allow RCU processing.
2089 ret = run_tracer_selftest(p->type);
2090 /* If the test fails, then warn and remove from available_tracers */
2092 WARN(1, "tracer: %s failed selftest, disabling\n",
2094 last = &trace_types;
2095 for (t = trace_types; t; t = t->next) {
2106 tracing_selftest_running = false;
2109 mutex_unlock(&trace_types_lock);
2113 core_initcall(init_trace_selftests);
2115 static inline int run_tracer_selftest(struct tracer *type)
2119 static inline int do_run_tracer_selftest(struct tracer *type)
2123 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2125 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2127 static void __init apply_trace_boot_options(void);
2130 * register_tracer - register a tracer with the ftrace system.
2131 * @type: the plugin for the tracer
2133 * Register a new plugin tracer.
2135 int __init register_tracer(struct tracer *type)
2141 pr_info("Tracer must have a name\n");
2145 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2146 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2150 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2151 pr_warn("Can not register tracer %s due to lockdown\n",
2156 mutex_lock(&trace_types_lock);
2158 for (t = trace_types; t; t = t->next) {
2159 if (strcmp(type->name, t->name) == 0) {
2161 pr_info("Tracer %s already registered\n",
2168 if (!type->set_flag)
2169 type->set_flag = &dummy_set_flag;
2171 /* allocate a dummy tracer_flags */
2172 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2177 type->flags->val = 0;
2178 type->flags->opts = dummy_tracer_opt;
2180 if (!type->flags->opts)
2181 type->flags->opts = dummy_tracer_opt;
2183 /* store the tracer for __set_tracer_option */
2184 type->flags->trace = type;
2186 ret = do_run_tracer_selftest(type);
2190 type->next = trace_types;
2192 add_tracer_options(&global_trace, type);
2195 mutex_unlock(&trace_types_lock);
2197 if (ret || !default_bootup_tracer)
2200 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2203 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2204 /* Do we want this tracer to start on bootup? */
2205 tracing_set_tracer(&global_trace, type->name);
2206 default_bootup_tracer = NULL;
2208 apply_trace_boot_options();
2210 /* Disable other selftests, since this tracer will break them. */
2211 disable_tracing_selftest("running a tracer");
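/*
 * A minimal sketch of a built-in tracer registering itself (my_tracer and
 * its callbacks are hypothetical; register_tracer() is __init, so this only
 * works for built-in code, typically from a core_initcall):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int my_tracer_register(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_register);
 */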
2217 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2219 struct trace_buffer *buffer = buf->buffer;
2224 ring_buffer_record_disable(buffer);
2226 /* Make sure all commits have finished */
2228 ring_buffer_reset_cpu(buffer, cpu);
2230 ring_buffer_record_enable(buffer);
2233 void tracing_reset_online_cpus(struct array_buffer *buf)
2235 struct trace_buffer *buffer = buf->buffer;
2240 ring_buffer_record_disable(buffer);
2242 /* Make sure all commits have finished */
2245 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2247 ring_buffer_reset_online_cpus(buffer);
2249 ring_buffer_record_enable(buffer);
2252 /* Must have trace_types_lock held */
2253 void tracing_reset_all_online_cpus_unlocked(void)
2255 struct trace_array *tr;
2257 lockdep_assert_held(&trace_types_lock);
2259 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2260 if (!tr->clear_trace)
2262 tr->clear_trace = false;
2263 tracing_reset_online_cpus(&tr->array_buffer);
2264 #ifdef CONFIG_TRACER_MAX_TRACE
2265 tracing_reset_online_cpus(&tr->max_buffer);
2270 void tracing_reset_all_online_cpus(void)
2272 mutex_lock(&trace_types_lock);
2273 tracing_reset_all_online_cpus_unlocked();
2274 mutex_unlock(&trace_types_lock);
2278 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2279 * is the tgid last observed corresponding to pid=i.
2281 static int *tgid_map;
2283 /* The maximum valid index into tgid_map. */
2284 static size_t tgid_map_max;
2286 #define SAVED_CMDLINES_DEFAULT 128
2287 #define NO_CMDLINE_MAP UINT_MAX
2289 * Preemption must be disabled before acquiring trace_cmdline_lock.
2290 * The various trace_arrays' max_lock must be acquired in a context
2291 * where interrupts are disabled.
2293 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2294 struct saved_cmdlines_buffer {
2295 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2296 unsigned *map_cmdline_to_pid;
2297 unsigned cmdline_num;
2299 char *saved_cmdlines;
2301 static struct saved_cmdlines_buffer *savedcmd;
2303 static inline char *get_saved_cmdlines(int idx)
2305 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2308 static inline void set_cmdline(int idx, const char *cmdline)
2310 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2313 static int allocate_cmdlines_buffer(unsigned int val,
2314 struct saved_cmdlines_buffer *s)
2316 s->map_cmdline_to_pid = kmalloc_array(val,
2317 sizeof(*s->map_cmdline_to_pid),
2319 if (!s->map_cmdline_to_pid)
2322 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2323 if (!s->saved_cmdlines) {
2324 kfree(s->map_cmdline_to_pid);
2329 s->cmdline_num = val;
2330 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2331 sizeof(s->map_pid_to_cmdline));
2332 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2333 val * sizeof(*s->map_cmdline_to_pid));
2338 static int trace_create_savedcmd(void)
2342 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2346 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2356 int is_tracing_stopped(void)
2358 return global_trace.stop_count;
2362 * tracing_start - quick start of the tracer
2364 * If tracing is enabled but was stopped by tracing_stop,
2365 * this will start the tracer back up.
2367 void tracing_start(void)
2369 struct trace_buffer *buffer;
2370 unsigned long flags;
2372 if (tracing_disabled)
2375 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2376 if (--global_trace.stop_count) {
2377 if (global_trace.stop_count < 0) {
2378 /* Someone screwed up their debugging */
2380 global_trace.stop_count = 0;
2385 /* Prevent the buffers from switching */
2386 arch_spin_lock(&global_trace.max_lock);
2388 buffer = global_trace.array_buffer.buffer;
2390 ring_buffer_record_enable(buffer);
2392 #ifdef CONFIG_TRACER_MAX_TRACE
2393 buffer = global_trace.max_buffer.buffer;
2395 ring_buffer_record_enable(buffer);
2398 arch_spin_unlock(&global_trace.max_lock);
2401 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2404 static void tracing_start_tr(struct trace_array *tr)
2406 struct trace_buffer *buffer;
2407 unsigned long flags;
2409 if (tracing_disabled)
2412 /* If global, we need to also start the max tracer */
2413 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2414 return tracing_start();
2416 raw_spin_lock_irqsave(&tr->start_lock, flags);
2418 if (--tr->stop_count) {
2419 if (tr->stop_count < 0) {
2420 /* Someone screwed up their debugging */
2427 buffer = tr->array_buffer.buffer;
2429 ring_buffer_record_enable(buffer);
2432 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2436 * tracing_stop - quick stop of the tracer
2438 * Light weight way to stop tracing. Use in conjunction with
2441 void tracing_stop(void)
2443 struct trace_buffer *buffer;
2444 unsigned long flags;
2446 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2447 if (global_trace.stop_count++)
2450 /* Prevent the buffers from switching */
2451 arch_spin_lock(&global_trace.max_lock);
2453 buffer = global_trace.array_buffer.buffer;
2455 ring_buffer_record_disable(buffer);
2457 #ifdef CONFIG_TRACER_MAX_TRACE
2458 buffer = global_trace.max_buffer.buffer;
2460 ring_buffer_record_disable(buffer);
2463 arch_spin_unlock(&global_trace.max_lock);
2466 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2469 static void tracing_stop_tr(struct trace_array *tr)
2471 struct trace_buffer *buffer;
2472 unsigned long flags;
2474 /* If global, we need to also stop the max tracer */
2475 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2476 return tracing_stop();
2478 raw_spin_lock_irqsave(&tr->start_lock, flags);
2479 if (tr->stop_count++)
2482 buffer = tr->array_buffer.buffer;
2484 ring_buffer_record_disable(buffer);
2487 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
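/*
 * Editor's sketch (illustrative only): tracing_stop()/tracing_start()
 * nest through stop_count, so code that temporarily quiesces recording
 * pairs them like a refcount:
 *
 *	tracing_stop();
 *	... inspect or reset the buffers while recording is off ...
 *	tracing_start();
 *
 * Only the outermost tracing_start(), when stop_count drops back to
 * zero, re-enables the ring buffers.
 */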
2490 static int trace_save_cmdline(struct task_struct *tsk)
2494 /* treat recording of idle task as a success */
2498 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2501 * It's not the end of the world if we don't get
2502 * the lock, but we also don't want to spin
2503 * nor do we want to disable interrupts,
2504 * so if we miss here, then better luck next time.
2506 * This is called within the scheduler and wakeup paths, so interrupts
2507 * should already be disabled and the run queue lock held.
2509 lockdep_assert_preemption_disabled();
2510 if (!arch_spin_trylock(&trace_cmdline_lock))
2513 idx = savedcmd->map_pid_to_cmdline[tpid];
2514 if (idx == NO_CMDLINE_MAP) {
2515 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2517 savedcmd->map_pid_to_cmdline[tpid] = idx;
2518 savedcmd->cmdline_idx = idx;
2521 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2522 set_cmdline(idx, tsk->comm);
2524 arch_spin_unlock(&trace_cmdline_lock);
2529 static void __trace_find_cmdline(int pid, char comm[])
2535 strcpy(comm, "<idle>");
2539 if (WARN_ON_ONCE(pid < 0)) {
2540 strcpy(comm, "<XXX>");
2544 tpid = pid & (PID_MAX_DEFAULT - 1);
2545 map = savedcmd->map_pid_to_cmdline[tpid];
2546 if (map != NO_CMDLINE_MAP) {
2547 tpid = savedcmd->map_cmdline_to_pid[map];
2549 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2553 strcpy(comm, "<...>");
2556 void trace_find_cmdline(int pid, char comm[])
2559 arch_spin_lock(&trace_cmdline_lock);
2561 __trace_find_cmdline(pid, comm);
2563 arch_spin_unlock(&trace_cmdline_lock);
2567 static int *trace_find_tgid_ptr(int pid)
2570 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2571 * if we observe a non-NULL tgid_map then we also observe the correct
2572 * tgid_map_max.
2574 int *map = smp_load_acquire(&tgid_map);
2576 if (unlikely(!map || pid > tgid_map_max))
2582 int trace_find_tgid(int pid)
2584 int *ptr = trace_find_tgid_ptr(pid);
2586 return ptr ? *ptr : 0;
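/*
 * Editor's sketch (illustrative): the smp_load_acquire() in
 * trace_find_tgid_ptr() pairs with a publish on the writer side in
 * set_tracer_flag(), which conceptually does:
 *
 *	map = kvcalloc(PID_MAX_DEFAULT + 1, sizeof(*map), GFP_KERNEL);
 *	tgid_map_max = PID_MAX_DEFAULT;
 *	smp_store_release(&tgid_map, map);
 *
 * so any reader that observes a non-NULL tgid_map also observes a valid
 * tgid_map_max.  The exact allocation call is an assumption for the
 * sketch, not a quote of set_tracer_flag().
 */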
2589 static int trace_save_tgid(struct task_struct *tsk)
2593 /* treat recording of idle task as a success */
2597 ptr = trace_find_tgid_ptr(tsk->pid);
2605 static bool tracing_record_taskinfo_skip(int flags)
2607 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2609 if (!__this_cpu_read(trace_taskinfo_save))
2615 * tracing_record_taskinfo - record the task info of a task
2617 * @task: task to record
2618 * @flags: TRACE_RECORD_CMDLINE for recording comm
2619 * TRACE_RECORD_TGID for recording tgid
2621 void tracing_record_taskinfo(struct task_struct *task, int flags)
2625 if (tracing_record_taskinfo_skip(flags))
2629 * Record as much task information as possible. If some fail, continue
2630 * to try to record the others.
2632 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2633 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2635 /* If recording any information failed, retry again soon. */
2639 __this_cpu_write(trace_taskinfo_save, false);
2643 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2645 * @prev: previous task during sched_switch
2646 * @next: next task during sched_switch
2647 * @flags: TRACE_RECORD_CMDLINE for recording comm
2648 * TRACE_RECORD_TGID for recording tgid
2650 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2651 struct task_struct *next, int flags)
2655 if (tracing_record_taskinfo_skip(flags))
2659 * Record as much task information as possible. If some fail, continue
2660 * to try to record the others.
2662 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2663 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2664 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2665 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2667 /* If recording any information failed, retry again soon. */
2671 __this_cpu_write(trace_taskinfo_save, false);
2674 /* Helpers to record a specific task information */
2675 void tracing_record_cmdline(struct task_struct *task)
2677 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2680 void tracing_record_tgid(struct task_struct *task)
2682 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2686 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2687 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2688 * simplifies those functions and keeps them in sync.
2690 enum print_line_t trace_handle_return(struct trace_seq *s)
2692 return trace_seq_has_overflowed(s) ?
2693 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2695 EXPORT_SYMBOL_GPL(trace_handle_return);
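/*
 * Editor's sketch (illustrative): a typical event ->trace() callback
 * uses this helper as its tail.  A hypothetical output function:
 *
 *	static enum print_line_t foo_trace(struct trace_iterator *iter,
 *					   int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo: %d\n", 42);
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 * so an overflowed seq buffer is reported uniformly as
 * TRACE_TYPE_PARTIAL_LINE.
 */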
2697 static unsigned short migration_disable_value(void)
2699 #if defined(CONFIG_SMP)
2700 return current->migration_disabled;
2706 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2708 unsigned int trace_flags = irqs_status;
2711 pc = preempt_count();
2714 trace_flags |= TRACE_FLAG_NMI;
2715 if (pc & HARDIRQ_MASK)
2716 trace_flags |= TRACE_FLAG_HARDIRQ;
2717 if (in_serving_softirq())
2718 trace_flags |= TRACE_FLAG_SOFTIRQ;
2719 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2720 trace_flags |= TRACE_FLAG_BH_OFF;
2722 if (tif_need_resched())
2723 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2724 if (test_preempt_need_resched())
2725 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2726 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2727 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
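/*
 * Editor's note (illustrative): the packed trace_ctx word produced above
 * can be taken apart again as roughly:
 *
 *	preempt_depth   =  trace_ctx        & 0xf;
 *	migrate_disable = (trace_ctx >> 4)  & 0xf;
 *	irq_flags       =  trace_ctx >> 16;	// TRACE_FLAG_* bits
 *
 * Both counters are clamped to 15 when packed, so larger values are not
 * recoverable from the entry.
 */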
2730 struct ring_buffer_event *
2731 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2734 unsigned int trace_ctx)
2736 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2739 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2740 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2741 static int trace_buffered_event_ref;
2744 * trace_buffered_event_enable - enable buffering events
2746 * When events are being filtered, it is quicker to use a temporary
2747 * buffer to write the event data into if there's a likely chance
2748 * that it will not be committed. The discard of the ring buffer
2749 * is not as fast as committing, and is much slower than copying
2750 * into the temp buffer.
2752 * When an event is to be filtered, allocate per cpu buffers to
2753 * write the event data into, and if the event is filtered and discarded
2754 * it is simply dropped, otherwise, the entire data is to be committed
2755 * and not discarded.
2757 void trace_buffered_event_enable(void)
2759 struct ring_buffer_event *event;
2763 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2765 if (trace_buffered_event_ref++)
2768 for_each_tracing_cpu(cpu) {
2769 page = alloc_pages_node(cpu_to_node(cpu),
2770 GFP_KERNEL | __GFP_NORETRY, 0);
2774 event = page_address(page);
2775 memset(event, 0, sizeof(*event));
2777 per_cpu(trace_buffered_event, cpu) = event;
2780 if (cpu == smp_processor_id() &&
2781 __this_cpu_read(trace_buffered_event) !=
2782 per_cpu(trace_buffered_event, cpu))
2789 trace_buffered_event_disable();
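/*
 * Editor's sketch (illustrative): callers hold event_mutex and treat the
 * enable/disable pair as a refcount tied to filter lifetime:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		// filter installed
 *	...
 *	trace_buffered_event_disable();		// filter removed
 *	mutex_unlock(&event_mutex);
 *
 * The per-CPU pages are only allocated on the first enable and only
 * freed on the last disable.
 */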
2792 static void enable_trace_buffered_event(void *data)
2794 /* Probably not needed, but do it anyway */
2796 this_cpu_dec(trace_buffered_event_cnt);
2799 static void disable_trace_buffered_event(void *data)
2801 this_cpu_inc(trace_buffered_event_cnt);
2805 * trace_buffered_event_disable - disable buffering events
2807 * When a filter is removed, it is faster to not use the buffered
2808 * events, and to commit directly into the ring buffer. Free up
2809 * the temp buffers when there are no more users. This requires
2810 * special synchronization with current events.
2812 void trace_buffered_event_disable(void)
2816 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2818 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2821 if (--trace_buffered_event_ref)
2825 /* For each CPU, set the buffer as used. */
2826 smp_call_function_many(tracing_buffer_mask,
2827 disable_trace_buffered_event, NULL, 1);
2830 /* Wait for all current users to finish */
2833 for_each_tracing_cpu(cpu) {
2834 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2835 per_cpu(trace_buffered_event, cpu) = NULL;
2838 * Make sure trace_buffered_event is NULL before clearing
2839 * trace_buffered_event_cnt.
2844 /* Do the work on each cpu */
2845 smp_call_function_many(tracing_buffer_mask,
2846 enable_trace_buffered_event, NULL, 1);
2850 static struct trace_buffer *temp_buffer;
2852 struct ring_buffer_event *
2853 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2854 struct trace_event_file *trace_file,
2855 int type, unsigned long len,
2856 unsigned int trace_ctx)
2858 struct ring_buffer_event *entry;
2859 struct trace_array *tr = trace_file->tr;
2862 *current_rb = tr->array_buffer.buffer;
2864 if (!tr->no_filter_buffering_ref &&
2865 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2866 preempt_disable_notrace();
2868 * Filtering is on, so try to use the per cpu buffer first.
2869 * This buffer will simulate a ring_buffer_event,
2870 * where the type_len is zero and the array[0] will
2871 * hold the full length.
2872 * (see include/linux/ring_buffer.h for details on
2873 * how the ring_buffer_event is structured).
2875 * Using a temp buffer during filtering and copying it
2876 * on a matched filter is quicker than writing directly
2877 * into the ring buffer and then discarding it when
2878 * it doesn't match. That is because the discard
2879 * requires several atomic operations to get right.
2880 * Copying on match and doing nothing on a failed match
2881 * is still quicker than no copy on match, but having
2882 * to discard out of the ring buffer on a failed match.
2884 if ((entry = __this_cpu_read(trace_buffered_event))) {
2885 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2887 val = this_cpu_inc_return(trace_buffered_event_cnt);
2890 * Preemption is disabled, but interrupts and NMIs
2891 * can still come in now. If that happens after
2892 * the above increment, then it will have to go
2893 * back to the old method of allocating the event
2894 * on the ring buffer, and if the filter fails, it
2895 * will have to call ring_buffer_discard_commit()
2898 * Need to also check the unlikely case that the
2899 * length is bigger than the temp buffer size.
2900 * If that happens, then the reserve is pretty much
2901 * guaranteed to fail, as the ring buffer currently
2902 * only allows events less than a page. But that may
2903 * change in the future, so let the ring buffer reserve
2904 * handle the failure in that case.
2906 if (val == 1 && likely(len <= max_len)) {
2907 trace_event_setup(entry, type, trace_ctx);
2908 entry->array[0] = len;
2909 /* Return with preemption disabled */
2912 this_cpu_dec(trace_buffered_event_cnt);
2914 /* __trace_buffer_lock_reserve() disables preemption */
2915 preempt_enable_notrace();
2918 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2921 * If tracing is off, but we have triggers enabled
2922 * we still need to look at the event data. Use the temp_buffer
2923 * to store the trace event for the trigger to use. It's recursion
2924 * safe and will not be recorded anywhere.
2926 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2927 *current_rb = temp_buffer;
2928 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2933 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
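/*
 * Editor's sketch (illustrative): event code does not usually call the
 * reserve above by hand; the generated trace_event_raw_event_*() code
 * follows a reserve/fill/commit pattern that conceptually looks like:
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *						 type, len, trace_ctx);
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		... fill in the event fields ...
 *		trace_event_buffer_commit(&fbuffer);	// see below
 *	}
 *
 * with the fbuffer wrapper carrying the buffer, event and entry between
 * the two halves.
 */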
2935 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2936 static DEFINE_MUTEX(tracepoint_printk_mutex);
2938 static void output_printk(struct trace_event_buffer *fbuffer)
2940 struct trace_event_call *event_call;
2941 struct trace_event_file *file;
2942 struct trace_event *event;
2943 unsigned long flags;
2944 struct trace_iterator *iter = tracepoint_print_iter;
2946 /* We should never get here if iter is NULL */
2947 if (WARN_ON_ONCE(!iter))
2950 event_call = fbuffer->trace_file->event_call;
2951 if (!event_call || !event_call->event.funcs ||
2952 !event_call->event.funcs->trace)
2955 file = fbuffer->trace_file;
2956 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2957 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2958 !filter_match_preds(file->filter, fbuffer->entry)))
2961 event = &fbuffer->trace_file->event_call->event;
2963 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2964 trace_seq_init(&iter->seq);
2965 iter->ent = fbuffer->entry;
2966 event_call->event.funcs->trace(iter, 0, event);
2967 trace_seq_putc(&iter->seq, 0);
2968 printk("%s", iter->seq.buffer);
2970 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2973 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2974 void *buffer, size_t *lenp,
2977 int save_tracepoint_printk;
2980 mutex_lock(&tracepoint_printk_mutex);
2981 save_tracepoint_printk = tracepoint_printk;
2983 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2986 * This will force exiting early, as tracepoint_printk
2987 * is always zero when tracepoint_print_iter is not allocated.
2989 if (!tracepoint_print_iter)
2990 tracepoint_printk = 0;
2992 if (save_tracepoint_printk == tracepoint_printk)
2995 if (tracepoint_printk)
2996 static_key_enable(&tracepoint_printk_key.key);
2998 static_key_disable(&tracepoint_printk_key.key);
3001 mutex_unlock(&tracepoint_printk_mutex);
3006 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
3008 enum event_trigger_type tt = ETT_NONE;
3009 struct trace_event_file *file = fbuffer->trace_file;
3011 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
3012 fbuffer->entry, &tt))
3015 if (static_key_false(&tracepoint_printk_key.key))
3016 output_printk(fbuffer);
3018 if (static_branch_unlikely(&trace_event_exports_enabled))
3019 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
3021 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
3022 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
3026 event_triggers_post_call(file, tt);
3029 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
3032 * Skip 3:
3034 * trace_buffer_unlock_commit_regs()
3035 * trace_event_buffer_commit()
3036 * trace_event_raw_event_xxx()
3038 # define STACK_SKIP 3
3040 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
3041 struct trace_buffer *buffer,
3042 struct ring_buffer_event *event,
3043 unsigned int trace_ctx,
3044 struct pt_regs *regs)
3046 __buffer_unlock_commit(buffer, event);
3049 * If regs is not set, then skip the necessary functions.
3050 * Note, we can still get here via blktrace, wakeup tracer
3051 * and mmiotrace, but that's ok if they lose a function or
3052 * two. They are not that meaningful.
3054 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3055 ftrace_trace_userstack(tr, buffer, trace_ctx);
3059 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3062 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3063 struct ring_buffer_event *event)
3065 __buffer_unlock_commit(buffer, event);
3069 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3070 parent_ip, unsigned int trace_ctx)
3072 struct trace_event_call *call = &event_function;
3073 struct trace_buffer *buffer = tr->array_buffer.buffer;
3074 struct ring_buffer_event *event;
3075 struct ftrace_entry *entry;
3077 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3081 entry = ring_buffer_event_data(event);
3083 entry->parent_ip = parent_ip;
3085 if (!call_filter_check_discard(call, entry, buffer, event)) {
3086 if (static_branch_unlikely(&trace_function_exports_enabled))
3087 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3088 __buffer_unlock_commit(buffer, event);
3092 #ifdef CONFIG_STACKTRACE
3094 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3095 #define FTRACE_KSTACK_NESTING 4
3097 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3099 struct ftrace_stack {
3100 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3104 struct ftrace_stacks {
3105 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3108 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3109 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3111 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3112 unsigned int trace_ctx,
3113 int skip, struct pt_regs *regs)
3115 struct trace_event_call *call = &event_kernel_stack;
3116 struct ring_buffer_event *event;
3117 unsigned int size, nr_entries;
3118 struct ftrace_stack *fstack;
3119 struct stack_entry *entry;
3124 * Add one, for this function and the call to save_stack_trace()
3125 * If regs is set, then these functions will not be in the way.
3127 #ifndef CONFIG_UNWINDER_ORC
3132 preempt_disable_notrace();
3134 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3136 /* This should never happen. If it does, yell once and skip */
3137 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3141 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3142 * interrupt will either see the value pre increment or post
3143 * increment. If the interrupt happens pre increment it will have
3144 * restored the counter when it returns. We just need a barrier to
3145 * keep gcc from moving things around.
3149 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3150 size = ARRAY_SIZE(fstack->calls);
3153 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3156 nr_entries = stack_trace_save(fstack->calls, size, skip);
3159 size = nr_entries * sizeof(unsigned long);
3160 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3161 (sizeof(*entry) - sizeof(entry->caller)) + size,
3165 ptr = ring_buffer_event_data(event);
3169 * For backward compatibility reasons, the entry->caller is an
3170 * array of 8 slots to store the stack. This is also exported
3171 * to user space. The amount allocated on the ring buffer actually
3172 * holds enough for the stack specified by nr_entries. This will
3173 * go into the location of entry->caller. Due to string fortifiers
3174 * checking the size of the destination of memcpy() it triggers
3175 * when it detects that size is greater than 8. To hide this from
3176 * the fortifiers, we use "ptr" and pointer arithmetic to assign caller.
3178 * The below is really just:
3179 * memcpy(&entry->caller, fstack->calls, size);
3181 ptr += offsetof(typeof(*entry), caller);
3182 memcpy(ptr, fstack->calls, size);
3184 entry->size = nr_entries;
3186 if (!call_filter_check_discard(call, entry, buffer, event))
3187 __buffer_unlock_commit(buffer, event);
3190 /* Again, don't let gcc optimize things here */
3192 __this_cpu_dec(ftrace_stack_reserve);
3193 preempt_enable_notrace();
3197 static inline void ftrace_trace_stack(struct trace_array *tr,
3198 struct trace_buffer *buffer,
3199 unsigned int trace_ctx,
3200 int skip, struct pt_regs *regs)
3202 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3205 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3208 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3211 struct trace_buffer *buffer = tr->array_buffer.buffer;
3213 if (rcu_is_watching()) {
3214 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3218 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3222 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3223 * but if the above rcu_is_watching() failed, then the NMI
3224 * triggered someplace critical, and ct_irq_enter() should
3225 * not be called from NMI.
3227 if (unlikely(in_nmi()))
3230 ct_irq_enter_irqson();
3231 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3232 ct_irq_exit_irqson();
3236 * trace_dump_stack - record a stack back trace in the trace buffer
3237 * @skip: Number of functions to skip (helper handlers)
3239 void trace_dump_stack(int skip)
3241 if (tracing_disabled || tracing_selftest_running)
3244 #ifndef CONFIG_UNWINDER_ORC
3245 /* Skip 1 to skip this function. */
3248 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3249 tracing_gen_ctx(), skip, NULL);
3251 EXPORT_SYMBOL_GPL(trace_dump_stack);
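/*
 * Editor's note (illustrative): any kernel code can record a backtrace
 * into the trace buffer instead of the console, e.g.:
 *
 *	trace_dump_stack(0);	// record the current call chain
 *
 * which is often preferable to dump_stack() on hot or timing-sensitive
 * paths, since it does not flood the printk log.
 */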
3253 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3254 static DEFINE_PER_CPU(int, user_stack_count);
3257 ftrace_trace_userstack(struct trace_array *tr,
3258 struct trace_buffer *buffer, unsigned int trace_ctx)
3260 struct trace_event_call *call = &event_user_stack;
3261 struct ring_buffer_event *event;
3262 struct userstack_entry *entry;
3264 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3268 * NMIs cannot handle page faults, even with fixups.
3269 * Saving the user stack can (and often does) fault.
3271 if (unlikely(in_nmi()))
3275 * prevent recursion, since the user stack tracing may
3276 * trigger other kernel events.
3279 if (__this_cpu_read(user_stack_count))
3282 __this_cpu_inc(user_stack_count);
3284 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3285 sizeof(*entry), trace_ctx);
3287 goto out_drop_count;
3288 entry = ring_buffer_event_data(event);
3290 entry->tgid = current->tgid;
3291 memset(&entry->caller, 0, sizeof(entry->caller));
3293 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3294 if (!call_filter_check_discard(call, entry, buffer, event))
3295 __buffer_unlock_commit(buffer, event);
3298 __this_cpu_dec(user_stack_count);
3302 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3303 static void ftrace_trace_userstack(struct trace_array *tr,
3304 struct trace_buffer *buffer,
3305 unsigned int trace_ctx)
3308 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3310 #endif /* CONFIG_STACKTRACE */
3313 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3314 unsigned long long delta)
3316 entry->bottom_delta_ts = delta & U32_MAX;
3317 entry->top_delta_ts = (delta >> 32);
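/*
 * Editor's note (illustrative): the 64-bit delta is split into two
 * 32-bit halves for the entry; a reader reassembles it as:
 *
 *	u64 delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
 */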
3320 void trace_last_func_repeats(struct trace_array *tr,
3321 struct trace_func_repeats *last_info,
3322 unsigned int trace_ctx)
3324 struct trace_buffer *buffer = tr->array_buffer.buffer;
3325 struct func_repeats_entry *entry;
3326 struct ring_buffer_event *event;
3329 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3330 sizeof(*entry), trace_ctx);
3334 delta = ring_buffer_event_time_stamp(buffer, event) -
3335 last_info->ts_last_call;
3337 entry = ring_buffer_event_data(event);
3338 entry->ip = last_info->ip;
3339 entry->parent_ip = last_info->parent_ip;
3340 entry->count = last_info->count;
3341 func_repeats_set_delta_ts(entry, delta);
3343 __buffer_unlock_commit(buffer, event);
3346 /* created for use with alloc_percpu */
3347 struct trace_buffer_struct {
3349 char buffer[4][TRACE_BUF_SIZE];
3352 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3355 * This allows for lockless recording. If we're nested too deeply, then
3356 * this returns NULL.
3358 static char *get_trace_buf(void)
3360 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3362 if (!trace_percpu_buffer || buffer->nesting >= 4)
3367 /* Interrupts must see nesting incremented before we use the buffer */
3369 return &buffer->buffer[buffer->nesting - 1][0];
3372 static void put_trace_buf(void)
3374 /* Don't let the decrement of nesting leak before this */
3376 this_cpu_dec(trace_percpu_buffer->nesting);
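/*
 * Editor's sketch (illustrative): users of the per-CPU printk buffers
 * follow a strict get/put discipline with preemption disabled:
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		... format at most TRACE_BUF_SIZE bytes into buf ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 *
 * which is exactly the shape of trace_vbprintk() and
 * __trace_array_vprintk() below.
 */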
3379 static int alloc_percpu_trace_buffer(void)
3381 struct trace_buffer_struct __percpu *buffers;
3383 if (trace_percpu_buffer)
3386 buffers = alloc_percpu(struct trace_buffer_struct);
3387 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3390 trace_percpu_buffer = buffers;
3394 static int buffers_allocated;
3396 void trace_printk_init_buffers(void)
3398 if (buffers_allocated)
3401 if (alloc_percpu_trace_buffer())
3404 /* trace_printk() is for debug use only. Don't use it in production. */
3407 pr_warn("**********************************************************\n");
3408 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3410 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3412 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3413 pr_warn("** unsafe for production use. **\n");
3415 pr_warn("** If you see this message and you are not debugging **\n");
3416 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3418 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3419 pr_warn("**********************************************************\n");
3421 /* Expand the buffers to the set size */
3422 tracing_update_buffers();
3424 buffers_allocated = 1;
3427 * trace_printk_init_buffers() can be called by modules.
3428 * If that happens, then we need to start cmdline recording
3429 * directly here. If the global_trace.buffer is already
3430 * allocated here, then this was called by module code.
3432 if (global_trace.array_buffer.buffer)
3433 tracing_start_cmdline_record();
3435 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3437 void trace_printk_start_comm(void)
3439 /* Start tracing comms if trace printk is set */
3440 if (!buffers_allocated)
3442 tracing_start_cmdline_record();
3445 static void trace_printk_start_stop_comm(int enabled)
3447 if (!buffers_allocated)
3451 tracing_start_cmdline_record();
3453 tracing_stop_cmdline_record();
3457 * trace_vbprintk - write binary msg to tracing buffer
3458 * @ip: The address of the caller
3459 * @fmt: The string format to write to the buffer
3460 * @args: Arguments for @fmt
3462 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3464 struct trace_event_call *call = &event_bprint;
3465 struct ring_buffer_event *event;
3466 struct trace_buffer *buffer;
3467 struct trace_array *tr = &global_trace;
3468 struct bprint_entry *entry;
3469 unsigned int trace_ctx;
3473 if (unlikely(tracing_selftest_running || tracing_disabled))
3476 /* Don't pollute graph traces with trace_vprintk internals */
3477 pause_graph_tracing();
3479 trace_ctx = tracing_gen_ctx();
3480 preempt_disable_notrace();
3482 tbuffer = get_trace_buf();
3488 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3490 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3493 size = sizeof(*entry) + sizeof(u32) * len;
3494 buffer = tr->array_buffer.buffer;
3495 ring_buffer_nest_start(buffer);
3496 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3500 entry = ring_buffer_event_data(event);
3504 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3505 if (!call_filter_check_discard(call, entry, buffer, event)) {
3506 __buffer_unlock_commit(buffer, event);
3507 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3511 ring_buffer_nest_end(buffer);
3516 preempt_enable_notrace();
3517 unpause_graph_tracing();
3521 EXPORT_SYMBOL_GPL(trace_vbprintk);
3525 __trace_array_vprintk(struct trace_buffer *buffer,
3526 unsigned long ip, const char *fmt, va_list args)
3528 struct trace_event_call *call = &event_print;
3529 struct ring_buffer_event *event;
3531 struct print_entry *entry;
3532 unsigned int trace_ctx;
3535 if (tracing_disabled)
3538 /* Don't pollute graph traces with trace_vprintk internals */
3539 pause_graph_tracing();
3541 trace_ctx = tracing_gen_ctx();
3542 preempt_disable_notrace();
3545 tbuffer = get_trace_buf();
3551 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3553 size = sizeof(*entry) + len + 1;
3554 ring_buffer_nest_start(buffer);
3555 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3559 entry = ring_buffer_event_data(event);
3562 memcpy(&entry->buf, tbuffer, len + 1);
3563 if (!call_filter_check_discard(call, entry, buffer, event)) {
3564 __buffer_unlock_commit(buffer, event);
3565 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3569 ring_buffer_nest_end(buffer);
3573 preempt_enable_notrace();
3574 unpause_graph_tracing();
3580 int trace_array_vprintk(struct trace_array *tr,
3581 unsigned long ip, const char *fmt, va_list args)
3583 if (tracing_selftest_running && tr == &global_trace)
3586 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3590 * trace_array_printk - Print a message to a specific instance
3591 * @tr: The instance trace_array descriptor
3592 * @ip: The instruction pointer that this is called from.
3593 * @fmt: The format to print (printf format)
3595 * If a subsystem sets up its own instance, it may printk strings
3596 * into its tracing instance buffer using this function. Note, this
3597 * function will not write into the top level buffer (use
3598 * trace_printk() for that), as the top level buffer should only
3599 * contain events that can be individually disabled.
3600 * trace_printk() is only used for debugging a kernel, and should
3601 * never be incorporated into normal use.
3603 * trace_array_printk() can be used, as it will not add noise to the
3604 * top level tracing buffer.
3606 * Note, trace_array_init_printk() must be called on @tr before this
3610 int trace_array_printk(struct trace_array *tr,
3611 unsigned long ip, const char *fmt, ...)
3619 /* This is only allowed for created instances */
3620 if (tr == &global_trace)
3623 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3627 ret = trace_array_vprintk(tr, ip, fmt, ap);
3631 EXPORT_SYMBOL_GPL(trace_array_printk);
3634 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3635 * @tr: The trace array to initialize the buffers for
3637 * As trace_array_printk() only writes into instances, they are OK to
3638 * have in the kernel (unlike trace_printk()). This needs to be called
3639 * before trace_array_printk() can be used on a trace_array.
3641 int trace_array_init_printk(struct trace_array *tr)
3646 /* This is only allowed for created instances */
3647 if (tr == &global_trace)
3650 return alloc_percpu_trace_buffer();
3652 EXPORT_SYMBOL_GPL(trace_array_init_printk);
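/*
 * Editor's sketch (illustrative): a subsystem with its own instance
 * would typically do something like:
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);
 *
 * The instance name is made up for the example, and the exact signature
 * of trace_array_get_by_name() varies between kernel versions; the point
 * is only that the instance must be initialized for printk before the
 * first trace_array_printk() call.
 */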
3655 int trace_array_printk_buf(struct trace_buffer *buffer,
3656 unsigned long ip, const char *fmt, ...)
3661 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3665 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3671 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3673 return trace_array_vprintk(&global_trace, ip, fmt, args);
3675 EXPORT_SYMBOL_GPL(trace_vprintk);
3677 static void trace_iterator_increment(struct trace_iterator *iter)
3679 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3683 ring_buffer_iter_advance(buf_iter);
3686 static struct trace_entry *
3687 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3688 unsigned long *lost_events)
3690 struct ring_buffer_event *event;
3691 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3694 event = ring_buffer_iter_peek(buf_iter, ts);
3696 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3697 (unsigned long)-1 : 0;
3699 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3704 iter->ent_size = ring_buffer_event_length(event);
3705 return ring_buffer_event_data(event);
3711 static struct trace_entry *
3712 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3713 unsigned long *missing_events, u64 *ent_ts)
3715 struct trace_buffer *buffer = iter->array_buffer->buffer;
3716 struct trace_entry *ent, *next = NULL;
3717 unsigned long lost_events = 0, next_lost = 0;
3718 int cpu_file = iter->cpu_file;
3719 u64 next_ts = 0, ts;
3725 * If we are in a per_cpu trace file, don't bother iterating over
3726 * all CPUs; just peek at that one directly.
3728 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3729 if (ring_buffer_empty_cpu(buffer, cpu_file))
3731 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3733 *ent_cpu = cpu_file;
3738 for_each_tracing_cpu(cpu) {
3740 if (ring_buffer_empty_cpu(buffer, cpu))
3743 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3746 * Pick the entry with the smallest timestamp:
3748 if (ent && (!next || ts < next_ts)) {
3752 next_lost = lost_events;
3753 next_size = iter->ent_size;
3757 iter->ent_size = next_size;
3760 *ent_cpu = next_cpu;
3766 *missing_events = next_lost;
3771 #define STATIC_FMT_BUF_SIZE 128
3772 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3774 char *trace_iter_expand_format(struct trace_iterator *iter)
3779 * iter->tr is NULL when used with tp_printk, which makes
3780 * this get called where it is not safe to call krealloc().
3782 if (!iter->tr || iter->fmt == static_fmt_buf)
3785 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3788 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3795 /* Returns true if the string is safe to dereference from an event */
3796 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3799 unsigned long addr = (unsigned long)str;
3800 struct trace_event *trace_event;
3801 struct trace_event_call *event;
3803 /* Ignore strings with no length */
3807 /* OK if part of the event data */
3808 if ((addr >= (unsigned long)iter->ent) &&
3809 (addr < (unsigned long)iter->ent + iter->ent_size))
3812 /* OK if part of the temp seq buffer */
3813 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3814 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3817 /* Core rodata can not be freed */
3818 if (is_kernel_rodata(addr))
3821 if (trace_is_tracepoint_string(str))
3825 * Now this could be a module event, referencing core module
3826 * data, which is OK.
3831 trace_event = ftrace_find_event(iter->ent->type);
3835 event = container_of(trace_event, struct trace_event_call, event);
3836 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3839 /* Would rather have rodata, but this will suffice */
3840 if (within_module_core(addr, event->module))
3846 static const char *show_buffer(struct trace_seq *s)
3848 struct seq_buf *seq = &s->seq;
3850 seq_buf_terminate(seq);
3855 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3857 static int test_can_verify_check(const char *fmt, ...)
3864 * The verifier depends on vsnprintf() modifying the va_list
3865 * passed to it, where it is sent as a reference. Some architectures
3866 * (like x86_32) pass it by value, which means that vsnprintf()
3867 * does not modify the va_list passed to it, and the verifier
3868 * would then need to be able to understand all the values that
3869 * vsnprintf can use. If it is passed by value, then the verifier
3870 * is disabled.
3873 vsnprintf(buf, 16, "%d", ap);
3874 ret = va_arg(ap, int);
3880 static void test_can_verify(void)
3882 if (!test_can_verify_check("%d %d", 0, 1)) {
3883 pr_info("trace event string verifier disabled\n");
3884 static_branch_inc(&trace_no_verify);
3889 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3890 * @iter: The iterator that holds the seq buffer and the event being printed
3891 * @fmt: The format used to print the event
3892 * @ap: The va_list holding the data to print from @fmt.
3894 * This writes the data into the @iter->seq buffer using the data from
3895 * @fmt and @ap. If the format has a %s, then the source of the string
3896 * is examined to make sure it is safe to print, otherwise it will
3897 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3900 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3903 const char *p = fmt;
3907 if (WARN_ON_ONCE(!fmt))
3910 if (static_branch_unlikely(&trace_no_verify))
3913 /* Don't bother checking when doing a ftrace_dump() */
3914 if (iter->fmt == static_fmt_buf)
3923 /* We only care about %s and variants */
3924 for (i = 0; p[i]; i++) {
3925 if (i + 1 >= iter->fmt_size) {
3927 * If we can't expand the copy buffer,
3928 * just print it.
3930 if (!trace_iter_expand_format(iter))
3934 if (p[i] == '\\' && p[i+1]) {
3939 /* Need to test cases like %08.*s */
3940 for (j = 1; p[i+j]; j++) {
3941 if (isdigit(p[i+j]) ||
3944 if (p[i+j] == '*') {
3956 /* If no %s found then just print normally */
3960 /* Copy up to the %s, and print that */
3961 strncpy(iter->fmt, p, i);
3962 iter->fmt[i] = '\0';
3963 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3966 * If iter->seq is full, the above call no longer guarantees
3967 * that ap is in sync with fmt processing, and further calls
3968 * to va_arg() can return wrong positional arguments.
3970 * Ensure that ap is no longer used in this case.
3972 if (iter->seq.full) {
3978 len = va_arg(ap, int);
3980 /* The ap now points to the string data of the %s */
3981 str = va_arg(ap, const char *);
3984 * If you hit this warning, it is likely that the
3985 * trace event in question used %s on a string that
3986 * was saved at the time of the event, but may not be
3987 * around when the trace is read. Use __string(),
3988 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3989 * instead. See samples/trace_events/trace-events-sample.h
3992 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3993 "fmt: '%s' current_buffer: '%s'",
3994 fmt, show_buffer(&iter->seq))) {
3997 /* Try to safely read the string */
3999 if (len + 1 > iter->fmt_size)
4000 len = iter->fmt_size - 1;
4003 ret = copy_from_kernel_nofault(iter->fmt, str, len);
4007 ret = strncpy_from_kernel_nofault(iter->fmt, str,
4011 trace_seq_printf(&iter->seq, "(0x%px)", str);
4013 trace_seq_printf(&iter->seq, "(0x%px:%s)",
4015 str = "[UNSAFE-MEMORY]";
4016 strcpy(iter->fmt, "%s");
4018 strncpy(iter->fmt, p + i, j + 1);
4019 iter->fmt[j+1] = '\0';
4022 trace_seq_printf(&iter->seq, iter->fmt, len, str);
4024 trace_seq_printf(&iter->seq, iter->fmt, str);
4030 trace_seq_vprintf(&iter->seq, p, ap);
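/*
 * Editor's note (illustrative): the classic way to trip the check above
 * is a TRACE_EVENT() whose TP_printk() prints a raw pointer that was
 * only valid when the event fired, e.g. something like
 *
 *	TP_printk("%s", __entry->name_ptr)	// field name made up here
 *
 * instead of using the __string()/__assign_str()/__get_str() helpers,
 * which copy the string into the event record itself.
 */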
4033 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
4035 const char *p, *new_fmt;
4038 if (WARN_ON_ONCE(!fmt))
4041 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
4045 new_fmt = q = iter->fmt;
4047 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
4048 if (!trace_iter_expand_format(iter))
4051 q += iter->fmt - new_fmt;
4052 new_fmt = iter->fmt;
4057 /* Replace %p with %px */
4061 } else if (p[0] == 'p' && !isalnum(p[1])) {
4072 #define STATIC_TEMP_BUF_SIZE 128
4073 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4075 /* Find the next real entry, without updating the iterator itself */
4076 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4077 int *ent_cpu, u64 *ent_ts)
4079 /* __find_next_entry will reset ent_size */
4080 int ent_size = iter->ent_size;
4081 struct trace_entry *entry;
4084 * If called from ftrace_dump(), then the iter->temp buffer
4085 * will be the static_temp_buf and not created from kmalloc.
4086 * If the entry size is greater than the buffer, we can
4087 * not save it. Just return NULL in that case. This is only
4088 * used to add markers when two consecutive events' time
4089 * stamps have a large delta. See trace_print_lat_context()
4091 if (iter->temp == static_temp_buf &&
4092 STATIC_TEMP_BUF_SIZE < ent_size)
4096 * The __find_next_entry() may call peek_next_entry(), which may
4097 * call ring_buffer_peek() that may make the contents of iter->ent
4098 * undefined. Need to copy iter->ent now.
4100 if (iter->ent && iter->ent != iter->temp) {
4101 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4102 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4104 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4109 iter->temp_size = iter->ent_size;
4111 memcpy(iter->temp, iter->ent, iter->ent_size);
4112 iter->ent = iter->temp;
4114 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4115 /* Put back the original ent_size */
4116 iter->ent_size = ent_size;
4121 /* Find the next real entry, and increment the iterator to the next entry */
4122 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4124 iter->ent = __find_next_entry(iter, &iter->cpu,
4125 &iter->lost_events, &iter->ts);
4128 trace_iterator_increment(iter);
4130 return iter->ent ? iter : NULL;
4133 static void trace_consume(struct trace_iterator *iter)
4135 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4136 &iter->lost_events);
4139 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4141 struct trace_iterator *iter = m->private;
4145 WARN_ON_ONCE(iter->leftover);
4149 /* can't go backwards */
4154 ent = trace_find_next_entry_inc(iter);
4158 while (ent && iter->idx < i)
4159 ent = trace_find_next_entry_inc(iter);
4166 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4168 struct ring_buffer_iter *buf_iter;
4169 unsigned long entries = 0;
4172 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4174 buf_iter = trace_buffer_iter(iter, cpu);
4178 ring_buffer_iter_reset(buf_iter);
4181 * With the max latency tracers, it is possible that a reset never
4182 * took place on a cpu. This is evident when the timestamp is
4183 * before the start of the buffer.
4185 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4186 if (ts >= iter->array_buffer->time_start)
4189 ring_buffer_iter_advance(buf_iter);
4192 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4196 * The current tracer is copied to avoid taking a global lock
4197 * all around.
4199 static void *s_start(struct seq_file *m, loff_t *pos)
4201 struct trace_iterator *iter = m->private;
4202 struct trace_array *tr = iter->tr;
4203 int cpu_file = iter->cpu_file;
4209 * copy the tracer to avoid using a global lock all around.
4210 * iter->trace is a copy of current_trace, the pointer to the
4211 * name may be used instead of a strcmp(), as iter->trace->name
4212 * will point to the same string as current_trace->name.
4214 mutex_lock(&trace_types_lock);
4215 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4216 *iter->trace = *tr->current_trace;
4217 mutex_unlock(&trace_types_lock);
4219 #ifdef CONFIG_TRACER_MAX_TRACE
4220 if (iter->snapshot && iter->trace->use_max_tr)
4221 return ERR_PTR(-EBUSY);
4224 if (*pos != iter->pos) {
4229 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4230 for_each_tracing_cpu(cpu)
4231 tracing_iter_reset(iter, cpu);
4233 tracing_iter_reset(iter, cpu_file);
4236 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4241 * If we overflowed the seq_file before, then we want
4242 * to just reuse the trace_seq buffer again.
4248 p = s_next(m, p, &l);
4252 trace_event_read_lock();
4253 trace_access_lock(cpu_file);
4257 static void s_stop(struct seq_file *m, void *p)
4259 struct trace_iterator *iter = m->private;
4261 #ifdef CONFIG_TRACER_MAX_TRACE
4262 if (iter->snapshot && iter->trace->use_max_tr)
4266 trace_access_unlock(iter->cpu_file);
4267 trace_event_read_unlock();
4271 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4272 unsigned long *entries, int cpu)
4274 unsigned long count;
4276 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4278 * If this buffer has skipped entries, then we hold all
4279 * entries for the trace and we need to ignore the
4280 * ones before the time stamp.
4282 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4283 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4284 /* total is the same as the entries */
4288 ring_buffer_overrun_cpu(buf->buffer, cpu);
4293 get_total_entries(struct array_buffer *buf,
4294 unsigned long *total, unsigned long *entries)
4302 for_each_tracing_cpu(cpu) {
4303 get_total_entries_cpu(buf, &t, &e, cpu);
4309 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4311 unsigned long total, entries;
4316 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4321 unsigned long trace_total_entries(struct trace_array *tr)
4323 unsigned long total, entries;
4328 get_total_entries(&tr->array_buffer, &total, &entries);
4333 static void print_lat_help_header(struct seq_file *m)
4335 seq_puts(m, "# _------=> CPU# \n"
4336 "# / _-----=> irqs-off/BH-disabled\n"
4337 "# | / _----=> need-resched \n"
4338 "# || / _---=> hardirq/softirq \n"
4339 "# ||| / _--=> preempt-depth \n"
4340 "# |||| / _-=> migrate-disable \n"
4341 "# ||||| / delay \n"
4342 "# cmd pid |||||| time | caller \n"
4343 "# \\ / |||||| \\ | / \n");
4346 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4348 unsigned long total;
4349 unsigned long entries;
4351 get_total_entries(buf, &total, &entries);
4352 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4353 entries, total, num_online_cpus());
4357 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4360 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4362 print_event_info(buf, m);
4364 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4365 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4368 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4371 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4372 static const char space[] = " ";
4373 int prec = tgid ? 12 : 2;
4375 print_event_info(buf, m);
4377 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4378 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4379 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4380 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4381 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4382 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4383 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4384 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4388 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4390 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4391 struct array_buffer *buf = iter->array_buffer;
4392 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4393 struct tracer *type = iter->trace;
4394 unsigned long entries;
4395 unsigned long total;
4396 const char *name = type->name;
4398 get_total_entries(buf, &total, &entries);
4400 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4402 seq_puts(m, "# -----------------------------------"
4403 "---------------------------------\n");
4404 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4405 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4406 nsecs_to_usecs(data->saved_latency),
4410 preempt_model_none() ? "server" :
4411 preempt_model_voluntary() ? "desktop" :
4412 preempt_model_full() ? "preempt" :
4413 preempt_model_rt() ? "preempt_rt" :
4415 /* These are reserved for later use */
4418 seq_printf(m, " #P:%d)\n", num_online_cpus());
4422 seq_puts(m, "# -----------------\n");
4423 seq_printf(m, "# | task: %.16s-%d "
4424 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4425 data->comm, data->pid,
4426 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4427 data->policy, data->rt_priority);
4428 seq_puts(m, "# -----------------\n");
4430 if (data->critical_start) {
4431 seq_puts(m, "# => started at: ");
4432 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4433 trace_print_seq(m, &iter->seq);
4434 seq_puts(m, "\n# => ended at: ");
4435 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4436 trace_print_seq(m, &iter->seq);
4437 seq_puts(m, "\n#\n");
4443 static void test_cpu_buff_start(struct trace_iterator *iter)
4445 struct trace_seq *s = &iter->seq;
4446 struct trace_array *tr = iter->tr;
4448 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4451 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4454 if (cpumask_available(iter->started) &&
4455 cpumask_test_cpu(iter->cpu, iter->started))
4458 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4461 if (cpumask_available(iter->started))
4462 cpumask_set_cpu(iter->cpu, iter->started);
4464 /* Don't print started cpu buffer for the first entry of the trace */
4466 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4470 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4472 struct trace_array *tr = iter->tr;
4473 struct trace_seq *s = &iter->seq;
4474 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4475 struct trace_entry *entry;
4476 struct trace_event *event;
4480 test_cpu_buff_start(iter);
4482 event = ftrace_find_event(entry->type);
4484 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4485 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4486 trace_print_lat_context(iter);
4488 trace_print_context(iter);
4491 if (trace_seq_has_overflowed(s))
4492 return TRACE_TYPE_PARTIAL_LINE;
4495 if (tr->trace_flags & TRACE_ITER_FIELDS)
4496 return print_event_fields(iter, event);
4497 return event->funcs->trace(iter, sym_flags, event);
4500 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4502 return trace_handle_return(s);
4505 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4507 struct trace_array *tr = iter->tr;
4508 struct trace_seq *s = &iter->seq;
4509 struct trace_entry *entry;
4510 struct trace_event *event;
4514 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4515 trace_seq_printf(s, "%d %d %llu ",
4516 entry->pid, iter->cpu, iter->ts);
4518 if (trace_seq_has_overflowed(s))
4519 return TRACE_TYPE_PARTIAL_LINE;
4521 event = ftrace_find_event(entry->type);
4523 return event->funcs->raw(iter, 0, event);
4525 trace_seq_printf(s, "%d ?\n", entry->type);
4527 return trace_handle_return(s);
4530 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4532 struct trace_array *tr = iter->tr;
4533 struct trace_seq *s = &iter->seq;
4534 unsigned char newline = '\n';
4535 struct trace_entry *entry;
4536 struct trace_event *event;
4540 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4541 SEQ_PUT_HEX_FIELD(s, entry->pid);
4542 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4543 SEQ_PUT_HEX_FIELD(s, iter->ts);
4544 if (trace_seq_has_overflowed(s))
4545 return TRACE_TYPE_PARTIAL_LINE;
4548 event = ftrace_find_event(entry->type);
4550 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4551 if (ret != TRACE_TYPE_HANDLED)
4555 SEQ_PUT_FIELD(s, newline);
4557 return trace_handle_return(s);
4560 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4562 struct trace_array *tr = iter->tr;
4563 struct trace_seq *s = &iter->seq;
4564 struct trace_entry *entry;
4565 struct trace_event *event;
4569 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4570 SEQ_PUT_FIELD(s, entry->pid);
4571 SEQ_PUT_FIELD(s, iter->cpu);
4572 SEQ_PUT_FIELD(s, iter->ts);
4573 if (trace_seq_has_overflowed(s))
4574 return TRACE_TYPE_PARTIAL_LINE;
4577 event = ftrace_find_event(entry->type);
4578 return event ? event->funcs->binary(iter, 0, event) :
4582 int trace_empty(struct trace_iterator *iter)
4584 struct ring_buffer_iter *buf_iter;
4587 /* If we are looking at one CPU buffer, only check that one */
4588 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4589 cpu = iter->cpu_file;
4590 buf_iter = trace_buffer_iter(iter, cpu);
4592 if (!ring_buffer_iter_empty(buf_iter))
4595 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4601 for_each_tracing_cpu(cpu) {
4602 buf_iter = trace_buffer_iter(iter, cpu);
4604 if (!ring_buffer_iter_empty(buf_iter))
4607 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4615 /* Called with trace_event_read_lock() held. */
4616 enum print_line_t print_trace_line(struct trace_iterator *iter)
4618 struct trace_array *tr = iter->tr;
4619 unsigned long trace_flags = tr->trace_flags;
4620 enum print_line_t ret;
4622 if (iter->lost_events) {
4623 if (iter->lost_events == (unsigned long)-1)
4624 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4627 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4628 iter->cpu, iter->lost_events);
4629 if (trace_seq_has_overflowed(&iter->seq))
4630 return TRACE_TYPE_PARTIAL_LINE;
4633 if (iter->trace && iter->trace->print_line) {
4634 ret = iter->trace->print_line(iter);
4635 if (ret != TRACE_TYPE_UNHANDLED)
4639 if (iter->ent->type == TRACE_BPUTS &&
4640 trace_flags & TRACE_ITER_PRINTK &&
4641 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4642 return trace_print_bputs_msg_only(iter);
4644 if (iter->ent->type == TRACE_BPRINT &&
4645 trace_flags & TRACE_ITER_PRINTK &&
4646 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4647 return trace_print_bprintk_msg_only(iter);
4649 if (iter->ent->type == TRACE_PRINT &&
4650 trace_flags & TRACE_ITER_PRINTK &&
4651 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4652 return trace_print_printk_msg_only(iter);
4654 if (trace_flags & TRACE_ITER_BIN)
4655 return print_bin_fmt(iter);
4657 if (trace_flags & TRACE_ITER_HEX)
4658 return print_hex_fmt(iter);
4660 if (trace_flags & TRACE_ITER_RAW)
4661 return print_raw_fmt(iter);
4663 return print_trace_fmt(iter);
4666 void trace_latency_header(struct seq_file *m)
4668 struct trace_iterator *iter = m->private;
4669 struct trace_array *tr = iter->tr;
4671 /* print nothing if the buffers are empty */
4672 if (trace_empty(iter))
4675 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4676 print_trace_header(m, iter);
4678 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4679 print_lat_help_header(m);
4682 void trace_default_header(struct seq_file *m)
4684 struct trace_iterator *iter = m->private;
4685 struct trace_array *tr = iter->tr;
4686 unsigned long trace_flags = tr->trace_flags;
4688 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4691 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4692 /* print nothing if the buffers are empty */
4693 if (trace_empty(iter))
4695 print_trace_header(m, iter);
4696 if (!(trace_flags & TRACE_ITER_VERBOSE))
4697 print_lat_help_header(m);
4699 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4700 if (trace_flags & TRACE_ITER_IRQ_INFO)
4701 print_func_help_header_irq(iter->array_buffer,
4704 print_func_help_header(iter->array_buffer, m,
4710 static void test_ftrace_alive(struct seq_file *m)
4712 if (!ftrace_is_dead())
4714 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4715 "# MAY BE MISSING FUNCTION EVENTS\n");
4718 #ifdef CONFIG_TRACER_MAX_TRACE
4719 static void show_snapshot_main_help(struct seq_file *m)
4721 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4722 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4723 "# Takes a snapshot of the main buffer.\n"
4724 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4725 "# (Doesn't have to be '2'; works with any number that\n"
4726 "# is not a '0' or '1')\n");
4729 static void show_snapshot_percpu_help(struct seq_file *m)
4731 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4732 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4733 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4734 "# Takes a snapshot of the main buffer for this cpu.\n");
4736 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4737 "# Must use main snapshot file to allocate.\n");
4739 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4740 "# (Doesn't have to be '2'; works with any number that\n"
4741 "# is not a '0' or '1')\n");
4744 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4746 if (iter->tr->allocated_snapshot)
4747 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4749 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4751 seq_puts(m, "# Snapshot commands:\n");
4752 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4753 show_snapshot_main_help(m);
4755 show_snapshot_percpu_help(m);
4758 /* Should never be called */
4759 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4762 static int s_show(struct seq_file *m, void *v)
4764 struct trace_iterator *iter = v;
4767 if (iter->ent == NULL) {
4769 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4771 test_ftrace_alive(m);
4773 if (iter->snapshot && trace_empty(iter))
4774 print_snapshot_help(m, iter);
4775 else if (iter->trace && iter->trace->print_header)
4776 iter->trace->print_header(m);
4778 trace_default_header(m);
4780 } else if (iter->leftover) {
4782 * If we filled the seq_file buffer earlier, we
4783 * want to just show it now.
4785 ret = trace_print_seq(m, &iter->seq);
4787 /* ret should this time be zero, but you never know */
4788 iter->leftover = ret;
4791 print_trace_line(iter);
4792 ret = trace_print_seq(m, &iter->seq);
4794 * If we overflow the seq_file buffer, then it will
4795 * ask us for this data again at start up.
4797 * ret is 0 if seq_file write succeeded.
4800 iter->leftover = ret;
4807 * Should be used after trace_array_get(), trace_types_lock
4808 * ensures that i_cdev was already initialized.
4810 static inline int tracing_get_cpu(struct inode *inode)
4812 if (inode->i_cdev) /* See trace_create_cpu_file() */
4813 return (long)inode->i_cdev - 1;
4814 return RING_BUFFER_ALL_CPUS;
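/*
 * The encoding relied on here is presumably set up by trace_create_cpu_file()
 * when the per_cpu/cpu<N> files are created, along the lines of this sketch
 * (names and context are assumptions based on the "See" reference above):
 *
 *	// store cpu + 1 so that a NULL i_cdev can still mean "no per-cpu file"
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 *
 * tracing_get_cpu() then subtracts the 1 back off, and a NULL i_cdev maps
 * to RING_BUFFER_ALL_CPUS for the top-level files.
 */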
4817 static const struct seq_operations tracer_seq_ops = {
4824 static struct trace_iterator *
4825 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4827 struct trace_array *tr = inode->i_private;
4828 struct trace_iterator *iter;
4831 if (tracing_disabled)
4832 return ERR_PTR(-ENODEV);
4834 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4836 return ERR_PTR(-ENOMEM);
4838 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4840 if (!iter->buffer_iter)
4844 * trace_find_next_entry() may need to save off iter->ent.
4845 * It will place it into the iter->temp buffer. As most
4846 * events are less than 128 bytes, allocate a buffer of that size.
4847 * If one is greater, then trace_find_next_entry() will
4848 * allocate a new buffer to adjust for the bigger iter->ent.
4849 * It's not critical if it fails to get allocated here.
4851 iter->temp = kmalloc(128, GFP_KERNEL);
4853 iter->temp_size = 128;
4856 * trace_event_printf() may need to modify the given format
4857 * string to replace %p with %px so that it shows the real address
4858 * instead of a hash value. However, that is only needed for event
4859 * tracing; other tracers may not need it. Defer the allocation
4860 * until it is needed.
4866 * We make a copy of the current tracer to avoid concurrent
4867 * changes on it while we are reading.
4869 mutex_lock(&trace_types_lock);
4870 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4874 *iter->trace = *tr->current_trace;
4876 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4881 #ifdef CONFIG_TRACER_MAX_TRACE
4882 /* Currently only the top directory has a snapshot */
4883 if (tr->current_trace->print_max || snapshot)
4884 iter->array_buffer = &tr->max_buffer;
4887 iter->array_buffer = &tr->array_buffer;
4888 iter->snapshot = snapshot;
4890 iter->cpu_file = tracing_get_cpu(inode);
4891 mutex_init(&iter->mutex);
4893 /* Notify the tracer early; before we stop tracing. */
4894 if (iter->trace->open)
4895 iter->trace->open(iter);
4897 /* Annotate start of buffers if we had overruns */
4898 if (ring_buffer_overruns(iter->array_buffer->buffer))
4899 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4901 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4902 if (trace_clocks[tr->clock_id].in_ns)
4903 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4906 * If pause-on-trace is enabled, then stop the trace while
4907 * dumping, unless this is the "snapshot" file
4909 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4910 tracing_stop_tr(tr);
4912 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4913 for_each_tracing_cpu(cpu) {
4914 iter->buffer_iter[cpu] =
4915 ring_buffer_read_prepare(iter->array_buffer->buffer,
4918 ring_buffer_read_prepare_sync();
4919 for_each_tracing_cpu(cpu) {
4920 ring_buffer_read_start(iter->buffer_iter[cpu]);
4921 tracing_iter_reset(iter, cpu);
4924 cpu = iter->cpu_file;
4925 iter->buffer_iter[cpu] =
4926 ring_buffer_read_prepare(iter->array_buffer->buffer,
4928 ring_buffer_read_prepare_sync();
4929 ring_buffer_read_start(iter->buffer_iter[cpu]);
4930 tracing_iter_reset(iter, cpu);
4933 mutex_unlock(&trace_types_lock);
4938 mutex_unlock(&trace_types_lock);
4941 kfree(iter->buffer_iter);
4943 seq_release_private(inode, file);
4944 return ERR_PTR(-ENOMEM);
4947 int tracing_open_generic(struct inode *inode, struct file *filp)
4951 ret = tracing_check_open_get_tr(NULL);
4955 filp->private_data = inode->i_private;
4959 bool tracing_is_disabled(void)
4961 return tracing_disabled ? true : false;
4965 * Open and update trace_array ref count.
4966 * Must have the current trace_array passed to it.
4968 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4970 struct trace_array *tr = inode->i_private;
4973 ret = tracing_check_open_get_tr(tr);
4977 filp->private_data = inode->i_private;
4982 static int tracing_mark_open(struct inode *inode, struct file *filp)
4984 stream_open(inode, filp);
4985 return tracing_open_generic_tr(inode, filp);
4988 static int tracing_release(struct inode *inode, struct file *file)
4990 struct trace_array *tr = inode->i_private;
4991 struct seq_file *m = file->private_data;
4992 struct trace_iterator *iter;
4995 if (!(file->f_mode & FMODE_READ)) {
4996 trace_array_put(tr);
5000 /* Writes do not use seq_file */
5002 mutex_lock(&trace_types_lock);
5004 for_each_tracing_cpu(cpu) {
5005 if (iter->buffer_iter[cpu])
5006 ring_buffer_read_finish(iter->buffer_iter[cpu]);
5009 if (iter->trace && iter->trace->close)
5010 iter->trace->close(iter);
5012 if (!iter->snapshot && tr->stop_count)
5013 /* reenable tracing if it was previously enabled */
5014 tracing_start_tr(tr);
5016 __trace_array_put(tr);
5018 mutex_unlock(&trace_types_lock);
5020 mutex_destroy(&iter->mutex);
5021 free_cpumask_var(iter->started);
5025 kfree(iter->buffer_iter);
5026 seq_release_private(inode, file);
5031 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
5033 struct trace_array *tr = inode->i_private;
5035 trace_array_put(tr);
5039 static int tracing_single_release_tr(struct inode *inode, struct file *file)
5041 struct trace_array *tr = inode->i_private;
5043 trace_array_put(tr);
5045 return single_release(inode, file);
5048 static int tracing_open(struct inode *inode, struct file *file)
5050 struct trace_array *tr = inode->i_private;
5051 struct trace_iterator *iter;
5054 ret = tracing_check_open_get_tr(tr);
5058 /* If this file was open for write, then erase contents */
5059 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5060 int cpu = tracing_get_cpu(inode);
5061 struct array_buffer *trace_buf = &tr->array_buffer;
5063 #ifdef CONFIG_TRACER_MAX_TRACE
5064 if (tr->current_trace->print_max)
5065 trace_buf = &tr->max_buffer;
5068 if (cpu == RING_BUFFER_ALL_CPUS)
5069 tracing_reset_online_cpus(trace_buf);
5071 tracing_reset_cpu(trace_buf, cpu);
5074 if (file->f_mode & FMODE_READ) {
5075 iter = __tracing_open(inode, file, false);
5077 ret = PTR_ERR(iter);
5078 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5079 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5083 trace_array_put(tr);
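/*
 * The O_TRUNC handling above is what makes "echo > trace" clear the ring
 * buffer. A minimal userspace sketch of the same operation (path assumed
 * to be the usual tracefs mount point):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// Opening for write with O_TRUNC resets the buffer; nothing needs
 *	// to be written.
 *	int clear_trace(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC);
 *
 *		if (fd < 0)
 *			return -1;
 *		return close(fd);
 *	}
 */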
5089 * Some tracers are not suitable for instance buffers.
5090 * A tracer is always available for the global array (toplevel)
5091 * or if it explicitly states that it is.
5094 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5096 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5099 /* Find the next tracer that this trace array may use */
5100 static struct tracer *
5101 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5103 while (t && !trace_ok_for_array(t, tr))
5110 t_next(struct seq_file *m, void *v, loff_t *pos)
5112 struct trace_array *tr = m->private;
5113 struct tracer *t = v;
5118 t = get_tracer_for_array(tr, t->next);
5123 static void *t_start(struct seq_file *m, loff_t *pos)
5125 struct trace_array *tr = m->private;
5129 mutex_lock(&trace_types_lock);
5131 t = get_tracer_for_array(tr, trace_types);
5132 for (; t && l < *pos; t = t_next(m, t, &l))
5138 static void t_stop(struct seq_file *m, void *p)
5140 mutex_unlock(&trace_types_lock);
5143 static int t_show(struct seq_file *m, void *v)
5145 struct tracer *t = v;
5150 seq_puts(m, t->name);
5159 static const struct seq_operations show_traces_seq_ops = {
5166 static int show_traces_open(struct inode *inode, struct file *file)
5168 struct trace_array *tr = inode->i_private;
5172 ret = tracing_check_open_get_tr(tr);
5176 ret = seq_open(file, &show_traces_seq_ops);
5178 trace_array_put(tr);
5182 m = file->private_data;
5188 static int show_traces_release(struct inode *inode, struct file *file)
5190 struct trace_array *tr = inode->i_private;
5192 trace_array_put(tr);
5193 return seq_release(inode, file);
5197 tracing_write_stub(struct file *filp, const char __user *ubuf,
5198 size_t count, loff_t *ppos)
5203 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5207 if (file->f_mode & FMODE_READ)
5208 ret = seq_lseek(file, offset, whence);
5210 file->f_pos = ret = 0;
5215 static const struct file_operations tracing_fops = {
5216 .open = tracing_open,
5218 .read_iter = seq_read_iter,
5219 .splice_read = copy_splice_read,
5220 .write = tracing_write_stub,
5221 .llseek = tracing_lseek,
5222 .release = tracing_release,
5225 static const struct file_operations show_traces_fops = {
5226 .open = show_traces_open,
5228 .llseek = seq_lseek,
5229 .release = show_traces_release,
5233 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5234 size_t count, loff_t *ppos)
5236 struct trace_array *tr = file_inode(filp)->i_private;
5240 len = snprintf(NULL, 0, "%*pb\n",
5241 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5242 mask_str = kmalloc(len, GFP_KERNEL);
5246 len = snprintf(mask_str, len, "%*pb\n",
5247 cpumask_pr_args(tr->tracing_cpumask));
5252 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5260 int tracing_set_cpumask(struct trace_array *tr,
5261 cpumask_var_t tracing_cpumask_new)
5268 local_irq_disable();
5269 arch_spin_lock(&tr->max_lock);
5270 for_each_tracing_cpu(cpu) {
5272 * Increase/decrease the disabled counter if we are
5273 * about to flip a bit in the cpumask:
5275 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5276 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5277 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5278 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5280 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5281 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5282 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5283 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5286 arch_spin_unlock(&tr->max_lock);
5289 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5295 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5296 size_t count, loff_t *ppos)
5298 struct trace_array *tr = file_inode(filp)->i_private;
5299 cpumask_var_t tracing_cpumask_new;
5302 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5305 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5309 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5313 free_cpumask_var(tracing_cpumask_new);
5318 free_cpumask_var(tracing_cpumask_new);
5323 static const struct file_operations tracing_cpumask_fops = {
5324 .open = tracing_open_generic_tr,
5325 .read = tracing_cpumask_read,
5326 .write = tracing_cpumask_write,
5327 .release = tracing_release_generic_tr,
5328 .llseek = generic_file_llseek,
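/*
 * tracing_cpumask is read and written as a printable cpumask ("%*pb"), so
 * from userspace it looks like, e.g., "3" for CPUs 0-1. A minimal sketch
 * (mount point assumed):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// Restrict tracing to CPUs 0 and 1 by writing the hex mask "3".
 *	int trace_only_cpu0_and_1(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, "3\n", 2);
 *		close(fd);
 *		return n == 2 ? 0 : -1;
 *	}
 */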
5331 static int tracing_trace_options_show(struct seq_file *m, void *v)
5333 struct tracer_opt *trace_opts;
5334 struct trace_array *tr = m->private;
5338 mutex_lock(&trace_types_lock);
5339 tracer_flags = tr->current_trace->flags->val;
5340 trace_opts = tr->current_trace->flags->opts;
5342 for (i = 0; trace_options[i]; i++) {
5343 if (tr->trace_flags & (1 << i))
5344 seq_printf(m, "%s\n", trace_options[i]);
5346 seq_printf(m, "no%s\n", trace_options[i]);
5349 for (i = 0; trace_opts[i].name; i++) {
5350 if (tracer_flags & trace_opts[i].bit)
5351 seq_printf(m, "%s\n", trace_opts[i].name);
5353 seq_printf(m, "no%s\n", trace_opts[i].name);
5355 mutex_unlock(&trace_types_lock);
5360 static int __set_tracer_option(struct trace_array *tr,
5361 struct tracer_flags *tracer_flags,
5362 struct tracer_opt *opts, int neg)
5364 struct tracer *trace = tracer_flags->trace;
5367 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5372 tracer_flags->val &= ~opts->bit;
5374 tracer_flags->val |= opts->bit;
5378 /* Try to assign a tracer specific option */
5379 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5381 struct tracer *trace = tr->current_trace;
5382 struct tracer_flags *tracer_flags = trace->flags;
5383 struct tracer_opt *opts = NULL;
5386 for (i = 0; tracer_flags->opts[i].name; i++) {
5387 opts = &tracer_flags->opts[i];
5389 if (strcmp(cmp, opts->name) == 0)
5390 return __set_tracer_option(tr, trace->flags, opts, neg);
5396 /* Some tracers require overwrite to stay enabled */
5397 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5399 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5405 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5409 if ((mask == TRACE_ITER_RECORD_TGID) ||
5410 (mask == TRACE_ITER_RECORD_CMD))
5411 lockdep_assert_held(&event_mutex);
5413 /* do nothing if flag is already set */
5414 if (!!(tr->trace_flags & mask) == !!enabled)
5417 /* Give the tracer a chance to approve the change */
5418 if (tr->current_trace->flag_changed)
5419 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5423 tr->trace_flags |= mask;
5425 tr->trace_flags &= ~mask;
5427 if (mask == TRACE_ITER_RECORD_CMD)
5428 trace_event_enable_cmd_record(enabled);
5430 if (mask == TRACE_ITER_RECORD_TGID) {
5432 tgid_map_max = pid_max;
5433 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5437 * Pairs with smp_load_acquire() in
5438 * trace_find_tgid_ptr() to ensure that if it observes
5439 * the tgid_map we just allocated then it also observes
5440 * the corresponding tgid_map_max value.
5442 smp_store_release(&tgid_map, map);
5445 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5449 trace_event_enable_tgid_record(enabled);
5452 if (mask == TRACE_ITER_EVENT_FORK)
5453 trace_event_follow_fork(tr, enabled);
5455 if (mask == TRACE_ITER_FUNC_FORK)
5456 ftrace_pid_follow_fork(tr, enabled);
5458 if (mask == TRACE_ITER_OVERWRITE) {
5459 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5460 #ifdef CONFIG_TRACER_MAX_TRACE
5461 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5465 if (mask == TRACE_ITER_PRINTK) {
5466 trace_printk_start_stop_comm(enabled);
5467 trace_printk_control(enabled);
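/*
 * The smp_store_release()/smp_load_acquire() pairing used for tgid_map
 * above follows the usual pointer-publish pattern. As a standalone
 * illustration (plain C11 atomics, hypothetical names, not kernel code):
 *
 *	#include <stdatomic.h>
 *
 *	struct map { int max; int *data; };
 *	static _Atomic(struct map *) published;
 *
 *	// writer: initialize everything, then release-store the pointer
 *	void publish(struct map *m)
 *	{
 *		atomic_store_explicit(&published, m, memory_order_release);
 *	}
 *
 *	// reader: acquire-load the pointer; if non-NULL, all stores made
 *	// before the release-store are guaranteed to be visible
 *	struct map *lookup(void)
 *	{
 *		return atomic_load_explicit(&published, memory_order_acquire);
 *	}
 */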
5473 int trace_set_options(struct trace_array *tr, char *option)
5478 size_t orig_len = strlen(option);
5481 cmp = strstrip(option);
5483 len = str_has_prefix(cmp, "no");
5489 mutex_lock(&event_mutex);
5490 mutex_lock(&trace_types_lock);
5492 ret = match_string(trace_options, -1, cmp);
5493 /* If no option could be set, test the specific tracer options */
5495 ret = set_tracer_option(tr, cmp, neg);
5497 ret = set_tracer_flag(tr, 1 << ret, !neg);
5499 mutex_unlock(&trace_types_lock);
5500 mutex_unlock(&event_mutex);
5503 * If the first trailing whitespace is replaced with '\0' by strstrip,
5504 * turn it back into a space.
5506 if (orig_len > strlen(option))
5507 option[strlen(option)] = ' ';
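/*
 * From userspace, the "no" prefix handled above is used through the
 * trace_options file. A minimal sketch (mount point assumed):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// Disable the print-parent option; writing "print-parent" would
 *	// turn it back on.
 *	int disable_print_parent(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, "noprint-parent", 14);
 *		close(fd);
 *		return n == 14 ? 0 : -1;
 *	}
 */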
5512 static void __init apply_trace_boot_options(void)
5514 char *buf = trace_boot_options_buf;
5518 option = strsep(&buf, ",");
5524 trace_set_options(&global_trace, option);
5526 /* Put back the comma to allow this to be called again */
5533 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5534 size_t cnt, loff_t *ppos)
5536 struct seq_file *m = filp->private_data;
5537 struct trace_array *tr = m->private;
5541 if (cnt >= sizeof(buf))
5544 if (copy_from_user(buf, ubuf, cnt))
5549 ret = trace_set_options(tr, buf);
5558 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5560 struct trace_array *tr = inode->i_private;
5563 ret = tracing_check_open_get_tr(tr);
5567 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5569 trace_array_put(tr);
5574 static const struct file_operations tracing_iter_fops = {
5575 .open = tracing_trace_options_open,
5577 .llseek = seq_lseek,
5578 .release = tracing_single_release_tr,
5579 .write = tracing_trace_options_write,
5582 static const char readme_msg[] =
5583 "tracing mini-HOWTO:\n\n"
5584 "# echo 0 > tracing_on : quick way to disable tracing\n"
5585 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5586 " Important files:\n"
5587 " trace\t\t\t- The static contents of the buffer\n"
5588 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5589 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5590 " current_tracer\t- function and latency tracers\n"
5591 " available_tracers\t- list of configured tracers for current_tracer\n"
5592 " error_log\t- error log for failed commands (that support it)\n"
5593 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5594 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5595 " trace_clock\t\t- change the clock used to order events\n"
5596 " local: Per cpu clock but may not be synced across CPUs\n"
5597 " global: Synced across CPUs but slows tracing down.\n"
5598 " counter: Not a clock, but just an increment\n"
5599 " uptime: Jiffy counter from time of boot\n"
5600 " perf: Same clock that perf events use\n"
5601 #ifdef CONFIG_X86_64
5602 " x86-tsc: TSC cycle counter\n"
5604 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5605 " delta: Delta difference against a buffer-wide timestamp\n"
5606 " absolute: Absolute (standalone) timestamp\n"
5607 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5608 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5609 " tracing_cpumask\t- Limit which CPUs to trace\n"
5610 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5611 "\t\t\t Remove sub-buffer with rmdir\n"
5612 " trace_options\t\t- Set format or modify how tracing happens\n"
5613 "\t\t\t Disable an option by prefixing 'no' to the\n"
5614 "\t\t\t option name\n"
5615 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5616 #ifdef CONFIG_DYNAMIC_FTRACE
5617 "\n available_filter_functions - list of functions that can be filtered on\n"
5618 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5619 "\t\t\t functions\n"
5620 "\t accepts: func_full_name or glob-matching-pattern\n"
5621 "\t modules: Can select a group via module\n"
5622 "\t Format: :mod:<module-name>\n"
5623 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5624 "\t triggers: a command to perform when function is hit\n"
5625 "\t Format: <function>:<trigger>[:count]\n"
5626 "\t trigger: traceon, traceoff\n"
5627 "\t\t enable_event:<system>:<event>\n"
5628 "\t\t disable_event:<system>:<event>\n"
5629 #ifdef CONFIG_STACKTRACE
5632 #ifdef CONFIG_TRACER_SNAPSHOT
5637 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5638 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5639 "\t The first one will disable tracing every time do_fault is hit\n"
5640 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5641 "\t The first time do trap is hit and it disables tracing, the\n"
5642 "\t counter will decrement to 2. If tracing is already disabled,\n"
5643 "\t the counter will not decrement. It only decrements when the\n"
5644 "\t trigger did work\n"
5645 "\t To remove trigger without count:\n"
5646 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5647 "\t To remove trigger with a count:\n"
5648 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5649 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5650 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5651 "\t modules: Can select a group via module command :mod:\n"
5652 "\t Does not accept triggers\n"
5653 #endif /* CONFIG_DYNAMIC_FTRACE */
5654 #ifdef CONFIG_FUNCTION_TRACER
5655 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5657 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5660 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5661 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5662 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5663 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5665 #ifdef CONFIG_TRACER_SNAPSHOT
5666 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5667 "\t\t\t snapshot buffer. Read the contents for more\n"
5668 "\t\t\t information\n"
5670 #ifdef CONFIG_STACK_TRACER
5671 " stack_trace\t\t- Shows the max stack trace when active\n"
5672 " stack_max_size\t- Shows current max stack size that was traced\n"
5673 "\t\t\t Write into this file to reset the max size (trigger a\n"
5674 "\t\t\t new trace)\n"
5675 #ifdef CONFIG_DYNAMIC_FTRACE
5676 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5679 #endif /* CONFIG_STACK_TRACER */
5680 #ifdef CONFIG_DYNAMIC_EVENTS
5681 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5682 "\t\t\t Write into this file to define/undefine new trace events.\n"
5684 #ifdef CONFIG_KPROBE_EVENTS
5685 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5686 "\t\t\t Write into this file to define/undefine new trace events.\n"
5688 #ifdef CONFIG_UPROBE_EVENTS
5689 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5690 "\t\t\t Write into this file to define/undefine new trace events.\n"
5692 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5693 defined(CONFIG_FPROBE_EVENTS)
5694 "\t accepts: event-definitions (one definition per line)\n"
5695 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5696 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5697 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5699 #ifdef CONFIG_FPROBE_EVENTS
5700 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5701 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5703 #ifdef CONFIG_HIST_TRIGGERS
5704 "\t s:[synthetic/]<event> <field> [<field>]\n"
5706 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5707 "\t -:[<group>/][<event>]\n"
5708 #ifdef CONFIG_KPROBE_EVENTS
5709 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5710 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5712 #ifdef CONFIG_UPROBE_EVENTS
5713 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5715 "\t args: <name>=fetcharg[:type]\n"
5716 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5717 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5718 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5719 "\t $stack<index>, $stack, $retval, $comm, $arg<N>, <argname>\n"
5721 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5724 "\t $stack<index>, $stack, $retval, $comm,\n"
5726 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5727 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5728 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5729 "\t symstr, <type>\\[<array-size>\\]\n"
5730 #ifdef CONFIG_HIST_TRIGGERS
5731 "\t field: <stype> <name>;\n"
5732 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5733 "\t [unsigned] char/int/long\n"
5735 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5736 "\t of the <attached-group>/<attached-event>.\n"
5738 " events/\t\t- Directory containing all trace event subsystems:\n"
5739 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5740 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5741 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5743 " filter\t\t- If set, only events passing filter are traced\n"
5744 " events/<system>/<event>/\t- Directory containing control files for\n"
5746 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5747 " filter\t\t- If set, only events passing filter are traced\n"
5748 " trigger\t\t- If set, a command to perform when event is hit\n"
5749 "\t Format: <trigger>[:count][if <filter>]\n"
5750 "\t trigger: traceon, traceoff\n"
5751 "\t enable_event:<system>:<event>\n"
5752 "\t disable_event:<system>:<event>\n"
5753 #ifdef CONFIG_HIST_TRIGGERS
5754 "\t enable_hist:<system>:<event>\n"
5755 "\t disable_hist:<system>:<event>\n"
5757 #ifdef CONFIG_STACKTRACE
5760 #ifdef CONFIG_TRACER_SNAPSHOT
5763 #ifdef CONFIG_HIST_TRIGGERS
5764 "\t\t hist (see below)\n"
5766 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5767 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5768 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5769 "\t events/block/block_unplug/trigger\n"
5770 "\t The first disables tracing every time block_unplug is hit.\n"
5771 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5772 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5773 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5774 "\t Like function triggers, the counter is only decremented if it\n"
5775 "\t enabled or disabled tracing.\n"
5776 "\t To remove a trigger without a count:\n"
5777 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5778 "\t To remove a trigger with a count:\n"
5779 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5780 "\t Filters can be ignored when removing a trigger.\n"
5781 #ifdef CONFIG_HIST_TRIGGERS
5782 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5783 "\t Format: hist:keys=<field1[,field2,...]>\n"
5784 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5785 "\t [:values=<field1[,field2,...]>]\n"
5786 "\t [:sort=<field1[,field2,...]>]\n"
5787 "\t [:size=#entries]\n"
5788 "\t [:pause][:continue][:clear]\n"
5789 "\t [:name=histname1]\n"
5790 "\t [:nohitcount]\n"
5791 "\t [:<handler>.<action>]\n"
5792 "\t [if <filter>]\n\n"
5793 "\t Note, special fields can be used as well:\n"
5794 "\t common_timestamp - to record current timestamp\n"
5795 "\t common_cpu - to record the CPU the event happened on\n"
5797 "\t A hist trigger variable can be:\n"
5798 "\t - a reference to a field e.g. x=current_timestamp,\n"
5799 "\t - a reference to another variable e.g. y=$x,\n"
5800 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5801 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5803 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5804 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5805 "\t variable reference, field or numeric literal.\n"
5807 "\t When a matching event is hit, an entry is added to a hash\n"
5808 "\t table using the key(s) and value(s) named, and the value of a\n"
5809 "\t sum called 'hitcount' is incremented. Keys and values\n"
5810 "\t correspond to fields in the event's format description. Keys\n"
5811 "\t can be any field, or the special string 'common_stacktrace'.\n"
5812 "\t Compound keys consisting of up to two fields can be specified\n"
5813 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5814 "\t fields. Sort keys consisting of up to two fields can be\n"
5815 "\t specified using the 'sort' keyword. The sort direction can\n"
5816 "\t be modified by appending '.descending' or '.ascending' to a\n"
5817 "\t sort field. The 'size' parameter can be used to specify more\n"
5818 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5819 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5820 "\t its histogram data will be shared with other triggers of the\n"
5821 "\t same name, and trigger hits will update this common data.\n\n"
5822 "\t Reading the 'hist' file for the event will dump the hash\n"
5823 "\t table in its entirety to stdout. If there are multiple hist\n"
5824 "\t triggers attached to an event, there will be a table for each\n"
5825 "\t trigger in the output. The table displayed for a named\n"
5826 "\t trigger will be the same as any other instance having the\n"
5827 "\t same name. The default format used to display a given field\n"
5828 "\t can be modified by appending any of the following modifiers\n"
5829 "\t to the field name, as applicable:\n\n"
5830 "\t .hex display a number as a hex value\n"
5831 "\t .sym display an address as a symbol\n"
5832 "\t .sym-offset display an address as a symbol and offset\n"
5833 "\t .execname display a common_pid as a program name\n"
5834 "\t .syscall display a syscall id as a syscall name\n"
5835 "\t .log2 display log2 value rather than raw number\n"
5836 "\t .buckets=size display values in groups of size rather than raw number\n"
5837 "\t .usecs display a common_timestamp in microseconds\n"
5838 "\t .percent display a number of percentage value\n"
5839 "\t .graph display a bar-graph of a value\n\n"
5840 "\t The 'pause' parameter can be used to pause an existing hist\n"
5841 "\t trigger or to start a hist trigger but not log any events\n"
5842 "\t until told to do so. 'continue' can be used to start or\n"
5843 "\t restart a paused hist trigger.\n\n"
5844 "\t The 'clear' parameter will clear the contents of a running\n"
5845 "\t hist trigger and leave its current paused/active state\n"
5847 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5848 "\t raw hitcount in the histogram.\n\n"
5849 "\t The enable_hist and disable_hist triggers can be used to\n"
5850 "\t have one event conditionally start and stop another event's\n"
5851 "\t already-attached hist trigger. The syntax is analogous to\n"
5852 "\t the enable_event and disable_event triggers.\n\n"
5853 "\t Hist trigger handlers and actions are executed whenever a\n"
5854 "\t a histogram entry is added or updated. They take the form:\n\n"
5855 "\t <handler>.<action>\n\n"
5856 "\t The available handlers are:\n\n"
5857 "\t onmatch(matching.event) - invoke on addition or update\n"
5858 "\t onmax(var) - invoke if var exceeds current max\n"
5859 "\t onchange(var) - invoke action if var changes\n\n"
5860 "\t The available actions are:\n\n"
5861 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5862 "\t save(field,...) - save current event fields\n"
5863 #ifdef CONFIG_TRACER_SNAPSHOT
5864 "\t snapshot() - snapshot the trace buffer\n\n"
5866 #ifdef CONFIG_SYNTH_EVENTS
5867 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5868 "\t Write into this file to define/undefine new synthetic events.\n"
5869 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5875 tracing_readme_read(struct file *filp, char __user *ubuf,
5876 size_t cnt, loff_t *ppos)
5878 return simple_read_from_buffer(ubuf, cnt, ppos,
5879 readme_msg, strlen(readme_msg));
5882 static const struct file_operations tracing_readme_fops = {
5883 .open = tracing_open_generic,
5884 .read = tracing_readme_read,
5885 .llseek = generic_file_llseek,
5888 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5892 return trace_find_tgid_ptr(pid);
5895 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5899 return trace_find_tgid_ptr(pid);
5902 static void saved_tgids_stop(struct seq_file *m, void *v)
5906 static int saved_tgids_show(struct seq_file *m, void *v)
5908 int *entry = (int *)v;
5909 int pid = entry - tgid_map;
5915 seq_printf(m, "%d %d\n", pid, tgid);
5919 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5920 .start = saved_tgids_start,
5921 .stop = saved_tgids_stop,
5922 .next = saved_tgids_next,
5923 .show = saved_tgids_show,
5926 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5930 ret = tracing_check_open_get_tr(NULL);
5934 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5938 static const struct file_operations tracing_saved_tgids_fops = {
5939 .open = tracing_saved_tgids_open,
5941 .llseek = seq_lseek,
5942 .release = seq_release,
5945 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5947 unsigned int *ptr = v;
5949 if (*pos || m->count)
5954 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5956 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5965 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5971 arch_spin_lock(&trace_cmdline_lock);
5973 v = &savedcmd->map_cmdline_to_pid[0];
5975 v = saved_cmdlines_next(m, v, &l);
5983 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5985 arch_spin_unlock(&trace_cmdline_lock);
5989 static int saved_cmdlines_show(struct seq_file *m, void *v)
5991 char buf[TASK_COMM_LEN];
5992 unsigned int *pid = v;
5994 __trace_find_cmdline(*pid, buf);
5995 seq_printf(m, "%d %s\n", *pid, buf);
5999 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
6000 .start = saved_cmdlines_start,
6001 .next = saved_cmdlines_next,
6002 .stop = saved_cmdlines_stop,
6003 .show = saved_cmdlines_show,
6006 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
6010 ret = tracing_check_open_get_tr(NULL);
6014 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
6017 static const struct file_operations tracing_saved_cmdlines_fops = {
6018 .open = tracing_saved_cmdlines_open,
6020 .llseek = seq_lseek,
6021 .release = seq_release,
6025 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
6026 size_t cnt, loff_t *ppos)
6032 arch_spin_lock(&trace_cmdline_lock);
6033 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6034 arch_spin_unlock(&trace_cmdline_lock);
6037 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6040 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
6042 kfree(s->saved_cmdlines);
6043 kfree(s->map_cmdline_to_pid);
6047 static int tracing_resize_saved_cmdlines(unsigned int val)
6049 struct saved_cmdlines_buffer *s, *savedcmd_temp;
6051 s = kmalloc(sizeof(*s), GFP_KERNEL);
6055 if (allocate_cmdlines_buffer(val, s) < 0) {
6061 arch_spin_lock(&trace_cmdline_lock);
6062 savedcmd_temp = savedcmd;
6064 arch_spin_unlock(&trace_cmdline_lock);
6066 free_saved_cmdlines_buffer(savedcmd_temp);
6072 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6073 size_t cnt, loff_t *ppos)
6078 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6082 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
6083 if (!val || val > PID_MAX_DEFAULT)
6086 ret = tracing_resize_saved_cmdlines((unsigned int)val);
6095 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6096 .open = tracing_open_generic,
6097 .read = tracing_saved_cmdlines_size_read,
6098 .write = tracing_saved_cmdlines_size_write,
6101 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6102 static union trace_eval_map_item *
6103 update_eval_map(union trace_eval_map_item *ptr)
6105 if (!ptr->map.eval_string) {
6106 if (ptr->tail.next) {
6107 ptr = ptr->tail.next;
6108 /* Set ptr to the next real item (skip head) */
6116 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6118 union trace_eval_map_item *ptr = v;
6121 * Paranoid! If ptr points to end, we don't want to increment past it.
6122 * This really should never happen.
6125 ptr = update_eval_map(ptr);
6126 if (WARN_ON_ONCE(!ptr))
6130 ptr = update_eval_map(ptr);
6135 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6137 union trace_eval_map_item *v;
6140 mutex_lock(&trace_eval_mutex);
6142 v = trace_eval_maps;
6146 while (v && l < *pos) {
6147 v = eval_map_next(m, v, &l);
6153 static void eval_map_stop(struct seq_file *m, void *v)
6155 mutex_unlock(&trace_eval_mutex);
6158 static int eval_map_show(struct seq_file *m, void *v)
6160 union trace_eval_map_item *ptr = v;
6162 seq_printf(m, "%s %ld (%s)\n",
6163 ptr->map.eval_string, ptr->map.eval_value,
6169 static const struct seq_operations tracing_eval_map_seq_ops = {
6170 .start = eval_map_start,
6171 .next = eval_map_next,
6172 .stop = eval_map_stop,
6173 .show = eval_map_show,
6176 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6180 ret = tracing_check_open_get_tr(NULL);
6184 return seq_open(filp, &tracing_eval_map_seq_ops);
6187 static const struct file_operations tracing_eval_map_fops = {
6188 .open = tracing_eval_map_open,
6190 .llseek = seq_lseek,
6191 .release = seq_release,
6194 static inline union trace_eval_map_item *
6195 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6197 /* Return tail of array given the head */
6198 return ptr + ptr->head.length + 1;
6202 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6205 struct trace_eval_map **stop;
6206 struct trace_eval_map **map;
6207 union trace_eval_map_item *map_array;
6208 union trace_eval_map_item *ptr;
6213 * The trace_eval_maps contains the map plus a head and tail item,
6214 * where the head holds the module and length of array, and the
6215 * tail holds a pointer to the next list.
6217 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6219 pr_warn("Unable to allocate trace eval mapping\n");
6223 mutex_lock(&trace_eval_mutex);
6225 if (!trace_eval_maps)
6226 trace_eval_maps = map_array;
6228 ptr = trace_eval_maps;
6230 ptr = trace_eval_jmp_to_tail(ptr);
6231 if (!ptr->tail.next)
6233 ptr = ptr->tail.next;
6236 ptr->tail.next = map_array;
6238 map_array->head.mod = mod;
6239 map_array->head.length = len;
6242 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6243 map_array->map = **map;
6246 memset(map_array, 0, sizeof(*map_array));
6248 mutex_unlock(&trace_eval_mutex);
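/*
 * The resulting layout of each chunk appended above is:
 *
 *	map_array[0]          head  (mod, length = len)
 *	map_array[1..len]     the copied trace_eval_map entries
 *	map_array[len + 1]    tail  (zeroed; tail.next links the next chunk)
 *
 * which is what trace_eval_jmp_to_tail() relies on when it skips
 * "ptr + ptr->head.length + 1" entries to reach the next list.
 */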
6251 static void trace_create_eval_file(struct dentry *d_tracer)
6253 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6254 NULL, &tracing_eval_map_fops);
6257 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6258 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6259 static inline void trace_insert_eval_map_file(struct module *mod,
6260 struct trace_eval_map **start, int len) { }
6261 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6263 static void trace_insert_eval_map(struct module *mod,
6264 struct trace_eval_map **start, int len)
6266 struct trace_eval_map **map;
6273 trace_event_eval_update(map, len);
6275 trace_insert_eval_map_file(mod, start, len);
6279 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6280 size_t cnt, loff_t *ppos)
6282 struct trace_array *tr = filp->private_data;
6283 char buf[MAX_TRACER_SIZE+2];
6286 mutex_lock(&trace_types_lock);
6287 r = sprintf(buf, "%s\n", tr->current_trace->name);
6288 mutex_unlock(&trace_types_lock);
6290 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6293 int tracer_init(struct tracer *t, struct trace_array *tr)
6295 tracing_reset_online_cpus(&tr->array_buffer);
6299 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6303 for_each_tracing_cpu(cpu)
6304 per_cpu_ptr(buf->data, cpu)->entries = val;
6307 #ifdef CONFIG_TRACER_MAX_TRACE
6308 /* resize @tr's buffer to the size of @size_tr's entries */
6309 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6310 struct array_buffer *size_buf, int cpu_id)
6314 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6315 for_each_tracing_cpu(cpu) {
6316 ret = ring_buffer_resize(trace_buf->buffer,
6317 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6320 per_cpu_ptr(trace_buf->data, cpu)->entries =
6321 per_cpu_ptr(size_buf->data, cpu)->entries;
6324 ret = ring_buffer_resize(trace_buf->buffer,
6325 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6327 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6328 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6333 #endif /* CONFIG_TRACER_MAX_TRACE */
6335 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6336 unsigned long size, int cpu)
6341 * If the kernel or the user changes the size of the ring buffer,
6342 * we use the size that was given, and we can forget about
6343 * expanding it later.
6345 ring_buffer_expanded = true;
6347 /* May be called before buffers are initialized */
6348 if (!tr->array_buffer.buffer)
6351 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6355 #ifdef CONFIG_TRACER_MAX_TRACE
6356 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6357 !tr->current_trace->use_max_tr)
6360 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6362 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6363 &tr->array_buffer, cpu);
6366 * AARGH! We are left with different
6367 * size max buffer!!!!
6368 * The max buffer is our "snapshot" buffer.
6369 * When a tracer needs a snapshot (one of the
6370 * latency tracers), it swaps the max buffer
6371 * with the saved snapshot. We succeeded in updating
6372 * the size of the main buffer, but failed to
6373 * update the size of the max buffer. But when we tried
6374 * to reset the main buffer to the original size, we
6375 * failed there too. This is very unlikely to
6376 * happen, but if it does, warn and kill all
6380 tracing_disabled = 1;
6385 if (cpu == RING_BUFFER_ALL_CPUS)
6386 set_buffer_entries(&tr->max_buffer, size);
6388 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6391 #endif /* CONFIG_TRACER_MAX_TRACE */
6393 if (cpu == RING_BUFFER_ALL_CPUS)
6394 set_buffer_entries(&tr->array_buffer, size);
6396 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6401 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6402 unsigned long size, int cpu_id)
6406 mutex_lock(&trace_types_lock);
6408 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6409 /* make sure this cpu is enabled in the mask */
6410 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6416 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6421 mutex_unlock(&trace_types_lock);
6428 * tracing_update_buffers - used by tracing facility to expand ring buffers
6430 * To save memory when tracing is never used on a system that has it
6431 * configured in, the ring buffers are set to a minimum size. But once
6432 * a user starts to use the tracing facility, they need to grow
6433 * to their default size.
6435 * This function is to be called when a tracer is about to be used.
6437 int tracing_update_buffers(void)
6441 mutex_lock(&trace_types_lock);
6442 if (!ring_buffer_expanded)
6443 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6444 RING_BUFFER_ALL_CPUS);
6445 mutex_unlock(&trace_types_lock);
6450 struct trace_option_dentry;
6453 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6456 * Used to clear out the tracer before deletion of an instance.
6457 * Must have trace_types_lock held.
6459 static void tracing_set_nop(struct trace_array *tr)
6461 if (tr->current_trace == &nop_trace)
6464 tr->current_trace->enabled--;
6466 if (tr->current_trace->reset)
6467 tr->current_trace->reset(tr);
6469 tr->current_trace = &nop_trace;
6472 static bool tracer_options_updated;
6474 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6476 /* Only enable if the directory has been created already. */
6480 /* Only create trace option files after update_tracer_options finish */
6481 if (!tracer_options_updated)
6484 create_trace_option_files(tr, t);
6487 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6490 #ifdef CONFIG_TRACER_MAX_TRACE
6495 mutex_lock(&trace_types_lock);
6497 if (!ring_buffer_expanded) {
6498 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6499 RING_BUFFER_ALL_CPUS);
6505 for (t = trace_types; t; t = t->next) {
6506 if (strcmp(t->name, buf) == 0)
6513 if (t == tr->current_trace)
6516 #ifdef CONFIG_TRACER_SNAPSHOT
6517 if (t->use_max_tr) {
6518 local_irq_disable();
6519 arch_spin_lock(&tr->max_lock);
6520 if (tr->cond_snapshot)
6522 arch_spin_unlock(&tr->max_lock);
6528 /* Some tracers won't work on kernel command line */
6529 if (system_state < SYSTEM_RUNNING && t->noboot) {
6530 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6535 /* Some tracers are only allowed for the top level buffer */
6536 if (!trace_ok_for_array(t, tr)) {
6541 /* If trace pipe files are being read, we can't change the tracer */
6542 if (tr->trace_ref) {
6547 trace_branch_disable();
6549 tr->current_trace->enabled--;
6551 if (tr->current_trace->reset)
6552 tr->current_trace->reset(tr);
6554 #ifdef CONFIG_TRACER_MAX_TRACE
6555 had_max_tr = tr->current_trace->use_max_tr;
6557 /* Current trace needs to be nop_trace before synchronize_rcu */
6558 tr->current_trace = &nop_trace;
6560 if (had_max_tr && !t->use_max_tr) {
6562 * We need to make sure that the update_max_tr sees that
6563 * current_trace changed to nop_trace to keep it from
6564 * swapping the buffers after we resize it.
6565 * The update_max_tr is called with interrupts disabled,
6566 * so a synchronize_rcu() is sufficient.
6572 if (t->use_max_tr && !tr->allocated_snapshot) {
6573 ret = tracing_alloc_snapshot_instance(tr);
6578 tr->current_trace = &nop_trace;
6582 ret = tracer_init(t, tr);
6587 tr->current_trace = t;
6588 tr->current_trace->enabled++;
6589 trace_branch_enable(tr);
6591 mutex_unlock(&trace_types_lock);
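/*
 * Selecting a tracer from userspace is just a write to current_tracer;
 * the checks above surface as -EINVAL (unknown or disallowed tracer) or
 * -EBUSY (trace_pipe readers active). A minimal sketch (mount point and
 * tracer name assumed):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int set_tracer(const char *name)
 *	{
 *		int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, name, strlen(name));
 *		close(fd);
 *		return n < 0 ? -1 : 0;
 *	}
 *
 * e.g. set_tracer("nop") to stop whatever tracer is currently running.
 */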
6597 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6598 size_t cnt, loff_t *ppos)
6600 struct trace_array *tr = filp->private_data;
6601 char buf[MAX_TRACER_SIZE+1];
6608 if (cnt > MAX_TRACER_SIZE)
6609 cnt = MAX_TRACER_SIZE;
6611 if (copy_from_user(buf, ubuf, cnt))
6618 err = tracing_set_tracer(tr, name);
6628 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6629 size_t cnt, loff_t *ppos)
6634 r = snprintf(buf, sizeof(buf), "%ld\n",
6635 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6636 if (r > sizeof(buf))
6638 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6642 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6643 size_t cnt, loff_t *ppos)
6648 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6658 tracing_thresh_read(struct file *filp, char __user *ubuf,
6659 size_t cnt, loff_t *ppos)
6661 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6665 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6666 size_t cnt, loff_t *ppos)
6668 struct trace_array *tr = filp->private_data;
6671 mutex_lock(&trace_types_lock);
6672 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6676 if (tr->current_trace->update_thresh) {
6677 ret = tr->current_trace->update_thresh(tr);
6684 mutex_unlock(&trace_types_lock);
6689 #ifdef CONFIG_TRACER_MAX_TRACE
6692 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6693 size_t cnt, loff_t *ppos)
6695 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6699 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6700 size_t cnt, loff_t *ppos)
6702 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6707 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6709 struct trace_array *tr = inode->i_private;
6710 struct trace_iterator *iter;
6713 ret = tracing_check_open_get_tr(tr);
6717 mutex_lock(&trace_types_lock);
6719 /* create a buffer to store the information to pass to userspace */
6720 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6723 __trace_array_put(tr);
6727 trace_seq_init(&iter->seq);
6728 iter->trace = tr->current_trace;
6730 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6735 /* trace pipe does not show start of buffer */
6736 cpumask_setall(iter->started);
6738 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6739 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6741 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6742 if (trace_clocks[tr->clock_id].in_ns)
6743 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6746 iter->array_buffer = &tr->array_buffer;
6747 iter->cpu_file = tracing_get_cpu(inode);
6748 mutex_init(&iter->mutex);
6749 filp->private_data = iter;
6751 if (iter->trace->pipe_open)
6752 iter->trace->pipe_open(iter);
6754 nonseekable_open(inode, filp);
6758 mutex_unlock(&trace_types_lock);
6763 __trace_array_put(tr);
6764 mutex_unlock(&trace_types_lock);
6768 static int tracing_release_pipe(struct inode *inode, struct file *file)
6770 struct trace_iterator *iter = file->private_data;
6771 struct trace_array *tr = inode->i_private;
6773 mutex_lock(&trace_types_lock);
6777 if (iter->trace->pipe_close)
6778 iter->trace->pipe_close(iter);
6780 mutex_unlock(&trace_types_lock);
6782 free_cpumask_var(iter->started);
6785 mutex_destroy(&iter->mutex);
6788 trace_array_put(tr);
6794 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6796 struct trace_array *tr = iter->tr;
6798 /* Iterators are static, they should be filled or empty */
6799 if (trace_buffer_iter(iter, iter->cpu_file))
6800 return EPOLLIN | EPOLLRDNORM;
6802 if (tr->trace_flags & TRACE_ITER_BLOCK)
6804 * Always select as readable when in blocking mode
6806 return EPOLLIN | EPOLLRDNORM;
6808 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6809 filp, poll_table, iter->tr->buffer_percent);
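/*
 * This is what lets a reader sleep in poll()/epoll on trace_pipe until the
 * buffer has data (or buffer_percent is reached). A minimal sketch
 * (mount point assumed):
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int wait_for_trace_data(void)
 *	{
 *		struct pollfd pfd;
 *		char buf[4096];
 *
 *		pfd.fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *		if (pfd.fd < 0)
 *			return -1;
 *		pfd.events = POLLIN;
 *		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *			read(pfd.fd, buf, sizeof(buf));   // consuming read
 *		return close(pfd.fd);
 *	}
 */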
6813 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6815 struct trace_iterator *iter = filp->private_data;
6817 return trace_poll(iter, filp, poll_table);
6820 /* Must be called with iter->mutex held. */
6821 static int tracing_wait_pipe(struct file *filp)
6823 struct trace_iterator *iter = filp->private_data;
6826 while (trace_empty(iter)) {
6828 if ((filp->f_flags & O_NONBLOCK)) {
6833 * We block until we read something and tracing is disabled.
6834 * We still block if tracing is disabled, but we have never
6835 * read anything. This allows a user to cat this file, and
6836 * then enable tracing. But after we have read something,
6837 * we give an EOF when tracing is again disabled.
6839 * iter->pos will be 0 if we haven't read anything.
6841 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6844 mutex_unlock(&iter->mutex);
6846 ret = wait_on_pipe(iter, 0);
6848 mutex_lock(&iter->mutex);
6861 tracing_read_pipe(struct file *filp, char __user *ubuf,
6862 size_t cnt, loff_t *ppos)
6864 struct trace_iterator *iter = filp->private_data;
6868 * Avoid more than one consumer on a single file descriptor.
6869 * This is just a matter of trace coherency; the ring buffer itself is protected.
6872 mutex_lock(&iter->mutex);
6874 /* return any leftover data */
6875 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6879 trace_seq_init(&iter->seq);
6881 if (iter->trace->read) {
6882 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6888 sret = tracing_wait_pipe(filp);
6892 /* stop when tracing is finished */
6893 if (trace_empty(iter)) {
6898 if (cnt >= PAGE_SIZE)
6899 cnt = PAGE_SIZE - 1;
6901 /* reset all but tr, trace, and overruns */
6902 trace_iterator_reset(iter);
6903 cpumask_clear(iter->started);
6904 trace_seq_init(&iter->seq);
6906 trace_event_read_lock();
6907 trace_access_lock(iter->cpu_file);
6908 while (trace_find_next_entry_inc(iter) != NULL) {
6909 enum print_line_t ret;
6910 int save_len = iter->seq.seq.len;
6912 ret = print_trace_line(iter);
6913 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6915 * If one print_trace_line() fills the entire trace_seq in one shot,
6916 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6917 * In this case, we need to consume it, otherwise the loop will peek
6918 * this event next time, resulting in an infinite loop.
6920 if (save_len == 0) {
6922 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6923 trace_consume(iter);
6927 /* In other cases, don't print partial lines */
6928 iter->seq.seq.len = save_len;
6931 if (ret != TRACE_TYPE_NO_CONSUME)
6932 trace_consume(iter);
6934 if (trace_seq_used(&iter->seq) >= cnt)
6938 * Setting the full flag means we reached the trace_seq buffer
6939 * size and should have left via the partial-output condition above.
6940 * If we get here, one of the trace_seq_* functions was not used properly.
6942 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6945 trace_access_unlock(iter->cpu_file);
6946 trace_event_read_unlock();
6948 /* Now copy what we have to the user */
6949 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6950 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6951 trace_seq_init(&iter->seq);
6954 * If there was nothing to send to user, in spite of consuming trace
6955 * entries, go back to wait for more entries.
6961 mutex_unlock(&iter->mutex);
6966 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6969 __free_page(spd->pages[idx]);
6973 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6979 /* Seq buffer is page-sized, exactly what we need. */
6981 save_len = iter->seq.seq.len;
6982 ret = print_trace_line(iter);
6984 if (trace_seq_has_overflowed(&iter->seq)) {
6985 iter->seq.seq.len = save_len;
6990 * This should not be hit, because it should only
6991 * be set if the iter->seq overflowed. But check it
6992 * anyway to be safe.
6994 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6995 iter->seq.seq.len = save_len;
6999 count = trace_seq_used(&iter->seq) - save_len;
7002 iter->seq.seq.len = save_len;
7006 if (ret != TRACE_TYPE_NO_CONSUME)
7007 trace_consume(iter);
7009 if (!trace_find_next_entry_inc(iter)) {
7019 static ssize_t tracing_splice_read_pipe(struct file *filp,
7021 struct pipe_inode_info *pipe,
7025 struct page *pages_def[PIPE_DEF_BUFFERS];
7026 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7027 struct trace_iterator *iter = filp->private_data;
7028 struct splice_pipe_desc spd = {
7030 .partial = partial_def,
7031 .nr_pages = 0, /* This gets updated below. */
7032 .nr_pages_max = PIPE_DEF_BUFFERS,
7033 .ops = &default_pipe_buf_ops,
7034 .spd_release = tracing_spd_release_pipe,
7040 if (splice_grow_spd(pipe, &spd))
7043 mutex_lock(&iter->mutex);
7045 if (iter->trace->splice_read) {
7046 ret = iter->trace->splice_read(iter, filp,
7047 ppos, pipe, len, flags);
7052 ret = tracing_wait_pipe(filp);
7056 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7061 trace_event_read_lock();
7062 trace_access_lock(iter->cpu_file);
7064 /* Fill as many pages as possible. */
7065 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7066 spd.pages[i] = alloc_page(GFP_KERNEL);
7070 rem = tracing_fill_pipe_page(rem, iter);
7072 /* Copy the data into the page, so we can start over. */
7073 ret = trace_seq_to_buffer(&iter->seq,
7074 page_address(spd.pages[i]),
7075 trace_seq_used(&iter->seq));
7077 __free_page(spd.pages[i]);
7080 spd.partial[i].offset = 0;
7081 spd.partial[i].len = trace_seq_used(&iter->seq);
7083 trace_seq_init(&iter->seq);
7086 trace_access_unlock(iter->cpu_file);
7087 trace_event_read_unlock();
7088 mutex_unlock(&iter->mutex);
7093 ret = splice_to_pipe(pipe, &spd);
7097 splice_shrink_spd(&spd);
7101 mutex_unlock(&iter->mutex);
7106 tracing_entries_read(struct file *filp, char __user *ubuf,
7107 size_t cnt, loff_t *ppos)
7109 struct inode *inode = file_inode(filp);
7110 struct trace_array *tr = inode->i_private;
7111 int cpu = tracing_get_cpu(inode);
7116 mutex_lock(&trace_types_lock);
7118 if (cpu == RING_BUFFER_ALL_CPUS) {
7119 int cpu, buf_size_same;
7124 /* check if all cpu sizes are the same */
7125 for_each_tracing_cpu(cpu) {
7126 /* fill in the size from first enabled cpu */
7128 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7129 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7135 if (buf_size_same) {
7136 if (!ring_buffer_expanded)
7137 r = sprintf(buf, "%lu (expanded: %lu)\n",
7139 trace_buf_size >> 10);
7141 r = sprintf(buf, "%lu\n", size >> 10);
7143 r = sprintf(buf, "X\n");
7145 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7147 mutex_unlock(&trace_types_lock);
7149 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7154 tracing_entries_write(struct file *filp, const char __user *ubuf,
7155 size_t cnt, loff_t *ppos)
7157 struct inode *inode = file_inode(filp);
7158 struct trace_array *tr = inode->i_private;
7162 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7166 /* must have at least 1 entry */
7170 /* value is in KB */
7172 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7182 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7183 size_t cnt, loff_t *ppos)
7185 struct trace_array *tr = filp->private_data;
7188 unsigned long size = 0, expanded_size = 0;
7190 mutex_lock(&trace_types_lock);
7191 for_each_tracing_cpu(cpu) {
7192 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7193 if (!ring_buffer_expanded)
7194 expanded_size += trace_buf_size >> 10;
7196 if (ring_buffer_expanded)
7197 r = sprintf(buf, "%lu\n", size);
7199 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7200 mutex_unlock(&trace_types_lock);
7202 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
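/*
 * buffer_size_kb (per instance, optionally per cpu) and buffer_total_size_kb
 * expose the entries accounting above in KB. A minimal sketch resizing the
 * per-cpu buffers to 4 MB each (mount point assumed):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int resize_buffers(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, "4096", 4);   // value is in KB, per cpu
 *		close(fd);
 *		return n == 4 ? 0 : -1;
 *	}
 */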
7206 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7207 size_t cnt, loff_t *ppos)
7210 * There is no need to read what the user has written; this function
7211 * exists just to make sure that there is no error when "echo" is used
7220 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7222 struct trace_array *tr = inode->i_private;
7224 /* disable tracing ? */
7225 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7226 tracer_tracing_off(tr);
7227 /* resize the ring buffer to 0 */
7228 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7230 trace_array_put(tr);
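/*
 * Usage sketch (illustrative): a tracing session can hold free_buffer open
 * so that the ring buffer is shrunk to its minimum when the process exits,
 * even if it is killed; with the "disable_on_free" option set, tracing is
 * also turned off on release:
 *
 *     # exec 5>/sys/kernel/tracing/free_buffer
 *     ... run the traced workload ...
 *     # exec 5>&-        # closing the fd frees the buffer
 */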
7236 tracing_mark_write(struct file *filp, const char __user *ubuf,
7237 size_t cnt, loff_t *fpos)
7239 struct trace_array *tr = filp->private_data;
7240 struct ring_buffer_event *event;
7241 enum event_trigger_type tt = ETT_NONE;
7242 struct trace_buffer *buffer;
7243 struct print_entry *entry;
7248 /* Used in tracing_mark_raw_write() as well */
7249 #define FAULTED_STR "<faulted>"
7250 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7252 if (tracing_disabled)
7255 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7258 if (cnt > TRACE_BUF_SIZE)
7259 cnt = TRACE_BUF_SIZE;
7261 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7263 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7265 /* If less than "<faulted>", then make sure we can still add that */
7266 if (cnt < FAULTED_SIZE)
7267 size += FAULTED_SIZE - cnt;
7269 buffer = tr->array_buffer.buffer;
7270 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7272 if (unlikely(!event))
7273 /* Ring buffer disabled, return as if not open for write */
7276 entry = ring_buffer_event_data(event);
7277 entry->ip = _THIS_IP_;
7279 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7281 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7287 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7288 /* do not add \n before testing triggers, but add \0 */
7289 entry->buf[cnt] = '\0';
7290 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7293 if (entry->buf[cnt - 1] != '\n') {
7294 entry->buf[cnt] = '\n';
7295 entry->buf[cnt + 1] = '\0';
7297 entry->buf[cnt] = '\0';
7299 if (static_branch_unlikely(&trace_marker_exports_enabled))
7300 ftrace_exports(event, TRACE_EXPORT_MARKER);
7301 __buffer_unlock_commit(buffer, event);
7304 event_triggers_post_call(tr->trace_marker_file, tt);
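/*
 * Usage sketch (illustrative): any string written to trace_marker is
 * recorded as a TRACE_PRINT event and shows up inline with the rest of the
 * trace, which is handy for correlating user space activity:
 *
 *     # echo "hit the interesting code path" > /sys/kernel/tracing/trace_marker
 *     # cat /sys/kernel/tracing/trace
 */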
7309 /* Limit it for now to 3K (including tag) */
7310 #define RAW_DATA_MAX_SIZE (1024*3)
7313 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7314 size_t cnt, loff_t *fpos)
7316 struct trace_array *tr = filp->private_data;
7317 struct ring_buffer_event *event;
7318 struct trace_buffer *buffer;
7319 struct raw_data_entry *entry;
7324 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7326 if (tracing_disabled)
7329 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7332 /* The marker must at least have a tag id */
7333 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7336 if (cnt > TRACE_BUF_SIZE)
7337 cnt = TRACE_BUF_SIZE;
7339 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7341 size = sizeof(*entry) + cnt;
7342 if (cnt < FAULT_SIZE_ID)
7343 size += FAULT_SIZE_ID - cnt;
7345 buffer = tr->array_buffer.buffer;
7346 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7349 /* Ring buffer disabled, return as if not open for write */
7352 entry = ring_buffer_event_data(event);
7354 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7357 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7362 __buffer_unlock_commit(buffer, event);
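/*
 * Usage sketch (illustrative; MY_TAG, value and marker_raw_fd are
 * hypothetical names): trace_marker_raw is the binary counterpart of
 * trace_marker. Each write must start with a 4-byte tag id, is limited to
 * RAW_DATA_MAX_SIZE, and is meant to be consumed by a tool reading
 * trace_pipe_raw rather than by humans:
 *
 *     struct { unsigned int id; unsigned long long value; } rec = { MY_TAG, value };
 *
 *     write(marker_raw_fd, &rec, sizeof(rec));
 */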
7367 static int tracing_clock_show(struct seq_file *m, void *v)
7369 struct trace_array *tr = m->private;
7372 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7374 "%s%s%s%s", i ? " " : "",
7375 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7376 i == tr->clock_id ? "]" : "");
7382 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7386 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7387 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7390 if (i == ARRAY_SIZE(trace_clocks))
7393 mutex_lock(&trace_types_lock);
7397 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7400 * New clock may not be consistent with the previous clock.
7401 * Reset the buffer so that it doesn't have incomparable timestamps.
7403 tracing_reset_online_cpus(&tr->array_buffer);
7405 #ifdef CONFIG_TRACER_MAX_TRACE
7406 if (tr->max_buffer.buffer)
7407 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7408 tracing_reset_online_cpus(&tr->max_buffer);
7411 mutex_unlock(&trace_types_lock);
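/*
 * Usage sketch (illustrative): reading trace_clock lists the available
 * clocks with the active one in brackets; writing a name switches clocks
 * and, as noted above, resets the buffer since old and new timestamps
 * would not be comparable:
 *
 *     # cat /sys/kernel/tracing/trace_clock
 *     [local] global counter ...
 *     # echo global > /sys/kernel/tracing/trace_clock
 */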
7416 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7417 size_t cnt, loff_t *fpos)
7419 struct seq_file *m = filp->private_data;
7420 struct trace_array *tr = m->private;
7422 const char *clockstr;
7425 if (cnt >= sizeof(buf))
7428 if (copy_from_user(buf, ubuf, cnt))
7433 clockstr = strstrip(buf);
7435 ret = tracing_set_clock(tr, clockstr);
7444 static int tracing_clock_open(struct inode *inode, struct file *file)
7446 struct trace_array *tr = inode->i_private;
7449 ret = tracing_check_open_get_tr(tr);
7453 ret = single_open(file, tracing_clock_show, inode->i_private);
7455 trace_array_put(tr);
7460 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7462 struct trace_array *tr = m->private;
7464 mutex_lock(&trace_types_lock);
7466 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7467 seq_puts(m, "delta [absolute]\n");
7469 seq_puts(m, "[delta] absolute\n");
7471 mutex_unlock(&trace_types_lock);
7476 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7478 struct trace_array *tr = inode->i_private;
7481 ret = tracing_check_open_get_tr(tr);
7485 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7487 trace_array_put(tr);
7492 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7494 if (rbe == this_cpu_read(trace_buffered_event))
7495 return ring_buffer_time_stamp(buffer);
7497 return ring_buffer_event_time_stamp(buffer, rbe);
7501 * Set or disable using the per CPU trace_buffer_event when possible.
7503 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7507 mutex_lock(&trace_types_lock);
7509 if (set && tr->no_filter_buffering_ref++)
7513 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7518 --tr->no_filter_buffering_ref;
7521 mutex_unlock(&trace_types_lock);
7526 struct ftrace_buffer_info {
7527 struct trace_iterator iter;
7529 unsigned int spare_cpu;
7533 #ifdef CONFIG_TRACER_SNAPSHOT
7534 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7536 struct trace_array *tr = inode->i_private;
7537 struct trace_iterator *iter;
7541 ret = tracing_check_open_get_tr(tr);
7545 if (file->f_mode & FMODE_READ) {
7546 iter = __tracing_open(inode, file, true);
7548 ret = PTR_ERR(iter);
7550 /* Writes still need the seq_file to hold the private data */
7552 m = kzalloc(sizeof(*m), GFP_KERNEL);
7555 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7563 iter->array_buffer = &tr->max_buffer;
7564 iter->cpu_file = tracing_get_cpu(inode);
7566 file->private_data = m;
7570 trace_array_put(tr);
7576 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7579 struct seq_file *m = filp->private_data;
7580 struct trace_iterator *iter = m->private;
7581 struct trace_array *tr = iter->tr;
7585 ret = tracing_update_buffers();
7589 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7593 mutex_lock(&trace_types_lock);
7595 if (tr->current_trace->use_max_tr) {
7600 local_irq_disable();
7601 arch_spin_lock(&tr->max_lock);
7602 if (tr->cond_snapshot)
7604 arch_spin_unlock(&tr->max_lock);
7611 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7615 if (tr->allocated_snapshot)
7619 /* Only allow per-cpu swap if the ring buffer supports it */
7620 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7621 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7626 if (tr->allocated_snapshot)
7627 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7628 &tr->array_buffer, iter->cpu_file);
7630 ret = tracing_alloc_snapshot_instance(tr);
7633 local_irq_disable();
7634 /* Now, we're going to swap */
7635 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7636 update_max_tr(tr, current, smp_processor_id(), NULL);
7638 update_max_tr_single(tr, current, iter->cpu_file);
7642 if (tr->allocated_snapshot) {
7643 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7644 tracing_reset_online_cpus(&tr->max_buffer);
7646 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7656 mutex_unlock(&trace_types_lock);
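/*
 * Usage sketch (illustrative): the snapshot file accepts a small set of
 * values mirroring the cases above: "0" frees the snapshot buffer, "1"
 * allocates it (if needed) and swaps it with the live buffer, and "2"
 * clears the snapshot contents without allocating or freeing:
 *
 *     # echo 1 > /sys/kernel/tracing/snapshot
 *     # cat /sys/kernel/tracing/snapshot
 */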
7660 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7662 struct seq_file *m = file->private_data;
7665 ret = tracing_release(inode, file);
7667 if (file->f_mode & FMODE_READ)
7670 /* If write only, the seq_file is just a stub */
7678 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7679 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7680 size_t count, loff_t *ppos);
7681 static int tracing_buffers_release(struct inode *inode, struct file *file);
7682 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7683 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7685 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7687 struct ftrace_buffer_info *info;
7690 /* The following checks for tracefs lockdown */
7691 ret = tracing_buffers_open(inode, filp);
7695 info = filp->private_data;
7697 if (info->iter.trace->use_max_tr) {
7698 tracing_buffers_release(inode, filp);
7702 info->iter.snapshot = true;
7703 info->iter.array_buffer = &info->iter.tr->max_buffer;
7708 #endif /* CONFIG_TRACER_SNAPSHOT */
7711 static const struct file_operations tracing_thresh_fops = {
7712 .open = tracing_open_generic,
7713 .read = tracing_thresh_read,
7714 .write = tracing_thresh_write,
7715 .llseek = generic_file_llseek,
7718 #ifdef CONFIG_TRACER_MAX_TRACE
7719 static const struct file_operations tracing_max_lat_fops = {
7720 .open = tracing_open_generic,
7721 .read = tracing_max_lat_read,
7722 .write = tracing_max_lat_write,
7723 .llseek = generic_file_llseek,
7727 static const struct file_operations set_tracer_fops = {
7728 .open = tracing_open_generic,
7729 .read = tracing_set_trace_read,
7730 .write = tracing_set_trace_write,
7731 .llseek = generic_file_llseek,
7734 static const struct file_operations tracing_pipe_fops = {
7735 .open = tracing_open_pipe,
7736 .poll = tracing_poll_pipe,
7737 .read = tracing_read_pipe,
7738 .splice_read = tracing_splice_read_pipe,
7739 .release = tracing_release_pipe,
7740 .llseek = no_llseek,
7743 static const struct file_operations tracing_entries_fops = {
7744 .open = tracing_open_generic_tr,
7745 .read = tracing_entries_read,
7746 .write = tracing_entries_write,
7747 .llseek = generic_file_llseek,
7748 .release = tracing_release_generic_tr,
7751 static const struct file_operations tracing_total_entries_fops = {
7752 .open = tracing_open_generic_tr,
7753 .read = tracing_total_entries_read,
7754 .llseek = generic_file_llseek,
7755 .release = tracing_release_generic_tr,
7758 static const struct file_operations tracing_free_buffer_fops = {
7759 .open = tracing_open_generic_tr,
7760 .write = tracing_free_buffer_write,
7761 .release = tracing_free_buffer_release,
7764 static const struct file_operations tracing_mark_fops = {
7765 .open = tracing_mark_open,
7766 .write = tracing_mark_write,
7767 .release = tracing_release_generic_tr,
7770 static const struct file_operations tracing_mark_raw_fops = {
7771 .open = tracing_mark_open,
7772 .write = tracing_mark_raw_write,
7773 .release = tracing_release_generic_tr,
7776 static const struct file_operations trace_clock_fops = {
7777 .open = tracing_clock_open,
7779 .llseek = seq_lseek,
7780 .release = tracing_single_release_tr,
7781 .write = tracing_clock_write,
7784 static const struct file_operations trace_time_stamp_mode_fops = {
7785 .open = tracing_time_stamp_mode_open,
7787 .llseek = seq_lseek,
7788 .release = tracing_single_release_tr,
7791 #ifdef CONFIG_TRACER_SNAPSHOT
7792 static const struct file_operations snapshot_fops = {
7793 .open = tracing_snapshot_open,
7795 .write = tracing_snapshot_write,
7796 .llseek = tracing_lseek,
7797 .release = tracing_snapshot_release,
7800 static const struct file_operations snapshot_raw_fops = {
7801 .open = snapshot_raw_open,
7802 .read = tracing_buffers_read,
7803 .release = tracing_buffers_release,
7804 .splice_read = tracing_buffers_splice_read,
7805 .llseek = no_llseek,
7808 #endif /* CONFIG_TRACER_SNAPSHOT */
7811 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7812 * @filp: The active open file structure
7813 * @ubuf: The userspace provided buffer holding the value to be written
7814 * @cnt: The number of bytes available to read from @ubuf
7815 * @ppos: The current "file" position
7817 * This function implements the write interface for a struct trace_min_max_param.
7818 * The filp->private_data must point to a trace_min_max_param structure that
7819 * defines where to write the value, the min and the max acceptable values,
7820 * and a lock to protect the write.
7823 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7825 struct trace_min_max_param *param = filp->private_data;
7832 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7837 mutex_lock(param->lock);
7839 if (param->min && val < *param->min)
7842 if (param->max && val > *param->max)
7849 mutex_unlock(param->lock);
7858 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7859 * @filp: The active open file structure
7860 * @ubuf: The userspace provided buffer to read value into
7861 * @cnt: The maximum number of bytes to read
7862 * @ppos: The current "file" position
7864 * This function implements the read interface for a struct trace_min_max_param.
7865 * The filp->private_data must point to a trace_min_max_param struct with valid
7869 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7871 struct trace_min_max_param *param = filp->private_data;
7872 char buf[U64_STR_SIZE];
7881 if (cnt > sizeof(buf))
7884 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7886 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7889 const struct file_operations trace_min_max_fops = {
7890 .open = tracing_open_generic,
7891 .read = trace_min_max_read,
7892 .write = trace_min_max_write,
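/*
 * Wiring sketch (illustrative; the "example_*" names are hypothetical): a
 * tracer exposes a clamped u64 knob by pointing a trace_min_max_param at
 * its value and bounds and creating a file with trace_min_max_fops:
 *
 *     static DEFINE_MUTEX(example_lock);
 *     static u64 example_val = 100, example_min = 1, example_max = 1000;
 *
 *     static struct trace_min_max_param example_param = {
 *             .lock   = &example_lock,
 *             .val    = &example_val,
 *             .min    = &example_min,
 *             .max    = &example_max,
 *     };
 *
 *     trace_create_file("example_knob", TRACE_MODE_WRITE, parent,
 *                       &example_param, &trace_min_max_fops);
 */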
7895 #define TRACING_LOG_ERRS_MAX 8
7896 #define TRACING_LOG_LOC_MAX 128
7898 #define CMD_PREFIX " Command: "
7901 const char **errs; /* ptr to loc-specific array of err strings */
7902 u8 type; /* index into errs -> specific err string */
7903 u16 pos; /* caret position */
7907 struct tracing_log_err {
7908 struct list_head list;
7909 struct err_info info;
7910 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7911 char *cmd; /* what caused err */
7914 static DEFINE_MUTEX(tracing_err_log_lock);
7916 static struct tracing_log_err *alloc_tracing_log_err(int len)
7918 struct tracing_log_err *err;
7920 err = kzalloc(sizeof(*err), GFP_KERNEL);
7922 return ERR_PTR(-ENOMEM);
7924 err->cmd = kzalloc(len, GFP_KERNEL);
7927 return ERR_PTR(-ENOMEM);
7933 static void free_tracing_log_err(struct tracing_log_err *err)
7939 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7942 struct tracing_log_err *err;
7945 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7946 err = alloc_tracing_log_err(len);
7947 if (PTR_ERR(err) != -ENOMEM)
7948 tr->n_err_log_entries++;
7952 cmd = kzalloc(len, GFP_KERNEL);
7954 return ERR_PTR(-ENOMEM);
7955 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7958 list_del(&err->list);
7964 * err_pos - find the position of a string within a command for error careting
7965 * @cmd: The tracing command that caused the error
7966 * @str: The string to position the caret at within @cmd
7968 * Finds the position of the first occurrence of @str within @cmd. The
7969 * return value can be passed to tracing_log_err() for caret placement
7972 * Returns the index within @cmd of the first occurrence of @str or 0
7973 * if @str was not found.
7975 unsigned int err_pos(char *cmd, const char *str)
7979 if (WARN_ON(!strlen(cmd)))
7982 found = strstr(cmd, str);
7990 * tracing_log_err - write an error to the tracing error log
7991 * @tr: The associated trace array for the error (NULL for top level array)
7992 * @loc: A string describing where the error occurred
7993 * @cmd: The tracing command that caused the error
7994 * @errs: The array of loc-specific static error strings
7995 * @type: The index into errs[], which produces the specific static err string
7996 * @pos: The position the caret should be placed in the cmd
7998 * Writes an error into tracing/error_log of the form:
8000 * <loc>: error: <text>
8004 * tracing/error_log is a small log file containing the last
8005 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
8006 * unless there has been a tracing error, and the error log can be
8007 * cleared and have its memory freed by writing the empty string in
8008 * truncation mode to it, i.e. echo > tracing/error_log.
8010 * NOTE: the @errs array along with the @type param are used to
8011 * produce a static error string - this string is not copied and saved
8012 * when the error is logged - only a pointer to it is saved. See
8013 * existing callers for examples of how static strings are typically
8014 * defined for use with tracing_log_err().
8016 void tracing_log_err(struct trace_array *tr,
8017 const char *loc, const char *cmd,
8018 const char **errs, u8 type, u16 pos)
8020 struct tracing_log_err *err;
8026 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8028 mutex_lock(&tracing_err_log_lock);
8029 err = get_tracing_log_err(tr, len);
8030 if (PTR_ERR(err) == -ENOMEM) {
8031 mutex_unlock(&tracing_err_log_lock);
8035 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8036 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8038 err->info.errs = errs;
8039 err->info.type = type;
8040 err->info.pos = pos;
8041 err->info.ts = local_clock();
8043 list_add_tail(&err->list, &tr->err_log);
8044 mutex_unlock(&tracing_err_log_lock);
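/*
 * Caller sketch (illustrative; the names are hypothetical): a parser keeps
 * a static array of error strings and logs a failure with the caret placed
 * at the offending token:
 *
 *     static const char *foo_errs[] = { "Unknown field", "Missing value" };
 *     enum { FOO_ERR_FIELD, FOO_ERR_VALUE };
 *
 *     tracing_log_err(tr, "foo: parse", cmd, foo_errs,
 *                     FOO_ERR_FIELD, err_pos(cmd, bad_token));
 */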
8047 static void clear_tracing_err_log(struct trace_array *tr)
8049 struct tracing_log_err *err, *next;
8051 mutex_lock(&tracing_err_log_lock);
8052 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8053 list_del(&err->list);
8054 free_tracing_log_err(err);
8057 tr->n_err_log_entries = 0;
8058 mutex_unlock(&tracing_err_log_lock);
8061 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8063 struct trace_array *tr = m->private;
8065 mutex_lock(&tracing_err_log_lock);
8067 return seq_list_start(&tr->err_log, *pos);
8070 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8072 struct trace_array *tr = m->private;
8074 return seq_list_next(v, &tr->err_log, pos);
8077 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8079 mutex_unlock(&tracing_err_log_lock);
8082 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8086 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8088 for (i = 0; i < pos; i++)
8093 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8095 struct tracing_log_err *err = v;
8098 const char *err_text = err->info.errs[err->info.type];
8099 u64 sec = err->info.ts;
8102 nsec = do_div(sec, NSEC_PER_SEC);
8103 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8104 err->loc, err_text);
8105 seq_printf(m, "%s", err->cmd);
8106 tracing_err_log_show_pos(m, err->info.pos);
8112 static const struct seq_operations tracing_err_log_seq_ops = {
8113 .start = tracing_err_log_seq_start,
8114 .next = tracing_err_log_seq_next,
8115 .stop = tracing_err_log_seq_stop,
8116 .show = tracing_err_log_seq_show
8119 static int tracing_err_log_open(struct inode *inode, struct file *file)
8121 struct trace_array *tr = inode->i_private;
8124 ret = tracing_check_open_get_tr(tr);
8128 /* If this file was opened for write, then erase contents */
8129 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8130 clear_tracing_err_log(tr);
8132 if (file->f_mode & FMODE_READ) {
8133 ret = seq_open(file, &tracing_err_log_seq_ops);
8135 struct seq_file *m = file->private_data;
8138 trace_array_put(tr);
8144 static ssize_t tracing_err_log_write(struct file *file,
8145 const char __user *buffer,
8146 size_t count, loff_t *ppos)
8151 static int tracing_err_log_release(struct inode *inode, struct file *file)
8153 struct trace_array *tr = inode->i_private;
8155 trace_array_put(tr);
8157 if (file->f_mode & FMODE_READ)
8158 seq_release(inode, file);
8163 static const struct file_operations tracing_err_log_fops = {
8164 .open = tracing_err_log_open,
8165 .write = tracing_err_log_write,
8167 .llseek = tracing_lseek,
8168 .release = tracing_err_log_release,
8171 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8173 struct trace_array *tr = inode->i_private;
8174 struct ftrace_buffer_info *info;
8177 ret = tracing_check_open_get_tr(tr);
8181 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8183 trace_array_put(tr);
8187 mutex_lock(&trace_types_lock);
8190 info->iter.cpu_file = tracing_get_cpu(inode);
8191 info->iter.trace = tr->current_trace;
8192 info->iter.array_buffer = &tr->array_buffer;
8194 /* Force reading ring buffer for first read */
8195 info->read = (unsigned int)-1;
8197 filp->private_data = info;
8201 mutex_unlock(&trace_types_lock);
8203 ret = nonseekable_open(inode, filp);
8205 trace_array_put(tr);
8211 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8213 struct ftrace_buffer_info *info = filp->private_data;
8214 struct trace_iterator *iter = &info->iter;
8216 return trace_poll(iter, filp, poll_table);
8220 tracing_buffers_read(struct file *filp, char __user *ubuf,
8221 size_t count, loff_t *ppos)
8223 struct ftrace_buffer_info *info = filp->private_data;
8224 struct trace_iterator *iter = &info->iter;
8231 #ifdef CONFIG_TRACER_MAX_TRACE
8232 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8237 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8239 if (IS_ERR(info->spare)) {
8240 ret = PTR_ERR(info->spare);
8243 info->spare_cpu = iter->cpu_file;
8249 /* Do we have previous read data to read? */
8250 if (info->read < PAGE_SIZE)
8254 trace_access_lock(iter->cpu_file);
8255 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8259 trace_access_unlock(iter->cpu_file);
8262 if (trace_empty(iter)) {
8263 if ((filp->f_flags & O_NONBLOCK))
8266 ret = wait_on_pipe(iter, 0);
8277 size = PAGE_SIZE - info->read;
8281 ret = copy_to_user(ubuf, info->spare + info->read, size);
8293 static int tracing_buffers_release(struct inode *inode, struct file *file)
8295 struct ftrace_buffer_info *info = file->private_data;
8296 struct trace_iterator *iter = &info->iter;
8298 mutex_lock(&trace_types_lock);
8300 iter->tr->trace_ref--;
8302 __trace_array_put(iter->tr);
8305 /* Make sure the waiters see the new wait_index */
8308 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8311 ring_buffer_free_read_page(iter->array_buffer->buffer,
8312 info->spare_cpu, info->spare);
8315 mutex_unlock(&trace_types_lock);
8321 struct trace_buffer *buffer;
8324 refcount_t refcount;
8327 static void buffer_ref_release(struct buffer_ref *ref)
8329 if (!refcount_dec_and_test(&ref->refcount))
8331 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8335 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8336 struct pipe_buffer *buf)
8338 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8340 buffer_ref_release(ref);
8344 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8345 struct pipe_buffer *buf)
8347 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8349 if (refcount_read(&ref->refcount) > INT_MAX/2)
8352 refcount_inc(&ref->refcount);
8356 /* Pipe buffer operations for a buffer. */
8357 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8358 .release = buffer_pipe_buf_release,
8359 .get = buffer_pipe_buf_get,
8363 * Callback from splice_to_pipe(), if we need to release some pages
8364 * at the end of the spd in case we errored out while filling the pipe.
8366 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8368 struct buffer_ref *ref =
8369 (struct buffer_ref *)spd->partial[i].private;
8371 buffer_ref_release(ref);
8372 spd->partial[i].private = 0;
8376 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8377 struct pipe_inode_info *pipe, size_t len,
8380 struct ftrace_buffer_info *info = file->private_data;
8381 struct trace_iterator *iter = &info->iter;
8382 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8383 struct page *pages_def[PIPE_DEF_BUFFERS];
8384 struct splice_pipe_desc spd = {
8386 .partial = partial_def,
8387 .nr_pages_max = PIPE_DEF_BUFFERS,
8388 .ops = &buffer_pipe_buf_ops,
8389 .spd_release = buffer_spd_release,
8391 struct buffer_ref *ref;
8395 #ifdef CONFIG_TRACER_MAX_TRACE
8396 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8400 if (*ppos & (PAGE_SIZE - 1))
8403 if (len & (PAGE_SIZE - 1)) {
8404 if (len < PAGE_SIZE)
8409 if (splice_grow_spd(pipe, &spd))
8413 trace_access_lock(iter->cpu_file);
8414 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8416 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8420 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8426 refcount_set(&ref->refcount, 1);
8427 ref->buffer = iter->array_buffer->buffer;
8428 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8429 if (IS_ERR(ref->page)) {
8430 ret = PTR_ERR(ref->page);
8435 ref->cpu = iter->cpu_file;
8437 r = ring_buffer_read_page(ref->buffer, &ref->page,
8438 len, iter->cpu_file, 1);
8440 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8446 page = virt_to_page(ref->page);
8448 spd.pages[i] = page;
8449 spd.partial[i].len = PAGE_SIZE;
8450 spd.partial[i].offset = 0;
8451 spd.partial[i].private = (unsigned long)ref;
8455 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8458 trace_access_unlock(iter->cpu_file);
8461 /* did we read anything? */
8462 if (!spd.nr_pages) {
8469 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8472 wait_index = READ_ONCE(iter->wait_index);
8474 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8478 /* No need to wait after waking up when tracing is off */
8479 if (!tracer_tracing_is_on(iter->tr))
8482 /* Make sure we see the new wait_index */
8484 if (wait_index != iter->wait_index)
8490 ret = splice_to_pipe(pipe, &spd);
8492 splice_shrink_spd(&spd);
8497 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8498 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8500 struct ftrace_buffer_info *info = file->private_data;
8501 struct trace_iterator *iter = &info->iter;
8504 return -ENOIOCTLCMD;
8506 mutex_lock(&trace_types_lock);
8509 /* Make sure the waiters see the new wait_index */
8512 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8514 mutex_unlock(&trace_types_lock);
8518 static const struct file_operations tracing_buffers_fops = {
8519 .open = tracing_buffers_open,
8520 .read = tracing_buffers_read,
8521 .poll = tracing_buffers_poll,
8522 .release = tracing_buffers_release,
8523 .splice_read = tracing_buffers_splice_read,
8524 .unlocked_ioctl = tracing_buffers_ioctl,
8525 .llseek = no_llseek,
8529 tracing_stats_read(struct file *filp, char __user *ubuf,
8530 size_t count, loff_t *ppos)
8532 struct inode *inode = file_inode(filp);
8533 struct trace_array *tr = inode->i_private;
8534 struct array_buffer *trace_buf = &tr->array_buffer;
8535 int cpu = tracing_get_cpu(inode);
8536 struct trace_seq *s;
8538 unsigned long long t;
8539 unsigned long usec_rem;
8541 s = kmalloc(sizeof(*s), GFP_KERNEL);
8547 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8548 trace_seq_printf(s, "entries: %ld\n", cnt);
8550 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8551 trace_seq_printf(s, "overrun: %ld\n", cnt);
8553 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8554 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8556 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8557 trace_seq_printf(s, "bytes: %ld\n", cnt);
8559 if (trace_clocks[tr->clock_id].in_ns) {
8560 /* local or global for trace_clock */
8561 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8562 usec_rem = do_div(t, USEC_PER_SEC);
8563 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8566 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8567 usec_rem = do_div(t, USEC_PER_SEC);
8568 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8570 /* counter or tsc mode for trace_clock */
8571 trace_seq_printf(s, "oldest event ts: %llu\n",
8572 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8574 trace_seq_printf(s, "now ts: %llu\n",
8575 ring_buffer_time_stamp(trace_buf->buffer));
8578 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8579 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8581 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8582 trace_seq_printf(s, "read events: %ld\n", cnt);
8584 count = simple_read_from_buffer(ubuf, count, ppos,
8585 s->buffer, trace_seq_used(s));
8592 static const struct file_operations tracing_stats_fops = {
8593 .open = tracing_open_generic_tr,
8594 .read = tracing_stats_read,
8595 .llseek = generic_file_llseek,
8596 .release = tracing_release_generic_tr,
8599 #ifdef CONFIG_DYNAMIC_FTRACE
8602 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8603 size_t cnt, loff_t *ppos)
8609 /* 256 should be plenty to hold the amount needed */
8610 buf = kmalloc(256, GFP_KERNEL);
8614 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8615 ftrace_update_tot_cnt,
8616 ftrace_number_of_pages,
8617 ftrace_number_of_groups);
8619 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8624 static const struct file_operations tracing_dyn_info_fops = {
8625 .open = tracing_open_generic,
8626 .read = tracing_read_dyn_info,
8627 .llseek = generic_file_llseek,
8629 #endif /* CONFIG_DYNAMIC_FTRACE */
8631 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8633 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8634 struct trace_array *tr, struct ftrace_probe_ops *ops,
8637 tracing_snapshot_instance(tr);
8641 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8642 struct trace_array *tr, struct ftrace_probe_ops *ops,
8645 struct ftrace_func_mapper *mapper = data;
8649 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8659 tracing_snapshot_instance(tr);
8663 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8664 struct ftrace_probe_ops *ops, void *data)
8666 struct ftrace_func_mapper *mapper = data;
8669 seq_printf(m, "%ps:", (void *)ip);
8671 seq_puts(m, "snapshot");
8674 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8677 seq_printf(m, ":count=%ld\n", *count);
8679 seq_puts(m, ":unlimited\n");
8685 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8686 unsigned long ip, void *init_data, void **data)
8688 struct ftrace_func_mapper *mapper = *data;
8691 mapper = allocate_ftrace_func_mapper();
8697 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8701 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8702 unsigned long ip, void *data)
8704 struct ftrace_func_mapper *mapper = data;
8709 free_ftrace_func_mapper(mapper, NULL);
8713 ftrace_func_mapper_remove_ip(mapper, ip);
8716 static struct ftrace_probe_ops snapshot_probe_ops = {
8717 .func = ftrace_snapshot,
8718 .print = ftrace_snapshot_print,
8721 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8722 .func = ftrace_count_snapshot,
8723 .print = ftrace_snapshot_print,
8724 .init = ftrace_snapshot_init,
8725 .free = ftrace_snapshot_free,
8729 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8730 char *glob, char *cmd, char *param, int enable)
8732 struct ftrace_probe_ops *ops;
8733 void *count = (void *)-1;
8740 /* hash funcs only work with set_ftrace_filter */
8744 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8747 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8752 number = strsep(&param, ":");
8754 if (!strlen(number))
8758 * We use the callback data field (which is a pointer)
8761 ret = kstrtoul(number, 0, (unsigned long *)&count);
8766 ret = tracing_alloc_snapshot_instance(tr);
8770 ret = register_ftrace_function_probe(glob, tr, ops, count);
8773 return ret < 0 ? ret : 0;
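/*
 * Usage sketch (illustrative): the callback above backs the "snapshot"
 * function command, written to set_ftrace_filter as
 * <function>:snapshot[:count], with a leading '!' to remove it again:
 *
 *     # echo 'do_sys_open:snapshot:1' > /sys/kernel/tracing/set_ftrace_filter
 *     # echo '!do_sys_open:snapshot:1' >> /sys/kernel/tracing/set_ftrace_filter
 */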
8776 static struct ftrace_func_command ftrace_snapshot_cmd = {
8778 .func = ftrace_trace_snapshot_callback,
8781 static __init int register_snapshot_cmd(void)
8783 return register_ftrace_command(&ftrace_snapshot_cmd);
8786 static inline __init int register_snapshot_cmd(void) { return 0; }
8787 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8789 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8791 if (WARN_ON(!tr->dir))
8792 return ERR_PTR(-ENODEV);
8794 /* Top directory uses NULL as the parent */
8795 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8798 /* All sub buffers have a descriptor */
8802 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8804 struct dentry *d_tracer;
8807 return tr->percpu_dir;
8809 d_tracer = tracing_get_dentry(tr);
8810 if (IS_ERR(d_tracer))
8813 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8815 MEM_FAIL(!tr->percpu_dir,
8816 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8818 return tr->percpu_dir;
8821 static struct dentry *
8822 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8823 void *data, long cpu, const struct file_operations *fops)
8825 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8827 if (ret) /* See tracing_get_cpu() */
8828 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8833 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8835 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8836 struct dentry *d_cpu;
8837 char cpu_dir[30]; /* 30 characters should be more than enough */
8842 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8843 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8845 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8849 /* per cpu trace_pipe */
8850 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8851 tr, cpu, &tracing_pipe_fops);
8854 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8855 tr, cpu, &tracing_fops);
8857 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8858 tr, cpu, &tracing_buffers_fops);
8860 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8861 tr, cpu, &tracing_stats_fops);
8863 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8864 tr, cpu, &tracing_entries_fops);
8866 #ifdef CONFIG_TRACER_SNAPSHOT
8867 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8868 tr, cpu, &snapshot_fops);
8870 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8871 tr, cpu, &snapshot_raw_fops);
8875 #ifdef CONFIG_FTRACE_SELFTEST
8876 /* Let selftest have access to static functions in this file */
8877 #include "trace_selftest.c"
8881 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8884 struct trace_option_dentry *topt = filp->private_data;
8887 if (topt->flags->val & topt->opt->bit)
8892 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8896 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8899 struct trace_option_dentry *topt = filp->private_data;
8903 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8907 if (val != 0 && val != 1)
8910 if (!!(topt->flags->val & topt->opt->bit) != val) {
8911 mutex_lock(&trace_types_lock);
8912 ret = __set_tracer_option(topt->tr, topt->flags,
8914 mutex_unlock(&trace_types_lock);
8925 static const struct file_operations trace_options_fops = {
8926 .open = tracing_open_generic,
8927 .read = trace_options_read,
8928 .write = trace_options_write,
8929 .llseek = generic_file_llseek,
8933 * In order to pass in both the trace_array descriptor as well as the index
8934 * to the flag that the trace option file represents, the trace_array
8935 * has a character array of trace_flags_index[], which holds the index
8936 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8937 * The address of this character array is passed to the flag option file
8938 * read/write callbacks.
8940 * In order to extract both the index and the trace_array descriptor,
8941 * get_tr_index() uses the following algorithm.
8945 * As the pointer itself contains the address of the index (remember
8948 * Then, to get the trace_array descriptor, we subtract that index
8949 * from the ptr, which lands us at the start of the index array itself.
8951 * ptr - idx == &index[0]
8953 * Then a simple container_of() from that pointer gets us to the
8954 * trace_array descriptor.
8956 static void get_tr_index(void *data, struct trace_array **ptr,
8957 unsigned int *pindex)
8959 *pindex = *(unsigned char *)data;
8961 *ptr = container_of(data - *pindex, struct trace_array,
8966 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8969 void *tr_index = filp->private_data;
8970 struct trace_array *tr;
8974 get_tr_index(tr_index, &tr, &index);
8976 if (tr->trace_flags & (1 << index))
8981 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8985 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8988 void *tr_index = filp->private_data;
8989 struct trace_array *tr;
8994 get_tr_index(tr_index, &tr, &index);
8996 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9000 if (val != 0 && val != 1)
9003 mutex_lock(&event_mutex);
9004 mutex_lock(&trace_types_lock);
9005 ret = set_tracer_flag(tr, 1 << index, val);
9006 mutex_unlock(&trace_types_lock);
9007 mutex_unlock(&event_mutex);
9017 static const struct file_operations trace_options_core_fops = {
9018 .open = tracing_open_generic,
9019 .read = trace_options_core_read,
9020 .write = trace_options_core_write,
9021 .llseek = generic_file_llseek,
9024 struct dentry *trace_create_file(const char *name,
9026 struct dentry *parent,
9028 const struct file_operations *fops)
9032 ret = tracefs_create_file(name, mode, parent, data, fops);
9034 pr_warn("Could not create tracefs '%s' entry\n", name);
9040 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9042 struct dentry *d_tracer;
9047 d_tracer = tracing_get_dentry(tr);
9048 if (IS_ERR(d_tracer))
9051 tr->options = tracefs_create_dir("options", d_tracer);
9053 pr_warn("Could not create tracefs directory 'options'\n");
9061 create_trace_option_file(struct trace_array *tr,
9062 struct trace_option_dentry *topt,
9063 struct tracer_flags *flags,
9064 struct tracer_opt *opt)
9066 struct dentry *t_options;
9068 t_options = trace_options_init_dentry(tr);
9072 topt->flags = flags;
9076 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9077 t_options, topt, &trace_options_fops);
9082 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9084 struct trace_option_dentry *topts;
9085 struct trace_options *tr_topts;
9086 struct tracer_flags *flags;
9087 struct tracer_opt *opts;
9094 flags = tracer->flags;
9096 if (!flags || !flags->opts)
9100 * If this is an instance, only create flags for tracers
9101 * the instance may have.
9103 if (!trace_ok_for_array(tracer, tr))
9106 for (i = 0; i < tr->nr_topts; i++) {
9107 /* Make sure there's no duplicate flags. */
9108 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9114 for (cnt = 0; opts[cnt].name; cnt++)
9117 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9121 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9128 tr->topts = tr_topts;
9129 tr->topts[tr->nr_topts].tracer = tracer;
9130 tr->topts[tr->nr_topts].topts = topts;
9133 for (cnt = 0; opts[cnt].name; cnt++) {
9134 create_trace_option_file(tr, &topts[cnt], flags,
9136 MEM_FAIL(topts[cnt].entry == NULL,
9137 "Failed to create trace option: %s",
9142 static struct dentry *
9143 create_trace_option_core_file(struct trace_array *tr,
9144 const char *option, long index)
9146 struct dentry *t_options;
9148 t_options = trace_options_init_dentry(tr);
9152 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9153 (void *)&tr->trace_flags_index[index],
9154 &trace_options_core_fops);
9157 static void create_trace_options_dir(struct trace_array *tr)
9159 struct dentry *t_options;
9160 bool top_level = tr == &global_trace;
9163 t_options = trace_options_init_dentry(tr);
9167 for (i = 0; trace_options[i]; i++) {
9169 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9170 create_trace_option_core_file(tr, trace_options[i], i);
9175 rb_simple_read(struct file *filp, char __user *ubuf,
9176 size_t cnt, loff_t *ppos)
9178 struct trace_array *tr = filp->private_data;
9182 r = tracer_tracing_is_on(tr);
9183 r = sprintf(buf, "%d\n", r);
9185 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9189 rb_simple_write(struct file *filp, const char __user *ubuf,
9190 size_t cnt, loff_t *ppos)
9192 struct trace_array *tr = filp->private_data;
9193 struct trace_buffer *buffer = tr->array_buffer.buffer;
9197 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9202 mutex_lock(&trace_types_lock);
9203 if (!!val == tracer_tracing_is_on(tr)) {
9204 val = 0; /* do nothing */
9206 tracer_tracing_on(tr);
9207 if (tr->current_trace->start)
9208 tr->current_trace->start(tr);
9210 tracer_tracing_off(tr);
9211 if (tr->current_trace->stop)
9212 tr->current_trace->stop(tr);
9213 /* Wake up any waiters */
9214 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9216 mutex_unlock(&trace_types_lock);
9224 static const struct file_operations rb_simple_fops = {
9225 .open = tracing_open_generic_tr,
9226 .read = rb_simple_read,
9227 .write = rb_simple_write,
9228 .release = tracing_release_generic_tr,
9229 .llseek = default_llseek,
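/*
 * Usage sketch (illustrative): tracing_on is a lightweight switch for the
 * ring buffer only; the current tracer stays loaded, but writes into the
 * buffer are gated, matching the start()/stop() calls above:
 *
 *     # echo 0 > /sys/kernel/tracing/tracing_on    # stop recording
 *     # echo 1 > /sys/kernel/tracing/tracing_on    # resume recording
 */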
9233 buffer_percent_read(struct file *filp, char __user *ubuf,
9234 size_t cnt, loff_t *ppos)
9236 struct trace_array *tr = filp->private_data;
9240 r = tr->buffer_percent;
9241 r = sprintf(buf, "%d\n", r);
9243 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9247 buffer_percent_write(struct file *filp, const char __user *ubuf,
9248 size_t cnt, loff_t *ppos)
9250 struct trace_array *tr = filp->private_data;
9254 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9261 tr->buffer_percent = val;
9268 static const struct file_operations buffer_percent_fops = {
9269 .open = tracing_open_generic_tr,
9270 .read = buffer_percent_read,
9271 .write = buffer_percent_write,
9272 .release = tracing_release_generic_tr,
9273 .llseek = default_llseek,
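/*
 * Usage sketch (illustrative): buffer_percent is the watermark used by
 * blocking readers of the per-CPU trace_pipe_raw files; 0 wakes them as
 * soon as any data is present, 100 only once the buffer is full:
 *
 *     # echo 50 > /sys/kernel/tracing/buffer_percent
 */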
9276 static struct dentry *trace_instance_dir;
9279 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9282 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9284 enum ring_buffer_flags rb_flags;
9286 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9290 buf->buffer = ring_buffer_alloc(size, rb_flags);
9294 buf->data = alloc_percpu(struct trace_array_cpu);
9296 ring_buffer_free(buf->buffer);
9301 /* Allocate the first page for all buffers */
9302 set_buffer_entries(&tr->array_buffer,
9303 ring_buffer_size(tr->array_buffer.buffer, 0));
9308 static void free_trace_buffer(struct array_buffer *buf)
9311 ring_buffer_free(buf->buffer);
9313 free_percpu(buf->data);
9318 static int allocate_trace_buffers(struct trace_array *tr, int size)
9322 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9326 #ifdef CONFIG_TRACER_MAX_TRACE
9327 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9328 allocate_snapshot ? size : 1);
9329 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9330 free_trace_buffer(&tr->array_buffer);
9333 tr->allocated_snapshot = allocate_snapshot;
9335 allocate_snapshot = false;
9341 static void free_trace_buffers(struct trace_array *tr)
9346 free_trace_buffer(&tr->array_buffer);
9348 #ifdef CONFIG_TRACER_MAX_TRACE
9349 free_trace_buffer(&tr->max_buffer);
9353 static void init_trace_flags_index(struct trace_array *tr)
9357 /* Used by the trace options files */
9358 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9359 tr->trace_flags_index[i] = i;
9362 static void __update_tracer_options(struct trace_array *tr)
9366 for (t = trace_types; t; t = t->next)
9367 add_tracer_options(tr, t);
9370 static void update_tracer_options(struct trace_array *tr)
9372 mutex_lock(&trace_types_lock);
9373 tracer_options_updated = true;
9374 __update_tracer_options(tr);
9375 mutex_unlock(&trace_types_lock);
9378 /* Must have trace_types_lock held */
9379 struct trace_array *trace_array_find(const char *instance)
9381 struct trace_array *tr, *found = NULL;
9383 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9384 if (tr->name && strcmp(tr->name, instance) == 0) {
9393 struct trace_array *trace_array_find_get(const char *instance)
9395 struct trace_array *tr;
9397 mutex_lock(&trace_types_lock);
9398 tr = trace_array_find(instance);
9401 mutex_unlock(&trace_types_lock);
9406 static int trace_array_create_dir(struct trace_array *tr)
9410 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9414 ret = event_trace_add_tracer(tr->dir, tr);
9416 tracefs_remove(tr->dir);
9420 init_tracer_tracefs(tr, tr->dir);
9421 __update_tracer_options(tr);
9426 static struct trace_array *trace_array_create(const char *name)
9428 struct trace_array *tr;
9432 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9434 return ERR_PTR(ret);
9436 tr->name = kstrdup(name, GFP_KERNEL);
9440 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9443 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9445 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9447 raw_spin_lock_init(&tr->start_lock);
9449 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9451 tr->current_trace = &nop_trace;
9453 INIT_LIST_HEAD(&tr->systems);
9454 INIT_LIST_HEAD(&tr->events);
9455 INIT_LIST_HEAD(&tr->hist_vars);
9456 INIT_LIST_HEAD(&tr->err_log);
9458 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9461 if (ftrace_allocate_ftrace_ops(tr) < 0)
9464 ftrace_init_trace_array(tr);
9466 init_trace_flags_index(tr);
9468 if (trace_instance_dir) {
9469 ret = trace_array_create_dir(tr);
9473 __trace_early_add_events(tr);
9475 list_add(&tr->list, &ftrace_trace_arrays);
9482 ftrace_free_ftrace_ops(tr);
9483 free_trace_buffers(tr);
9484 free_cpumask_var(tr->tracing_cpumask);
9488 return ERR_PTR(ret);
9491 static int instance_mkdir(const char *name)
9493 struct trace_array *tr;
9496 mutex_lock(&event_mutex);
9497 mutex_lock(&trace_types_lock);
9500 if (trace_array_find(name))
9503 tr = trace_array_create(name);
9505 ret = PTR_ERR_OR_ZERO(tr);
9508 mutex_unlock(&trace_types_lock);
9509 mutex_unlock(&event_mutex);
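/*
 * Usage sketch (illustrative): instances are created and removed from user
 * space through the instances directory; rmdir fails with -EBUSY while the
 * instance still has users:
 *
 *     # mkdir /sys/kernel/tracing/instances/foo
 *     # rmdir /sys/kernel/tracing/instances/foo
 */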
9514 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9515 * @name: The name of the trace array to be looked up/created.
9517 * Returns a pointer to the trace array with the given name, or NULL if
9518 * it cannot be created.
9520 * NOTE: This function increments the reference counter associated with the
9521 * trace array returned. This makes sure it cannot be freed while in use.
9522 * Use trace_array_put() once the trace array is no longer needed.
9523 * If the trace_array is to be freed, trace_array_destroy() needs to
9524 * be called after the trace_array_put(), or simply let user space delete
9525 * it from the tracefs instances directory. But until the
9526 * trace_array_put() is called, user space can not delete it.
9529 struct trace_array *trace_array_get_by_name(const char *name)
9531 struct trace_array *tr;
9533 mutex_lock(&event_mutex);
9534 mutex_lock(&trace_types_lock);
9536 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9537 if (tr->name && strcmp(tr->name, name) == 0)
9541 tr = trace_array_create(name);
9549 mutex_unlock(&trace_types_lock);
9550 mutex_unlock(&event_mutex);
9553 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
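/*
 * In-kernel sketch (illustrative; "my_driver" is a hypothetical name, and
 * this loosely mirrors the in-tree sample-trace-array module): a driver can
 * create or look up its own instance, print into it, and later drop its
 * reference and destroy it:
 *
 *     struct trace_array *tr;
 *
 *     tr = trace_array_get_by_name("my_driver");
 *     if (tr) {
 *             trace_array_init_printk(tr);
 *             trace_array_printk(tr, _THIS_IP_, "probe done\n");
 *             trace_array_put(tr);
 *             trace_array_destroy(tr);
 *     }
 */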
9555 static int __remove_instance(struct trace_array *tr)
9559 /* Reference counter for a newly created trace array = 1. */
9560 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9563 list_del(&tr->list);
9565 /* Disable all the flags that were enabled coming in */
9566 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9567 if ((1 << i) & ZEROED_TRACE_FLAGS)
9568 set_tracer_flag(tr, 1 << i, 0);
9571 tracing_set_nop(tr);
9572 clear_ftrace_function_probes(tr);
9573 event_trace_del_tracer(tr);
9574 ftrace_clear_pids(tr);
9575 ftrace_destroy_function_files(tr);
9576 tracefs_remove(tr->dir);
9577 free_percpu(tr->last_func_repeats);
9578 free_trace_buffers(tr);
9579 clear_tracing_err_log(tr);
9581 for (i = 0; i < tr->nr_topts; i++) {
9582 kfree(tr->topts[i].topts);
9586 free_cpumask_var(tr->tracing_cpumask);
9593 int trace_array_destroy(struct trace_array *this_tr)
9595 struct trace_array *tr;
9601 mutex_lock(&event_mutex);
9602 mutex_lock(&trace_types_lock);
9606 /* Making sure trace array exists before destroying it. */
9607 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9608 if (tr == this_tr) {
9609 ret = __remove_instance(tr);
9614 mutex_unlock(&trace_types_lock);
9615 mutex_unlock(&event_mutex);
9619 EXPORT_SYMBOL_GPL(trace_array_destroy);
9621 static int instance_rmdir(const char *name)
9623 struct trace_array *tr;
9626 mutex_lock(&event_mutex);
9627 mutex_lock(&trace_types_lock);
9630 tr = trace_array_find(name);
9632 ret = __remove_instance(tr);
9634 mutex_unlock(&trace_types_lock);
9635 mutex_unlock(&event_mutex);
9640 static __init void create_trace_instances(struct dentry *d_tracer)
9642 struct trace_array *tr;
9644 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9647 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9650 mutex_lock(&event_mutex);
9651 mutex_lock(&trace_types_lock);
9653 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9656 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9657 "Failed to create instance directory\n"))
9661 mutex_unlock(&trace_types_lock);
9662 mutex_unlock(&event_mutex);
9666 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9668 struct trace_event_file *file;
9671 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9672 tr, &show_traces_fops);
9674 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9675 tr, &set_tracer_fops);
9677 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9678 tr, &tracing_cpumask_fops);
9680 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9681 tr, &tracing_iter_fops);
9683 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9686 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9687 tr, &tracing_pipe_fops);
9689 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9690 tr, &tracing_entries_fops);
9692 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9693 tr, &tracing_total_entries_fops);
9695 trace_create_file("free_buffer", 0200, d_tracer,
9696 tr, &tracing_free_buffer_fops);
9698 trace_create_file("trace_marker", 0220, d_tracer,
9699 tr, &tracing_mark_fops);
9701 file = __find_event_file(tr, "ftrace", "print");
9702 if (file && file->dir)
9703 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9704 file, &event_trigger_fops);
9705 tr->trace_marker_file = file;
9707 trace_create_file("trace_marker_raw", 0220, d_tracer,
9708 tr, &tracing_mark_raw_fops);
9710 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9713 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9714 tr, &rb_simple_fops);
9716 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9717 &trace_time_stamp_mode_fops);
9719 tr->buffer_percent = 50;
9721 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9722 tr, &buffer_percent_fops);
9724 create_trace_options_dir(tr);
9726 #ifdef CONFIG_TRACER_MAX_TRACE
9727 trace_create_maxlat_file(tr, d_tracer);
9730 if (ftrace_create_function_files(tr, d_tracer))
9731 MEM_FAIL(1, "Could not allocate function filter files");
9733 #ifdef CONFIG_TRACER_SNAPSHOT
9734 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9735 tr, &snapshot_fops);
9738 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9739 tr, &tracing_err_log_fops);
9741 for_each_tracing_cpu(cpu)
9742 tracing_init_tracefs_percpu(tr, cpu);
9744 ftrace_init_tracefs(tr, d_tracer);
9747 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9749 struct vfsmount *mnt;
9750 struct file_system_type *type;
9753 * To maintain backward compatibility for tools that mount
9754 * debugfs to get to the tracing facility, tracefs is automatically
9755 * mounted to the debugfs/tracing directory.
9757 type = get_fs_type("tracefs");
9760 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9761 put_filesystem(type);
9770 * tracing_init_dentry - initialize top level trace array
9772 * This is called when creating files or directories in the tracing
9773 * directory. It is called via fs_initcall() by any of the boot up code
9774 * and returns zero once the top level tracing directory has been set up.
9776 int tracing_init_dentry(void)
9778 struct trace_array *tr = &global_trace;
9780 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9781 pr_warn("Tracing disabled due to lockdown\n");
9785 /* The top level trace array uses NULL as parent */
9789 if (WARN_ON(!tracefs_initialized()))
9793 * As there may still be users that expect the tracing
9794 * files to exist in debugfs/tracing, we must automount
9795 * the tracefs file system there, so older tools still
9796 * work with the newer kernel.
9798 tr->dir = debugfs_create_automount("tracing", NULL,
9799 trace_automount, NULL);
9804 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9805 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9807 static struct workqueue_struct *eval_map_wq __initdata;
9808 static struct work_struct eval_map_work __initdata;
9809 static struct work_struct tracerfs_init_work __initdata;
9811 static void __init eval_map_work_func(struct work_struct *work)
9815 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9816 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9819 static int __init trace_eval_init(void)
9821 INIT_WORK(&eval_map_work, eval_map_work_func);
9823 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9825 pr_err("Unable to allocate eval_map_wq\n");
9827 eval_map_work_func(&eval_map_work);
9831 queue_work(eval_map_wq, &eval_map_work);
9835 subsys_initcall(trace_eval_init);
9837 static int __init trace_eval_sync(void)
9839 /* Make sure the eval map updates are finished */
9841 destroy_workqueue(eval_map_wq);
9845 late_initcall_sync(trace_eval_sync);
9848 #ifdef CONFIG_MODULES
9849 static void trace_module_add_evals(struct module *mod)
9851 if (!mod->num_trace_evals)
9855 * Modules with bad taint do not have events created; do
9856 * not bother with enums either.
9858 if (trace_module_has_bad_taint(mod))
9861 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9864 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9865 static void trace_module_remove_evals(struct module *mod)
9867 union trace_eval_map_item *map;
9868 union trace_eval_map_item **last = &trace_eval_maps;
9870 if (!mod->num_trace_evals)
9873 mutex_lock(&trace_eval_mutex);
9875 map = trace_eval_maps;
9878 if (map->head.mod == mod)
9880 map = trace_eval_jmp_to_tail(map);
9881 last = &map->tail.next;
9882 map = map->tail.next;
9887 *last = trace_eval_jmp_to_tail(map)->tail.next;
9890 mutex_unlock(&trace_eval_mutex);
9893 static inline void trace_module_remove_evals(struct module *mod) { }
9894 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9896 static int trace_module_notify(struct notifier_block *self,
9897 unsigned long val, void *data)
9899 struct module *mod = data;
9902 case MODULE_STATE_COMING:
9903 trace_module_add_evals(mod);
9905 case MODULE_STATE_GOING:
9906 trace_module_remove_evals(mod);
9913 static struct notifier_block trace_module_nb = {
9914 .notifier_call = trace_module_notify,
9917 #endif /* CONFIG_MODULES */

static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);

static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops)
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_DONE;
}
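
/*
 * For illustration: this path is armed with the "ftrace_dump_on_oops"
 * kernel command line option (or the kernel.ftrace_dump_on_oops sysctl),
 * e.g.:
 *
 *   ftrace_dump_on_oops           # dump every CPU's buffer on oops/panic
 *   ftrace_dump_on_oops=orig_cpu  # dump only the CPU that triggered it
 */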

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
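
/*
 * Note: iter->temp and iter->fmt above are pointed at static buffers
 * because this iterator is reused by ftrace_dump(), which may run in
 * oops, panic or NMI context where memory allocation cannot be relied
 * upon.
 */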

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill in all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
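
/*
 * For illustration: besides the oops/panic notifiers registered above,
 * this dump can be triggered by hand with the magic SysRq 'z' key, e.g.:
 *
 *   echo z > /proc/sysrq-trigger
 *
 * which ends up calling ftrace_dump(DUMP_ALL).
 */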

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
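
/*
 * For illustration: this helper backs the write() path of the dynamic
 * event files (kprobe_events, uprobe_events, dynamic_events, ...), with
 * createfn() being the per-subsystem parser. An example use from user
 * space (probe name and symbol are only examples):
 *
 *   echo 'p:my_probe do_sys_openat2' >> /sys/kernel/tracing/kprobe_events
 *
 * Each newline-terminated line, with '#' comments stripped, is handed to
 * createfn().
 */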

#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) != NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif

__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
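
/*
 * For illustration: boot instances come from the "trace_instance="
 * kernel command line option, where the first item is the instance name
 * and the rest are events to enable in it (names below are examples
 * only):
 *
 *   trace_instance=foo,sched:sched_switch,irq_handler_entry
 */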

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
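
/*
 * For illustration: snapshot_at_boot is set by the "ftrace_boot_snapshot"
 * kernel command line option; with it, every instance that allocated a
 * snapshot buffer gets its boot-time trace preserved here and can be read
 * back later from that instance's "snapshot" file in tracefs.
 */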

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
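
/*
 * For illustration: whatever default is chosen above can be inspected
 * and overridden at run time through the tracefs "trace_clock" file
 * (the clock shown in brackets is the active one):
 *
 *   cat /sys/kernel/tracing/trace_clock
 *   echo global > /sys/kernel/tracing/trace_clock
 */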

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);