1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 #include "trace_output.h"
58 * On boot up, the ring buffer is set to the minimum size, so that
59 * we do not waste memory on systems that are not using tracing.
61 bool ring_buffer_expanded;
63 #ifdef CONFIG_FTRACE_STARTUP_TEST
65 * We need to change this state when a selftest is running.
66 * A selftest will lurk into the ring-buffer to count the
67 * entries inserted during the selftest although some concurrent
68 * insertions into the ring-buffer such as trace_printk could occurred
69 * at the same time, giving false positive or negative results.
71 static bool __read_mostly tracing_selftest_running;
74 * If boot-time tracing including tracers/events via kernel cmdline
75 * is running, we do not want to run SELFTEST.
77 bool __read_mostly tracing_selftest_disabled;
79 void __init disable_tracing_selftest(const char *reason)
81 if (!tracing_selftest_disabled) {
82 tracing_selftest_disabled = true;
83 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 #define tracing_selftest_running 0
88 #define tracing_selftest_disabled 0
91 /* Pipe tracepoints to printk */
92 static struct trace_iterator *tracepoint_print_iter;
93 int tracepoint_printk;
94 static bool tracepoint_printk_stop_on_boot __initdata;
95 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
97 /* For tracers that don't implement custom flags */
98 static struct tracer_opt dummy_tracer_opt[] = {
103 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
109 * To prevent the comm cache from being overwritten when no
110 * tracing is active, only save the comm when a trace event
113 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
116 * Kill all tracing for good (never come back).
117 * It is initialized to 1 but will turn to zero if the initialization
118 * of the tracer is successful. But that is the only place that sets
121 static int tracing_disabled = 1;
123 cpumask_var_t __read_mostly tracing_buffer_mask;
126 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
128 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
129 * is set, then ftrace_dump is called. This will output the contents
130 * of the ftrace buffers to the console. This is very useful for
131 * capturing traces that lead to crashes and outputing it to a
134 * It is default off, but you can enable it with either specifying
135 * "ftrace_dump_on_oops" in the kernel command line, or setting
136 * /proc/sys/kernel/ftrace_dump_on_oops
137 * Set 1 if you want to dump buffers of all CPUs
138 * Set 2 if you want to dump the buffer of the CPU that triggered oops
141 enum ftrace_dump_mode ftrace_dump_on_oops;
143 /* When set, tracing will stop when a WARN*() is hit */
144 int __disable_trace_on_warning;
146 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
147 /* Map of enums to their values, for "eval_map" file */
148 struct trace_eval_map_head {
150 unsigned long length;
153 union trace_eval_map_item;
155 struct trace_eval_map_tail {
157 * "end" is first and points to NULL as it must be different
158 * than "mod" or "eval_string"
160 union trace_eval_map_item *next;
161 const char *end; /* points to NULL */
164 static DEFINE_MUTEX(trace_eval_mutex);
167 * The trace_eval_maps are saved in an array with two extra elements,
168 * one at the beginning, and one at the end. The beginning item contains
169 * the count of the saved maps (head.length), and the module they
170 * belong to if not built in (head.mod). The ending item contains a
171 * pointer to the next array of saved eval_map items.
173 union trace_eval_map_item {
174 struct trace_eval_map map;
175 struct trace_eval_map_head head;
176 struct trace_eval_map_tail tail;
179 static union trace_eval_map_item *trace_eval_maps;
180 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
182 int tracing_set_tracer(struct trace_array *tr, const char *buf);
183 static void ftrace_trace_userstack(struct trace_array *tr,
184 struct trace_buffer *buffer,
185 unsigned int trace_ctx);
187 #define MAX_TRACER_SIZE 100
188 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
189 static char *default_bootup_tracer;
191 static bool allocate_snapshot;
192 static bool snapshot_at_boot;
194 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_instance_index;
197 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
198 static int boot_snapshot_index;
200 static int __init set_cmdline_ftrace(char *str)
202 strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
203 default_bootup_tracer = bootup_tracer_buf;
204 /* We are using ftrace early, expand it */
205 ring_buffer_expanded = true;
208 __setup("ftrace=", set_cmdline_ftrace);
210 static int __init set_ftrace_dump_on_oops(char *str)
212 if (*str++ != '=' || !*str || !strcmp("1", str)) {
213 ftrace_dump_on_oops = DUMP_ALL;
217 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
218 ftrace_dump_on_oops = DUMP_ORIG;
224 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
226 static int __init stop_trace_on_warning(char *str)
228 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
229 __disable_trace_on_warning = 1;
232 __setup("traceoff_on_warning", stop_trace_on_warning);
234 static int __init boot_alloc_snapshot(char *str)
236 char *slot = boot_snapshot_info + boot_snapshot_index;
237 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
242 if (strlen(str) >= left)
245 ret = snprintf(slot, left, "%s\t", str);
246 boot_snapshot_index += ret;
248 allocate_snapshot = true;
249 /* We also need the main ring buffer expanded */
250 ring_buffer_expanded = true;
254 __setup("alloc_snapshot", boot_alloc_snapshot);
257 static int __init boot_snapshot(char *str)
259 snapshot_at_boot = true;
260 boot_alloc_snapshot(str);
263 __setup("ftrace_boot_snapshot", boot_snapshot);
266 static int __init boot_instance(char *str)
268 char *slot = boot_instance_info + boot_instance_index;
269 int left = sizeof(boot_instance_info) - boot_instance_index;
272 if (strlen(str) >= left)
275 ret = snprintf(slot, left, "%s\t", str);
276 boot_instance_index += ret;
280 __setup("trace_instance=", boot_instance);
283 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
285 static int __init set_trace_boot_options(char *str)
287 strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
290 __setup("trace_options=", set_trace_boot_options);
292 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
293 static char *trace_boot_clock __initdata;
295 static int __init set_trace_boot_clock(char *str)
297 strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
298 trace_boot_clock = trace_boot_clock_buf;
301 __setup("trace_clock=", set_trace_boot_clock);
303 static int __init set_tracepoint_printk(char *str)
305 /* Ignore the "tp_printk_stop_on_boot" param */
309 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
310 tracepoint_printk = 1;
313 __setup("tp_printk", set_tracepoint_printk);
315 static int __init set_tracepoint_printk_stop(char *str)
317 tracepoint_printk_stop_on_boot = true;
320 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
322 unsigned long long ns2usecs(u64 nsec)
330 trace_process_export(struct trace_export *export,
331 struct ring_buffer_event *event, int flag)
333 struct trace_entry *entry;
334 unsigned int size = 0;
336 if (export->flags & flag) {
337 entry = ring_buffer_event_data(event);
338 size = ring_buffer_event_length(event);
339 export->write(export, entry, size);
343 static DEFINE_MUTEX(ftrace_export_lock);
345 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
347 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
348 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
349 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
351 static inline void ftrace_exports_enable(struct trace_export *export)
353 if (export->flags & TRACE_EXPORT_FUNCTION)
354 static_branch_inc(&trace_function_exports_enabled);
356 if (export->flags & TRACE_EXPORT_EVENT)
357 static_branch_inc(&trace_event_exports_enabled);
359 if (export->flags & TRACE_EXPORT_MARKER)
360 static_branch_inc(&trace_marker_exports_enabled);
363 static inline void ftrace_exports_disable(struct trace_export *export)
365 if (export->flags & TRACE_EXPORT_FUNCTION)
366 static_branch_dec(&trace_function_exports_enabled);
368 if (export->flags & TRACE_EXPORT_EVENT)
369 static_branch_dec(&trace_event_exports_enabled);
371 if (export->flags & TRACE_EXPORT_MARKER)
372 static_branch_dec(&trace_marker_exports_enabled);
375 static void ftrace_exports(struct ring_buffer_event *event, int flag)
377 struct trace_export *export;
379 preempt_disable_notrace();
381 export = rcu_dereference_raw_check(ftrace_exports_list);
383 trace_process_export(export, event, flag);
384 export = rcu_dereference_raw_check(export->next);
387 preempt_enable_notrace();
391 add_trace_export(struct trace_export **list, struct trace_export *export)
393 rcu_assign_pointer(export->next, *list);
395 * We are entering export into the list but another
396 * CPU might be walking that list. We need to make sure
397 * the export->next pointer is valid before another CPU sees
398 * the export pointer included into the list.
400 rcu_assign_pointer(*list, export);
404 rm_trace_export(struct trace_export **list, struct trace_export *export)
406 struct trace_export **p;
408 for (p = list; *p != NULL; p = &(*p)->next)
415 rcu_assign_pointer(*p, (*p)->next);
421 add_ftrace_export(struct trace_export **list, struct trace_export *export)
423 ftrace_exports_enable(export);
425 add_trace_export(list, export);
429 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
433 ret = rm_trace_export(list, export);
434 ftrace_exports_disable(export);
439 int register_ftrace_export(struct trace_export *export)
441 if (WARN_ON_ONCE(!export->write))
444 mutex_lock(&ftrace_export_lock);
446 add_ftrace_export(&ftrace_exports_list, export);
448 mutex_unlock(&ftrace_export_lock);
452 EXPORT_SYMBOL_GPL(register_ftrace_export);
454 int unregister_ftrace_export(struct trace_export *export)
458 mutex_lock(&ftrace_export_lock);
460 ret = rm_ftrace_export(&ftrace_exports_list, export);
462 mutex_unlock(&ftrace_export_lock);
466 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
468 /* trace_flags holds trace_options default values */
469 #define TRACE_DEFAULT_FLAGS \
470 (FUNCTION_DEFAULT_FLAGS | \
471 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
472 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
473 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
474 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
477 /* trace_options that are only supported by global_trace */
478 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
479 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
481 /* trace_flags that are default zero for instances */
482 #define ZEROED_TRACE_FLAGS \
483 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
486 * The global_trace is the descriptor that holds the top-level tracing
487 * buffers for the live tracing.
489 static struct trace_array global_trace = {
490 .trace_flags = TRACE_DEFAULT_FLAGS,
493 LIST_HEAD(ftrace_trace_arrays);
495 int trace_array_get(struct trace_array *this_tr)
497 struct trace_array *tr;
500 mutex_lock(&trace_types_lock);
501 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
508 mutex_unlock(&trace_types_lock);
513 static void __trace_array_put(struct trace_array *this_tr)
515 WARN_ON(!this_tr->ref);
520 * trace_array_put - Decrement the reference counter for this trace array.
521 * @this_tr : pointer to the trace array
523 * NOTE: Use this when we no longer need the trace array returned by
524 * trace_array_get_by_name(). This ensures the trace array can be later
528 void trace_array_put(struct trace_array *this_tr)
533 mutex_lock(&trace_types_lock);
534 __trace_array_put(this_tr);
535 mutex_unlock(&trace_types_lock);
537 EXPORT_SYMBOL_GPL(trace_array_put);
539 int tracing_check_open_get_tr(struct trace_array *tr)
543 ret = security_locked_down(LOCKDOWN_TRACEFS);
547 if (tracing_disabled)
550 if (tr && trace_array_get(tr) < 0)
556 int call_filter_check_discard(struct trace_event_call *call, void *rec,
557 struct trace_buffer *buffer,
558 struct ring_buffer_event *event)
560 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
561 !filter_match_preds(call->filter, rec)) {
562 __trace_event_discard_commit(buffer, event);
570 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
571 * @filtered_pids: The list of pids to check
572 * @search_pid: The PID to find in @filtered_pids
574 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
577 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
579 return trace_pid_list_is_set(filtered_pids, search_pid);
583 * trace_ignore_this_task - should a task be ignored for tracing
584 * @filtered_pids: The list of pids to check
585 * @filtered_no_pids: The list of pids not to be traced
586 * @task: The task that should be ignored if not filtered
588 * Checks if @task should be traced or not from @filtered_pids.
589 * Returns true if @task should *NOT* be traced.
590 * Returns false if @task should be traced.
593 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
594 struct trace_pid_list *filtered_no_pids,
595 struct task_struct *task)
598 * If filtered_no_pids is not empty, and the task's pid is listed
599 * in filtered_no_pids, then return true.
600 * Otherwise, if filtered_pids is empty, that means we can
601 * trace all tasks. If it has content, then only trace pids
602 * within filtered_pids.
605 return (filtered_pids &&
606 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
608 trace_find_filtered_pid(filtered_no_pids, task->pid));
612 * trace_filter_add_remove_task - Add or remove a task from a pid_list
613 * @pid_list: The list to modify
614 * @self: The current task for fork or NULL for exit
615 * @task: The task to add or remove
617 * If adding a task, if @self is defined, the task is only added if @self
618 * is also included in @pid_list. This happens on fork and tasks should
619 * only be added when the parent is listed. If @self is NULL, then the
620 * @task pid will be removed from the list, which would happen on exit
623 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
624 struct task_struct *self,
625 struct task_struct *task)
630 /* For forks, we only add if the forking task is listed */
632 if (!trace_find_filtered_pid(pid_list, self->pid))
636 /* "self" is set for forks, and NULL for exits */
638 trace_pid_list_set(pid_list, task->pid);
640 trace_pid_list_clear(pid_list, task->pid);
644 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
645 * @pid_list: The pid list to show
646 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
647 * @pos: The position of the file
649 * This is used by the seq_file "next" operation to iterate the pids
650 * listed in a trace_pid_list structure.
652 * Returns the pid+1 as we want to display pid of zero, but NULL would
653 * stop the iteration.
655 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
657 long pid = (unsigned long)v;
662 /* pid already is +1 of the actual previous bit */
663 if (trace_pid_list_next(pid_list, pid, &next) < 0)
668 /* Return pid + 1 to allow zero to be represented */
669 return (void *)(pid + 1);
673 * trace_pid_start - Used for seq_file to start reading pid lists
674 * @pid_list: The pid list to show
675 * @pos: The position of the file
677 * This is used by seq_file "start" operation to start the iteration
680 * Returns the pid+1 as we want to display pid of zero, but NULL would
681 * stop the iteration.
683 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
689 if (trace_pid_list_first(pid_list, &first) < 0)
694 /* Return pid + 1 so that zero can be the exit value */
695 for (pid++; pid && l < *pos;
696 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
702 * trace_pid_show - show the current pid in seq_file processing
703 * @m: The seq_file structure to write into
704 * @v: A void pointer of the pid (+1) value to display
706 * Can be directly used by seq_file operations to display the current
709 int trace_pid_show(struct seq_file *m, void *v)
711 unsigned long pid = (unsigned long)v - 1;
713 seq_printf(m, "%lu\n", pid);
717 /* 128 should be much more than enough */
718 #define PID_BUF_SIZE 127
720 int trace_pid_write(struct trace_pid_list *filtered_pids,
721 struct trace_pid_list **new_pid_list,
722 const char __user *ubuf, size_t cnt)
724 struct trace_pid_list *pid_list;
725 struct trace_parser parser;
733 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
737 * Always recreate a new array. The write is an all or nothing
738 * operation. Always create a new array when adding new pids by
739 * the user. If the operation fails, then the current list is
742 pid_list = trace_pid_list_alloc();
744 trace_parser_put(&parser);
749 /* copy the current bits to the new max */
750 ret = trace_pid_list_first(filtered_pids, &pid);
752 trace_pid_list_set(pid_list, pid);
753 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
763 ret = trace_get_user(&parser, ubuf, cnt, &pos);
771 if (!trace_parser_loaded(&parser))
775 if (kstrtoul(parser.buffer, 0, &val))
780 if (trace_pid_list_set(pid_list, pid) < 0) {
786 trace_parser_clear(&parser);
789 trace_parser_put(&parser);
792 trace_pid_list_free(pid_list);
797 /* Cleared the list of pids */
798 trace_pid_list_free(pid_list);
802 *new_pid_list = pid_list;
807 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
811 /* Early boot up does not have a buffer yet */
813 return trace_clock_local();
815 ts = ring_buffer_time_stamp(buf->buffer);
816 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
821 u64 ftrace_now(int cpu)
823 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
827 * tracing_is_enabled - Show if global_trace has been enabled
829 * Shows if the global trace has been enabled or not. It uses the
830 * mirror flag "buffer_disabled" to be used in fast paths such as for
831 * the irqsoff tracer. But it may be inaccurate due to races. If you
832 * need to know the accurate state, use tracing_is_on() which is a little
833 * slower, but accurate.
835 int tracing_is_enabled(void)
838 * For quick access (irqsoff uses this in fast path), just
839 * return the mirror variable of the state of the ring buffer.
840 * It's a little racy, but we don't really care.
843 return !global_trace.buffer_disabled;
847 * trace_buf_size is the size in bytes that is allocated
848 * for a buffer. Note, the number of bytes is always rounded
851 * This number is purposely set to a low number of 16384.
852 * If the dump on oops happens, it will be much appreciated
853 * to not have to wait for all that output. Anyway this can be
854 * boot time and run time configurable.
856 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
858 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
860 /* trace_types holds a link list of available tracers. */
861 static struct tracer *trace_types __read_mostly;
864 * trace_types_lock is used to protect the trace_types list.
866 DEFINE_MUTEX(trace_types_lock);
869 * serialize the access of the ring buffer
871 * ring buffer serializes readers, but it is low level protection.
872 * The validity of the events (which returns by ring_buffer_peek() ..etc)
873 * are not protected by ring buffer.
875 * The content of events may become garbage if we allow other process consumes
876 * these events concurrently:
877 * A) the page of the consumed events may become a normal page
878 * (not reader page) in ring buffer, and this page will be rewritten
879 * by events producer.
880 * B) The page of the consumed events may become a page for splice_read,
881 * and this page will be returned to system.
883 * These primitives allow multi process access to different cpu ring buffer
886 * These primitives don't distinguish read-only and read-consume access.
887 * Multi read-only access are also serialized.
891 static DECLARE_RWSEM(all_cpu_access_lock);
892 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
894 static inline void trace_access_lock(int cpu)
896 if (cpu == RING_BUFFER_ALL_CPUS) {
897 /* gain it for accessing the whole ring buffer. */
898 down_write(&all_cpu_access_lock);
900 /* gain it for accessing a cpu ring buffer. */
902 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
903 down_read(&all_cpu_access_lock);
905 /* Secondly block other access to this @cpu ring buffer. */
906 mutex_lock(&per_cpu(cpu_access_lock, cpu));
910 static inline void trace_access_unlock(int cpu)
912 if (cpu == RING_BUFFER_ALL_CPUS) {
913 up_write(&all_cpu_access_lock);
915 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
916 up_read(&all_cpu_access_lock);
920 static inline void trace_access_lock_init(void)
924 for_each_possible_cpu(cpu)
925 mutex_init(&per_cpu(cpu_access_lock, cpu));
930 static DEFINE_MUTEX(access_lock);
932 static inline void trace_access_lock(int cpu)
935 mutex_lock(&access_lock);
938 static inline void trace_access_unlock(int cpu)
941 mutex_unlock(&access_lock);
944 static inline void trace_access_lock_init(void)
950 #ifdef CONFIG_STACKTRACE
951 static void __ftrace_trace_stack(struct trace_buffer *buffer,
952 unsigned int trace_ctx,
953 int skip, struct pt_regs *regs);
954 static inline void ftrace_trace_stack(struct trace_array *tr,
955 struct trace_buffer *buffer,
956 unsigned int trace_ctx,
957 int skip, struct pt_regs *regs);
960 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
961 unsigned int trace_ctx,
962 int skip, struct pt_regs *regs)
965 static inline void ftrace_trace_stack(struct trace_array *tr,
966 struct trace_buffer *buffer,
967 unsigned long trace_ctx,
968 int skip, struct pt_regs *regs)
974 static __always_inline void
975 trace_event_setup(struct ring_buffer_event *event,
976 int type, unsigned int trace_ctx)
978 struct trace_entry *ent = ring_buffer_event_data(event);
980 tracing_generic_entry_update(ent, type, trace_ctx);
983 static __always_inline struct ring_buffer_event *
984 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
987 unsigned int trace_ctx)
989 struct ring_buffer_event *event;
991 event = ring_buffer_lock_reserve(buffer, len);
993 trace_event_setup(event, type, trace_ctx);
998 void tracer_tracing_on(struct trace_array *tr)
1000 if (tr->array_buffer.buffer)
1001 ring_buffer_record_on(tr->array_buffer.buffer);
1003 * This flag is looked at when buffers haven't been allocated
1004 * yet, or by some tracers (like irqsoff), that just want to
1005 * know if the ring buffer has been disabled, but it can handle
1006 * races of where it gets disabled but we still do a record.
1007 * As the check is in the fast path of the tracers, it is more
1008 * important to be fast than accurate.
1010 tr->buffer_disabled = 0;
1011 /* Make the flag seen by readers */
1016 * tracing_on - enable tracing buffers
1018 * This function enables tracing buffers that may have been
1019 * disabled with tracing_off.
1021 void tracing_on(void)
1023 tracer_tracing_on(&global_trace);
1025 EXPORT_SYMBOL_GPL(tracing_on);
1028 static __always_inline void
1029 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1031 __this_cpu_write(trace_taskinfo_save, true);
1033 /* If this is the temp buffer, we need to commit fully */
1034 if (this_cpu_read(trace_buffered_event) == event) {
1035 /* Length is in event->array[0] */
1036 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1037 /* Release the temp buffer */
1038 this_cpu_dec(trace_buffered_event_cnt);
1039 /* ring_buffer_unlock_commit() enables preemption */
1040 preempt_enable_notrace();
1042 ring_buffer_unlock_commit(buffer);
1045 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1046 const char *str, int size)
1048 struct ring_buffer_event *event;
1049 struct trace_buffer *buffer;
1050 struct print_entry *entry;
1051 unsigned int trace_ctx;
1054 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1057 if (unlikely(tracing_selftest_running && tr == &global_trace))
1060 if (unlikely(tracing_disabled))
1063 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1065 trace_ctx = tracing_gen_ctx();
1066 buffer = tr->array_buffer.buffer;
1067 ring_buffer_nest_start(buffer);
1068 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1075 entry = ring_buffer_event_data(event);
1078 memcpy(&entry->buf, str, size);
1080 /* Add a newline if necessary */
1081 if (entry->buf[size - 1] != '\n') {
1082 entry->buf[size] = '\n';
1083 entry->buf[size + 1] = '\0';
1085 entry->buf[size] = '\0';
1087 __buffer_unlock_commit(buffer, event);
1088 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1090 ring_buffer_nest_end(buffer);
1093 EXPORT_SYMBOL_GPL(__trace_array_puts);
1096 * __trace_puts - write a constant string into the trace buffer.
1097 * @ip: The address of the caller
1098 * @str: The constant string to write
1099 * @size: The size of the string.
1101 int __trace_puts(unsigned long ip, const char *str, int size)
1103 return __trace_array_puts(&global_trace, ip, str, size);
1105 EXPORT_SYMBOL_GPL(__trace_puts);
1108 * __trace_bputs - write the pointer to a constant string into trace buffer
1109 * @ip: The address of the caller
1110 * @str: The constant string to write to the buffer to
1112 int __trace_bputs(unsigned long ip, const char *str)
1114 struct ring_buffer_event *event;
1115 struct trace_buffer *buffer;
1116 struct bputs_entry *entry;
1117 unsigned int trace_ctx;
1118 int size = sizeof(struct bputs_entry);
1121 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1124 if (unlikely(tracing_selftest_running || tracing_disabled))
1127 trace_ctx = tracing_gen_ctx();
1128 buffer = global_trace.array_buffer.buffer;
1130 ring_buffer_nest_start(buffer);
1131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1136 entry = ring_buffer_event_data(event);
1140 __buffer_unlock_commit(buffer, event);
1141 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1145 ring_buffer_nest_end(buffer);
1148 EXPORT_SYMBOL_GPL(__trace_bputs);
1150 #ifdef CONFIG_TRACER_SNAPSHOT
1151 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1154 struct tracer *tracer = tr->current_trace;
1155 unsigned long flags;
1158 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1159 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1163 if (!tr->allocated_snapshot) {
1164 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1165 trace_array_puts(tr, "*** stopping trace here! ***\n");
1166 tracer_tracing_off(tr);
1170 /* Note, snapshot can not be used when the tracer uses it */
1171 if (tracer->use_max_tr) {
1172 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1173 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1177 local_irq_save(flags);
1178 update_max_tr(tr, current, smp_processor_id(), cond_data);
1179 local_irq_restore(flags);
1182 void tracing_snapshot_instance(struct trace_array *tr)
1184 tracing_snapshot_instance_cond(tr, NULL);
1188 * tracing_snapshot - take a snapshot of the current buffer.
1190 * This causes a swap between the snapshot buffer and the current live
1191 * tracing buffer. You can use this to take snapshots of the live
1192 * trace when some condition is triggered, but continue to trace.
1194 * Note, make sure to allocate the snapshot with either
1195 * a tracing_snapshot_alloc(), or by doing it manually
1196 * with: echo 1 > /sys/kernel/tracing/snapshot
1198 * If the snapshot buffer is not allocated, it will stop tracing.
1199 * Basically making a permanent snapshot.
1201 void tracing_snapshot(void)
1203 struct trace_array *tr = &global_trace;
1205 tracing_snapshot_instance(tr);
1207 EXPORT_SYMBOL_GPL(tracing_snapshot);
1210 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1211 * @tr: The tracing instance to snapshot
1212 * @cond_data: The data to be tested conditionally, and possibly saved
1214 * This is the same as tracing_snapshot() except that the snapshot is
1215 * conditional - the snapshot will only happen if the
1216 * cond_snapshot.update() implementation receiving the cond_data
1217 * returns true, which means that the trace array's cond_snapshot
1218 * update() operation used the cond_data to determine whether the
1219 * snapshot should be taken, and if it was, presumably saved it along
1220 * with the snapshot.
1222 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1224 tracing_snapshot_instance_cond(tr, cond_data);
1226 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1229 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1230 * @tr: The tracing instance
1232 * When the user enables a conditional snapshot using
1233 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1234 * with the snapshot. This accessor is used to retrieve it.
1236 * Should not be called from cond_snapshot.update(), since it takes
1237 * the tr->max_lock lock, which the code calling
1238 * cond_snapshot.update() has already done.
1240 * Returns the cond_data associated with the trace array's snapshot.
1242 void *tracing_cond_snapshot_data(struct trace_array *tr)
1244 void *cond_data = NULL;
1246 local_irq_disable();
1247 arch_spin_lock(&tr->max_lock);
1249 if (tr->cond_snapshot)
1250 cond_data = tr->cond_snapshot->cond_data;
1252 arch_spin_unlock(&tr->max_lock);
1257 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1259 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1260 struct array_buffer *size_buf, int cpu_id);
1261 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1263 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1267 if (!tr->allocated_snapshot) {
1269 /* allocate spare buffer */
1270 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1271 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1275 tr->allocated_snapshot = true;
1281 static void free_snapshot(struct trace_array *tr)
1284 * We don't free the ring buffer. instead, resize it because
1285 * The max_tr ring buffer has some state (e.g. ring->clock) and
1286 * we want preserve it.
1288 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1289 set_buffer_entries(&tr->max_buffer, 1);
1290 tracing_reset_online_cpus(&tr->max_buffer);
1291 tr->allocated_snapshot = false;
1295 * tracing_alloc_snapshot - allocate snapshot buffer.
1297 * This only allocates the snapshot buffer if it isn't already
1298 * allocated - it doesn't also take a snapshot.
1300 * This is meant to be used in cases where the snapshot buffer needs
1301 * to be set up for events that can't sleep but need to be able to
1302 * trigger a snapshot.
1304 int tracing_alloc_snapshot(void)
1306 struct trace_array *tr = &global_trace;
1309 ret = tracing_alloc_snapshot_instance(tr);
1314 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1317 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1319 * This is similar to tracing_snapshot(), but it will allocate the
1320 * snapshot buffer if it isn't already allocated. Use this only
1321 * where it is safe to sleep, as the allocation may sleep.
1323 * This causes a swap between the snapshot buffer and the current live
1324 * tracing buffer. You can use this to take snapshots of the live
1325 * trace when some condition is triggered, but continue to trace.
1327 void tracing_snapshot_alloc(void)
1331 ret = tracing_alloc_snapshot();
1337 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1340 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1341 * @tr: The tracing instance
1342 * @cond_data: User data to associate with the snapshot
1343 * @update: Implementation of the cond_snapshot update function
1345 * Check whether the conditional snapshot for the given instance has
1346 * already been enabled, or if the current tracer is already using a
1347 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1348 * save the cond_data and update function inside.
1350 * Returns 0 if successful, error otherwise.
1352 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1353 cond_update_fn_t update)
1355 struct cond_snapshot *cond_snapshot;
1358 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1362 cond_snapshot->cond_data = cond_data;
1363 cond_snapshot->update = update;
1365 mutex_lock(&trace_types_lock);
1367 ret = tracing_alloc_snapshot_instance(tr);
1371 if (tr->current_trace->use_max_tr) {
1377 * The cond_snapshot can only change to NULL without the
1378 * trace_types_lock. We don't care if we race with it going
1379 * to NULL, but we want to make sure that it's not set to
1380 * something other than NULL when we get here, which we can
1381 * do safely with only holding the trace_types_lock and not
1382 * having to take the max_lock.
1384 if (tr->cond_snapshot) {
1389 local_irq_disable();
1390 arch_spin_lock(&tr->max_lock);
1391 tr->cond_snapshot = cond_snapshot;
1392 arch_spin_unlock(&tr->max_lock);
1395 mutex_unlock(&trace_types_lock);
1400 mutex_unlock(&trace_types_lock);
1401 kfree(cond_snapshot);
1404 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1407 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1408 * @tr: The tracing instance
1410 * Check whether the conditional snapshot for the given instance is
1411 * enabled; if so, free the cond_snapshot associated with it,
1412 * otherwise return -EINVAL.
1414 * Returns 0 if successful, error otherwise.
1416 int tracing_snapshot_cond_disable(struct trace_array *tr)
1420 local_irq_disable();
1421 arch_spin_lock(&tr->max_lock);
1423 if (!tr->cond_snapshot)
1426 kfree(tr->cond_snapshot);
1427 tr->cond_snapshot = NULL;
1430 arch_spin_unlock(&tr->max_lock);
1435 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1437 void tracing_snapshot(void)
1439 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1441 EXPORT_SYMBOL_GPL(tracing_snapshot);
1442 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1444 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1446 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1447 int tracing_alloc_snapshot(void)
1449 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1452 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1453 void tracing_snapshot_alloc(void)
1458 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1459 void *tracing_cond_snapshot_data(struct trace_array *tr)
1463 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1464 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1468 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1469 int tracing_snapshot_cond_disable(struct trace_array *tr)
1473 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1474 #define free_snapshot(tr) do { } while (0)
1475 #endif /* CONFIG_TRACER_SNAPSHOT */
1477 void tracer_tracing_off(struct trace_array *tr)
1479 if (tr->array_buffer.buffer)
1480 ring_buffer_record_off(tr->array_buffer.buffer);
1482 * This flag is looked at when buffers haven't been allocated
1483 * yet, or by some tracers (like irqsoff), that just want to
1484 * know if the ring buffer has been disabled, but it can handle
1485 * races of where it gets disabled but we still do a record.
1486 * As the check is in the fast path of the tracers, it is more
1487 * important to be fast than accurate.
1489 tr->buffer_disabled = 1;
1490 /* Make the flag seen by readers */
1495 * tracing_off - turn off tracing buffers
1497 * This function stops the tracing buffers from recording data.
1498 * It does not disable any overhead the tracers themselves may
1499 * be causing. This function simply causes all recording to
1500 * the ring buffers to fail.
1502 void tracing_off(void)
1504 tracer_tracing_off(&global_trace);
1506 EXPORT_SYMBOL_GPL(tracing_off);
1508 void disable_trace_on_warning(void)
1510 if (__disable_trace_on_warning) {
1511 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1512 "Disabling tracing due to warning\n");
1518 * tracer_tracing_is_on - show real state of ring buffer enabled
1519 * @tr : the trace array to know if ring buffer is enabled
1521 * Shows real state of the ring buffer if it is enabled or not.
1523 bool tracer_tracing_is_on(struct trace_array *tr)
1525 if (tr->array_buffer.buffer)
1526 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1527 return !tr->buffer_disabled;
1531 * tracing_is_on - show state of ring buffers enabled
1533 int tracing_is_on(void)
1535 return tracer_tracing_is_on(&global_trace);
1537 EXPORT_SYMBOL_GPL(tracing_is_on);
1539 static int __init set_buf_size(char *str)
1541 unsigned long buf_size;
1545 buf_size = memparse(str, &str);
1547 * nr_entries can not be zero and the startup
1548 * tests require some buffer space. Therefore
1549 * ensure we have at least 4096 bytes of buffer.
1551 trace_buf_size = max(4096UL, buf_size);
1554 __setup("trace_buf_size=", set_buf_size);
1556 static int __init set_tracing_thresh(char *str)
1558 unsigned long threshold;
1563 ret = kstrtoul(str, 0, &threshold);
1566 tracing_thresh = threshold * 1000;
1569 __setup("tracing_thresh=", set_tracing_thresh);
1571 unsigned long nsecs_to_usecs(unsigned long nsecs)
1573 return nsecs / 1000;
1577 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1578 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1579 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1580 * of strings in the order that the evals (enum) were defined.
1585 /* These must match the bit positions in trace_iterator_flags */
1586 static const char *trace_options[] = {
1594 int in_ns; /* is this clock in nanoseconds? */
1595 } trace_clocks[] = {
1596 { trace_clock_local, "local", 1 },
1597 { trace_clock_global, "global", 1 },
1598 { trace_clock_counter, "counter", 0 },
1599 { trace_clock_jiffies, "uptime", 0 },
1600 { trace_clock, "perf", 1 },
1601 { ktime_get_mono_fast_ns, "mono", 1 },
1602 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1603 { ktime_get_boot_fast_ns, "boot", 1 },
1604 { ktime_get_tai_fast_ns, "tai", 1 },
1608 bool trace_clock_in_ns(struct trace_array *tr)
1610 if (trace_clocks[tr->clock_id].in_ns)
1617 * trace_parser_get_init - gets the buffer for trace parser
1619 int trace_parser_get_init(struct trace_parser *parser, int size)
1621 memset(parser, 0, sizeof(*parser));
1623 parser->buffer = kmalloc(size, GFP_KERNEL);
1624 if (!parser->buffer)
1627 parser->size = size;
1632 * trace_parser_put - frees the buffer for trace parser
1634 void trace_parser_put(struct trace_parser *parser)
1636 kfree(parser->buffer);
1637 parser->buffer = NULL;
1641 * trace_get_user - reads the user input string separated by space
1642 * (matched by isspace(ch))
1644 * For each string found the 'struct trace_parser' is updated,
1645 * and the function returns.
1647 * Returns number of bytes read.
1649 * See kernel/trace/trace.h for 'struct trace_parser' details.
1651 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1652 size_t cnt, loff_t *ppos)
1659 trace_parser_clear(parser);
1661 ret = get_user(ch, ubuf++);
1669 * The parser is not finished with the last write,
1670 * continue reading the user input without skipping spaces.
1672 if (!parser->cont) {
1673 /* skip white space */
1674 while (cnt && isspace(ch)) {
1675 ret = get_user(ch, ubuf++);
1684 /* only spaces were written */
1685 if (isspace(ch) || !ch) {
1692 /* read the non-space input */
1693 while (cnt && !isspace(ch) && ch) {
1694 if (parser->idx < parser->size - 1)
1695 parser->buffer[parser->idx++] = ch;
1700 ret = get_user(ch, ubuf++);
1707 /* We either got finished input or we have to wait for another call. */
1708 if (isspace(ch) || !ch) {
1709 parser->buffer[parser->idx] = 0;
1710 parser->cont = false;
1711 } else if (parser->idx < parser->size - 1) {
1712 parser->cont = true;
1713 parser->buffer[parser->idx++] = ch;
1714 /* Make sure the parsed string always terminates with '\0'. */
1715 parser->buffer[parser->idx] = 0;
1728 /* TODO add a seq_buf_to_buffer() */
1729 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1733 if (trace_seq_used(s) <= s->seq.readpos)
1736 len = trace_seq_used(s) - s->seq.readpos;
1739 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1741 s->seq.readpos += cnt;
1745 unsigned long __read_mostly tracing_thresh;
1747 #ifdef CONFIG_TRACER_MAX_TRACE
1748 static const struct file_operations tracing_max_lat_fops;
1750 #ifdef LATENCY_FS_NOTIFY
1752 static struct workqueue_struct *fsnotify_wq;
1754 static void latency_fsnotify_workfn(struct work_struct *work)
1756 struct trace_array *tr = container_of(work, struct trace_array,
1758 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1761 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1763 struct trace_array *tr = container_of(iwork, struct trace_array,
1765 queue_work(fsnotify_wq, &tr->fsnotify_work);
1768 static void trace_create_maxlat_file(struct trace_array *tr,
1769 struct dentry *d_tracer)
1771 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1772 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1773 tr->d_max_latency = trace_create_file("tracing_max_latency",
1776 &tracing_max_lat_fops);
1779 __init static int latency_fsnotify_init(void)
1781 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1782 WQ_UNBOUND | WQ_HIGHPRI, 0);
1784 pr_err("Unable to allocate tr_max_lat_wq\n");
1790 late_initcall_sync(latency_fsnotify_init);
1792 void latency_fsnotify(struct trace_array *tr)
1797 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1798 * possible that we are called from __schedule() or do_idle(), which
1799 * could cause a deadlock.
1801 irq_work_queue(&tr->fsnotify_irqwork);
1804 #else /* !LATENCY_FS_NOTIFY */
1806 #define trace_create_maxlat_file(tr, d_tracer) \
1807 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1808 d_tracer, tr, &tracing_max_lat_fops)
1813 * Copy the new maximum trace into the separate maximum-trace
1814 * structure. (this way the maximum trace is permanently saved,
1815 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1818 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1820 struct array_buffer *trace_buf = &tr->array_buffer;
1821 struct array_buffer *max_buf = &tr->max_buffer;
1822 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1823 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1826 max_buf->time_start = data->preempt_timestamp;
1828 max_data->saved_latency = tr->max_latency;
1829 max_data->critical_start = data->critical_start;
1830 max_data->critical_end = data->critical_end;
1832 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1833 max_data->pid = tsk->pid;
1835 * If tsk == current, then use current_uid(), as that does not use
1836 * RCU. The irq tracer can be called out of RCU scope.
1839 max_data->uid = current_uid();
1841 max_data->uid = task_uid(tsk);
1843 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1844 max_data->policy = tsk->policy;
1845 max_data->rt_priority = tsk->rt_priority;
1847 /* record this tasks comm */
1848 tracing_record_cmdline(tsk);
1849 latency_fsnotify(tr);
1853 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1855 * @tsk: the task with the latency
1856 * @cpu: The cpu that initiated the trace.
1857 * @cond_data: User data associated with a conditional snapshot
1859 * Flip the buffers between the @tr and the max_tr and record information
1860 * about which task was the cause of this latency.
1863 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1869 WARN_ON_ONCE(!irqs_disabled());
1871 if (!tr->allocated_snapshot) {
1872 /* Only the nop tracer should hit this when disabling */
1873 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1877 arch_spin_lock(&tr->max_lock);
1879 /* Inherit the recordable setting from array_buffer */
1880 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1881 ring_buffer_record_on(tr->max_buffer.buffer);
1883 ring_buffer_record_off(tr->max_buffer.buffer);
1885 #ifdef CONFIG_TRACER_SNAPSHOT
1886 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1887 arch_spin_unlock(&tr->max_lock);
1891 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1893 __update_max_tr(tr, tsk, cpu);
1895 arch_spin_unlock(&tr->max_lock);
1899 * update_max_tr_single - only copy one trace over, and reset the rest
1901 * @tsk: task with the latency
1902 * @cpu: the cpu of the buffer to copy.
1904 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1907 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1914 WARN_ON_ONCE(!irqs_disabled());
1915 if (!tr->allocated_snapshot) {
1916 /* Only the nop tracer should hit this when disabling */
1917 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1921 arch_spin_lock(&tr->max_lock);
1923 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1925 if (ret == -EBUSY) {
1927 * We failed to swap the buffer due to a commit taking
1928 * place on this CPU. We fail to record, but we reset
1929 * the max trace buffer (no one writes directly to it)
1930 * and flag that it failed.
1931 * Another reason is resize is in progress.
1933 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1934 "Failed to swap buffers due to commit or resize in progress\n");
1937 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1939 __update_max_tr(tr, tsk, cpu);
1940 arch_spin_unlock(&tr->max_lock);
1943 #endif /* CONFIG_TRACER_MAX_TRACE */
1945 static int wait_on_pipe(struct trace_iterator *iter, int full)
1947 /* Iterators are static, they should be filled or empty */
1948 if (trace_buffer_iter(iter, iter->cpu_file))
1951 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1955 #ifdef CONFIG_FTRACE_STARTUP_TEST
1956 static bool selftests_can_run;
1958 struct trace_selftests {
1959 struct list_head list;
1960 struct tracer *type;
1963 static LIST_HEAD(postponed_selftests);
1965 static int save_selftest(struct tracer *type)
1967 struct trace_selftests *selftest;
1969 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1973 selftest->type = type;
1974 list_add(&selftest->list, &postponed_selftests);
1978 static int run_tracer_selftest(struct tracer *type)
1980 struct trace_array *tr = &global_trace;
1981 struct tracer *saved_tracer = tr->current_trace;
1984 if (!type->selftest || tracing_selftest_disabled)
1988 * If a tracer registers early in boot up (before scheduling is
1989 * initialized and such), then do not run its selftests yet.
1990 * Instead, run it a little later in the boot process.
1992 if (!selftests_can_run)
1993 return save_selftest(type);
1995 if (!tracing_is_on()) {
1996 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2002 * Run a selftest on this tracer.
2003 * Here we reset the trace buffer, and set the current
2004 * tracer to be this tracer. The tracer can then run some
2005 * internal tracing to verify that everything is in order.
2006 * If we fail, we do not register this tracer.
2008 tracing_reset_online_cpus(&tr->array_buffer);
2010 tr->current_trace = type;
2012 #ifdef CONFIG_TRACER_MAX_TRACE
2013 if (type->use_max_tr) {
2014 /* If we expanded the buffers, make sure the max is expanded too */
2015 if (ring_buffer_expanded)
2016 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2017 RING_BUFFER_ALL_CPUS);
2018 tr->allocated_snapshot = true;
2022 /* the test is responsible for initializing and enabling */
2023 pr_info("Testing tracer %s: ", type->name);
2024 ret = type->selftest(type, tr);
2025 /* the test is responsible for resetting too */
2026 tr->current_trace = saved_tracer;
2028 printk(KERN_CONT "FAILED!\n");
2029 /* Add the warning after printing 'FAILED' */
2033 /* Only reset on passing, to avoid touching corrupted buffers */
2034 tracing_reset_online_cpus(&tr->array_buffer);
2036 #ifdef CONFIG_TRACER_MAX_TRACE
2037 if (type->use_max_tr) {
2038 tr->allocated_snapshot = false;
2040 /* Shrink the max buffer again */
2041 if (ring_buffer_expanded)
2042 ring_buffer_resize(tr->max_buffer.buffer, 1,
2043 RING_BUFFER_ALL_CPUS);
2047 printk(KERN_CONT "PASSED\n");
2051 static int do_run_tracer_selftest(struct tracer *type)
2056 * Tests can take a long time, especially if they are run one after the
2057 * other, as does happen during bootup when all the tracers are
2058 * registered. This could cause the soft lockup watchdog to trigger.
2062 tracing_selftest_running = true;
2063 ret = run_tracer_selftest(type);
2064 tracing_selftest_running = false;
2069 static __init int init_trace_selftests(void)
2071 struct trace_selftests *p, *n;
2072 struct tracer *t, **last;
2075 selftests_can_run = true;
2077 mutex_lock(&trace_types_lock);
2079 if (list_empty(&postponed_selftests))
2082 pr_info("Running postponed tracer tests:\n");
2084 tracing_selftest_running = true;
2085 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2086 /* This loop can take minutes when sanitizers are enabled, so
2087 * lets make sure we allow RCU processing.
2090 ret = run_tracer_selftest(p->type);
2091 /* If the test fails, then warn and remove from available_tracers */
2093 WARN(1, "tracer: %s failed selftest, disabling\n",
2095 last = &trace_types;
2096 for (t = trace_types; t; t = t->next) {
2107 tracing_selftest_running = false;
2110 mutex_unlock(&trace_types_lock);
2114 core_initcall(init_trace_selftests);
2116 static inline int run_tracer_selftest(struct tracer *type)
2120 static inline int do_run_tracer_selftest(struct tracer *type)
2124 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2126 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2128 static void __init apply_trace_boot_options(void);
2131 * register_tracer - register a tracer with the ftrace system.
2132 * @type: the plugin for the tracer
2134 * Register a new plugin tracer.
2136 int __init register_tracer(struct tracer *type)
2142 pr_info("Tracer must have a name\n");
2146 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2147 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2151 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2152 pr_warn("Can not register tracer %s due to lockdown\n",
2157 mutex_lock(&trace_types_lock);
2159 for (t = trace_types; t; t = t->next) {
2160 if (strcmp(type->name, t->name) == 0) {
2162 pr_info("Tracer %s already registered\n",
2169 if (!type->set_flag)
2170 type->set_flag = &dummy_set_flag;
2172 /*allocate a dummy tracer_flags*/
2173 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2178 type->flags->val = 0;
2179 type->flags->opts = dummy_tracer_opt;
2181 if (!type->flags->opts)
2182 type->flags->opts = dummy_tracer_opt;
2184 /* store the tracer for __set_tracer_option */
2185 type->flags->trace = type;
2187 ret = do_run_tracer_selftest(type);
2191 type->next = trace_types;
2193 add_tracer_options(&global_trace, type);
2196 mutex_unlock(&trace_types_lock);
2198 if (ret || !default_bootup_tracer)
2201 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2204 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2205 /* Do we want this tracer to start on bootup? */
2206 tracing_set_tracer(&global_trace, type->name);
2207 default_bootup_tracer = NULL;
2209 apply_trace_boot_options();
2211 /* disable other selftests, since this will break it. */
2212 disable_tracing_selftest("running a tracer");
2218 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2220 struct trace_buffer *buffer = buf->buffer;
2225 ring_buffer_record_disable(buffer);
2227 /* Make sure all commits have finished */
2229 ring_buffer_reset_cpu(buffer, cpu);
2231 ring_buffer_record_enable(buffer);
2234 void tracing_reset_online_cpus(struct array_buffer *buf)
2236 struct trace_buffer *buffer = buf->buffer;
2241 ring_buffer_record_disable(buffer);
2243 /* Make sure all commits have finished */
2246 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2248 ring_buffer_reset_online_cpus(buffer);
2250 ring_buffer_record_enable(buffer);
2253 /* Must have trace_types_lock held */
2254 void tracing_reset_all_online_cpus_unlocked(void)
2256 struct trace_array *tr;
2258 lockdep_assert_held(&trace_types_lock);
2260 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2261 if (!tr->clear_trace)
2263 tr->clear_trace = false;
2264 tracing_reset_online_cpus(&tr->array_buffer);
2265 #ifdef CONFIG_TRACER_MAX_TRACE
2266 tracing_reset_online_cpus(&tr->max_buffer);
2271 void tracing_reset_all_online_cpus(void)
2273 mutex_lock(&trace_types_lock);
2274 tracing_reset_all_online_cpus_unlocked();
2275 mutex_unlock(&trace_types_lock);
2279 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2280 * is the tgid last observed corresponding to pid=i.
2282 static int *tgid_map;
2284 /* The maximum valid index into tgid_map. */
2285 static size_t tgid_map_max;
2287 #define SAVED_CMDLINES_DEFAULT 128
2288 #define NO_CMDLINE_MAP UINT_MAX
2290 * Preemption must be disabled before acquiring trace_cmdline_lock.
2291 * The various trace_arrays' max_lock must be acquired in a context
2292 * where interrupt is disabled.
2294 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2295 struct saved_cmdlines_buffer {
2296 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2297 unsigned *map_cmdline_to_pid;
2298 unsigned cmdline_num;
2300 char *saved_cmdlines;
2302 static struct saved_cmdlines_buffer *savedcmd;
2304 static inline char *get_saved_cmdlines(int idx)
2306 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2309 static inline void set_cmdline(int idx, const char *cmdline)
2311 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2314 static int allocate_cmdlines_buffer(unsigned int val,
2315 struct saved_cmdlines_buffer *s)
2317 s->map_cmdline_to_pid = kmalloc_array(val,
2318 sizeof(*s->map_cmdline_to_pid),
2320 if (!s->map_cmdline_to_pid)
2323 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2324 if (!s->saved_cmdlines) {
2325 kfree(s->map_cmdline_to_pid);
2330 s->cmdline_num = val;
2331 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2332 sizeof(s->map_pid_to_cmdline));
2333 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2334 val * sizeof(*s->map_cmdline_to_pid));
2339 static int trace_create_savedcmd(void)
2343 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2347 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2357 int is_tracing_stopped(void)
2359 return global_trace.stop_count;
2363 * tracing_start - quick start of the tracer
2365 * If tracing is enabled but was stopped by tracing_stop,
2366 * this will start the tracer back up.
2368 void tracing_start(void)
2370 struct trace_buffer *buffer;
2371 unsigned long flags;
2373 if (tracing_disabled)
2376 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2377 if (--global_trace.stop_count) {
2378 if (global_trace.stop_count < 0) {
2379 /* Someone screwed up their debugging */
2381 global_trace.stop_count = 0;
2386 /* Prevent the buffers from switching */
2387 arch_spin_lock(&global_trace.max_lock);
2389 buffer = global_trace.array_buffer.buffer;
2391 ring_buffer_record_enable(buffer);
2393 #ifdef CONFIG_TRACER_MAX_TRACE
2394 buffer = global_trace.max_buffer.buffer;
2396 ring_buffer_record_enable(buffer);
2399 arch_spin_unlock(&global_trace.max_lock);
2402 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2405 static void tracing_start_tr(struct trace_array *tr)
2407 struct trace_buffer *buffer;
2408 unsigned long flags;
2410 if (tracing_disabled)
2413 /* If global, we need to also start the max tracer */
2414 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2415 return tracing_start();
2417 raw_spin_lock_irqsave(&tr->start_lock, flags);
2419 if (--tr->stop_count) {
2420 if (tr->stop_count < 0) {
2421 /* Someone screwed up their debugging */
2428 buffer = tr->array_buffer.buffer;
2430 ring_buffer_record_enable(buffer);
2433 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2437 * tracing_stop - quick stop of the tracer
2439 * Light weight way to stop tracing. Use in conjunction with tracing_start().
2442 void tracing_stop(void)
2444 struct trace_buffer *buffer;
2445 unsigned long flags;
2447 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2448 if (global_trace.stop_count++)
2451 /* Prevent the buffers from switching */
2452 arch_spin_lock(&global_trace.max_lock);
2454 buffer = global_trace.array_buffer.buffer;
2456 ring_buffer_record_disable(buffer);
2458 #ifdef CONFIG_TRACER_MAX_TRACE
2459 buffer = global_trace.max_buffer.buffer;
2461 ring_buffer_record_disable(buffer);
2464 arch_spin_unlock(&global_trace.max_lock);
2467 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
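/*
 * Illustrative pairing (not a call site in this file): a debugging hack
 * can bracket a region of interest with
 *
 *	tracing_stop();
 *	... inspect or dump the buffers ...
 *	tracing_start();
 *
 * The stop_count handling above keeps nested stop/start pairs balanced.
 */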
2470 static void tracing_stop_tr(struct trace_array *tr)
2472 struct trace_buffer *buffer;
2473 unsigned long flags;
2475 /* If global, we need to also stop the max tracer */
2476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2477 return tracing_stop();
2479 raw_spin_lock_irqsave(&tr->start_lock, flags);
2480 if (tr->stop_count++)
2483 buffer = tr->array_buffer.buffer;
2485 ring_buffer_record_disable(buffer);
2488 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2491 static int trace_save_cmdline(struct task_struct *tsk)
2495 /* treat recording of idle task as a success */
2499 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2502 * It's not the end of the world if we don't get
2503 * the lock, but we also don't want to spin
2504 * nor do we want to disable interrupts,
2505 * so if we miss here, then better luck next time.
2507 * This is called from within the scheduler and wakeup paths, so
2508 * interrupts had better be disabled and the run queue lock held.
2510 lockdep_assert_preemption_disabled();
2511 if (!arch_spin_trylock(&trace_cmdline_lock))
2514 idx = savedcmd->map_pid_to_cmdline[tpid];
2515 if (idx == NO_CMDLINE_MAP) {
2516 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2518 savedcmd->map_pid_to_cmdline[tpid] = idx;
2519 savedcmd->cmdline_idx = idx;
2522 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2523 set_cmdline(idx, tsk->comm);
2525 arch_spin_unlock(&trace_cmdline_lock);
2530 static void __trace_find_cmdline(int pid, char comm[])
2536 strcpy(comm, "<idle>");
2540 if (WARN_ON_ONCE(pid < 0)) {
2541 strcpy(comm, "<XXX>");
2545 tpid = pid & (PID_MAX_DEFAULT - 1);
2546 map = savedcmd->map_pid_to_cmdline[tpid];
2547 if (map != NO_CMDLINE_MAP) {
2548 tpid = savedcmd->map_cmdline_to_pid[map];
2550 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2554 strcpy(comm, "<...>");
2557 void trace_find_cmdline(int pid, char comm[])
2560 arch_spin_lock(&trace_cmdline_lock);
2562 __trace_find_cmdline(pid, comm);
2564 arch_spin_unlock(&trace_cmdline_lock);
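/*
 * Reading aid (not in the original): the comm cache is indexed by
 * pid & (PID_MAX_DEFAULT - 1), so unrelated pids can alias to the same
 * slot; whenever no usable entry is found the lookup above falls back
 * to the "<...>" placeholder.
 */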
2568 static int *trace_find_tgid_ptr(int pid)
2571 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2572 * if we observe a non-NULL tgid_map then we also observe the correct
2575 int *map = smp_load_acquire(&tgid_map);
2577 if (unlikely(!map || pid > tgid_map_max))
2583 int trace_find_tgid(int pid)
2585 int *ptr = trace_find_tgid_ptr(pid);
2587 return ptr ? *ptr : 0;
2590 static int trace_save_tgid(struct task_struct *tsk)
2594 /* treat recording of idle task as a success */
2598 ptr = trace_find_tgid_ptr(tsk->pid);
2606 static bool tracing_record_taskinfo_skip(int flags)
2608 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2610 if (!__this_cpu_read(trace_taskinfo_save))
2616 * tracing_record_taskinfo - record the task info of a task
2618 * @task: task to record
2619 * @flags: TRACE_RECORD_CMDLINE for recording comm
2620 * TRACE_RECORD_TGID for recording tgid
2622 void tracing_record_taskinfo(struct task_struct *task, int flags)
2626 if (tracing_record_taskinfo_skip(flags))
2630 * Record as much task information as possible. If some fail, continue
2631 * to try to record the others.
2633 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2634 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2636 /* If recording any information failed, retry again soon. */
2640 __this_cpu_write(trace_taskinfo_save, false);
2644 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2646 * @prev: previous task during sched_switch
2647 * @next: next task during sched_switch
2648 * @flags: TRACE_RECORD_CMDLINE for recording comm
2649 * TRACE_RECORD_TGID for recording tgid
2651 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2652 struct task_struct *next, int flags)
2656 if (tracing_record_taskinfo_skip(flags))
2660 * Record as much task information as possible. If some fail, continue
2661 * to try to record the others.
2663 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2664 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2665 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2666 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2668 /* If recording any information failed, retry again soon. */
2672 __this_cpu_write(trace_taskinfo_save, false);
2675 /* Helpers to record a specific task information */
2676 void tracing_record_cmdline(struct task_struct *task)
2678 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2681 void tracing_record_tgid(struct task_struct *task)
2683 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2687 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2688 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2689 * simplifies those functions and keeps them in sync.
2691 enum print_line_t trace_handle_return(struct trace_seq *s)
2693 return trace_seq_has_overflowed(s) ?
2694 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2696 EXPORT_SYMBOL_GPL(trace_handle_return);
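/*
 * A sketch of how a typical struct trace_event output callback uses the
 * helper above (illustrative only; foo_output is not a real function):
 *
 *	static enum print_line_t
 *	foo_output(struct trace_iterator *iter, int flags,
 *		   struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "...");
 *		return trace_handle_return(&iter->seq);
 *	}
 */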
2698 static unsigned short migration_disable_value(void)
2700 #if defined(CONFIG_SMP)
2701 return current->migration_disabled;
2707 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2709 unsigned int trace_flags = irqs_status;
2712 pc = preempt_count();
2715 trace_flags |= TRACE_FLAG_NMI;
2716 if (pc & HARDIRQ_MASK)
2717 trace_flags |= TRACE_FLAG_HARDIRQ;
2718 if (in_serving_softirq())
2719 trace_flags |= TRACE_FLAG_SOFTIRQ;
2720 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2721 trace_flags |= TRACE_FLAG_BH_OFF;
2723 if (tif_need_resched())
2724 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2725 if (test_preempt_need_resched())
2726 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2727 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2728 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
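/*
 * Layout of the returned context word (a reading aid, not original text):
 * bits 0-3 hold the clamped preempt count, bits 4-7 the clamped
 * migrate-disable depth, and bits 16 and up the TRACE_FLAG_* bits
 * computed above.
 */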
2731 struct ring_buffer_event *
2732 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2735 unsigned int trace_ctx)
2737 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2740 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2741 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2742 static int trace_buffered_event_ref;
2745 * trace_buffered_event_enable - enable buffering events
2747 * When events are being filtered, it is quicker to use a temporary
2748 * buffer to write the event data into if there's a likely chance
2749 * that it will not be committed. The discard of the ring buffer
2750 * is not as fast as committing, and is much slower than copying to the temp buffer.
2753 * When an event is to be filtered, allocate per cpu buffers to
2754 * write the event data into, and if the event is filtered and discarded
2755 * it is simply dropped; otherwise, the entire data is committed in one shot.
2758 void trace_buffered_event_enable(void)
2760 struct ring_buffer_event *event;
2764 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2766 if (trace_buffered_event_ref++)
2769 for_each_tracing_cpu(cpu) {
2770 page = alloc_pages_node(cpu_to_node(cpu),
2771 GFP_KERNEL | __GFP_NORETRY, 0);
2772 /* This is just an optimization and can handle failures */
2774 pr_err("Failed to allocate event buffer\n");
2778 event = page_address(page);
2779 memset(event, 0, sizeof(*event));
2781 per_cpu(trace_buffered_event, cpu) = event;
2784 if (cpu == smp_processor_id() &&
2785 __this_cpu_read(trace_buffered_event) !=
2786 per_cpu(trace_buffered_event, cpu))
2792 static void enable_trace_buffered_event(void *data)
2794 /* Probably not needed, but do it anyway */
2796 this_cpu_dec(trace_buffered_event_cnt);
2799 static void disable_trace_buffered_event(void *data)
2801 this_cpu_inc(trace_buffered_event_cnt);
2805 * trace_buffered_event_disable - disable buffering events
2807 * When a filter is removed, it is faster to not use the buffered
2808 * events, and to commit directly into the ring buffer. Free up
2809 * the temp buffers when there are no more users. This requires
2810 * special synchronization with current events.
2812 void trace_buffered_event_disable(void)
2816 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2818 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2821 if (--trace_buffered_event_ref)
2825 /* For each CPU, set the buffer as used. */
2826 smp_call_function_many(tracing_buffer_mask,
2827 disable_trace_buffered_event, NULL, 1);
2830 /* Wait for all current users to finish */
2833 for_each_tracing_cpu(cpu) {
2834 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2835 per_cpu(trace_buffered_event, cpu) = NULL;
2838 * Make sure trace_buffered_event is NULL before clearing
2839 * trace_buffered_event_cnt.
2844 /* Do the work on each cpu */
2845 smp_call_function_many(tracing_buffer_mask,
2846 enable_trace_buffered_event, NULL, 1);
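/*
 * Expected pairing of the two calls above (a sketch, not a call site in
 * this file): code that installs an event filter calls
 * trace_buffered_event_enable() with event_mutex held, and calls
 * trace_buffered_event_disable() when the filter is removed. The disable
 * path first bumps trace_buffered_event_cnt on every CPU so that
 * concurrent users fall back to the ring buffer while the per-CPU pages
 * are being freed.
 */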
2850 static struct trace_buffer *temp_buffer;
2852 struct ring_buffer_event *
2853 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2854 struct trace_event_file *trace_file,
2855 int type, unsigned long len,
2856 unsigned int trace_ctx)
2858 struct ring_buffer_event *entry;
2859 struct trace_array *tr = trace_file->tr;
2862 *current_rb = tr->array_buffer.buffer;
2864 if (!tr->no_filter_buffering_ref &&
2865 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2866 preempt_disable_notrace();
2868 * Filtering is on, so try to use the per cpu buffer first.
2869 * This buffer will simulate a ring_buffer_event,
2870 * where the type_len is zero and the array[0] will
2871 * hold the full length.
2872 * (see include/linux/ring_buffer.h for details on
2873 * how the ring_buffer_event is structured).
2875 * Using a temp buffer during filtering and copying it
2876 * on a matched filter is quicker than writing directly
2877 * into the ring buffer and then discarding it when
2878 * it doesn't match. That is because the discard
2879 * requires several atomic operations to get right.
2880 * Copying on match and doing nothing on a failed match
2881 * is still quicker than no copy on match, but having
2882 * to discard out of the ring buffer on a failed match.
2884 if ((entry = __this_cpu_read(trace_buffered_event))) {
2885 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2887 val = this_cpu_inc_return(trace_buffered_event_cnt);
2890 * Preemption is disabled, but interrupts and NMIs
2891 * can still come in now. If that happens after
2892 * the above increment, then it will have to go
2893 * back to the old method of allocating the event
2894 * on the ring buffer, and if the filter fails, it
2895 * will have to call ring_buffer_discard_commit()
2898 * Need to also check the unlikely case that the
2899 * length is bigger than the temp buffer size.
2900 * If that happens, then the reserve is pretty much
2901 * guaranteed to fail, as the ring buffer currently
2902 * only allows events less than a page. But that may
2903 * change in the future, so let the ring buffer reserve
2904 * handle the failure in that case.
2906 if (val == 1 && likely(len <= max_len)) {
2907 trace_event_setup(entry, type, trace_ctx);
2908 entry->array[0] = len;
2909 /* Return with preemption disabled */
2912 this_cpu_dec(trace_buffered_event_cnt);
2914 /* __trace_buffer_lock_reserve() disables preemption */
2915 preempt_enable_notrace();
2918 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2921 * If tracing is off, but we have triggers enabled
2922 * we still need to look at the event data. Use the temp_buffer
2923 * to store the trace event for the trigger to use. It's recursive
2924 * safe and will not be recorded anywhere.
2926 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2927 *current_rb = temp_buffer;
2928 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2933 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
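/*
 * Reading aid for the buffered path above (not in the original): the
 * per-CPU page is made to look like a ring_buffer_event whose type_len
 * is zero, so array[0] carries the full payload length that a normal
 * reserve would have encoded in the event header.
 */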
2935 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2936 static DEFINE_MUTEX(tracepoint_printk_mutex);
2938 static void output_printk(struct trace_event_buffer *fbuffer)
2940 struct trace_event_call *event_call;
2941 struct trace_event_file *file;
2942 struct trace_event *event;
2943 unsigned long flags;
2944 struct trace_iterator *iter = tracepoint_print_iter;
2946 /* We should never get here if iter is NULL */
2947 if (WARN_ON_ONCE(!iter))
2950 event_call = fbuffer->trace_file->event_call;
2951 if (!event_call || !event_call->event.funcs ||
2952 !event_call->event.funcs->trace)
2955 file = fbuffer->trace_file;
2956 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2957 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2958 !filter_match_preds(file->filter, fbuffer->entry)))
2961 event = &fbuffer->trace_file->event_call->event;
2963 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2964 trace_seq_init(&iter->seq);
2965 iter->ent = fbuffer->entry;
2966 event_call->event.funcs->trace(iter, 0, event);
2967 trace_seq_putc(&iter->seq, 0);
2968 printk("%s", iter->seq.buffer);
2970 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2973 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2974 void *buffer, size_t *lenp,
2977 int save_tracepoint_printk;
2980 mutex_lock(&tracepoint_printk_mutex);
2981 save_tracepoint_printk = tracepoint_printk;
2983 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2986 * This will force exiting early, as tracepoint_printk
2987 * is always zero when tracepoint_print_iter is not allocated.
2989 if (!tracepoint_print_iter)
2990 tracepoint_printk = 0;
2992 if (save_tracepoint_printk == tracepoint_printk)
2995 if (tracepoint_printk)
2996 static_key_enable(&tracepoint_printk_key.key);
2998 static_key_disable(&tracepoint_printk_key.key);
3001 mutex_unlock(&tracepoint_printk_mutex);
3006 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
3008 enum event_trigger_type tt = ETT_NONE;
3009 struct trace_event_file *file = fbuffer->trace_file;
3011 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
3012 fbuffer->entry, &tt))
3015 if (static_key_false(&tracepoint_printk_key.key))
3016 output_printk(fbuffer);
3018 if (static_branch_unlikely(&trace_event_exports_enabled))
3019 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
3021 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
3022 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
3026 event_triggers_post_call(file, tt);
3029 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
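/*
 * Summary of the commit path above (a reading aid, not original text):
 * triggers first get a chance to discard the event, tp_printk may echo
 * it to the console, registered exporters see it, and only then is it
 * committed along with the optional kernel/user stack traces before any
 * post-commit triggers run.
 */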
3034 * trace_buffer_unlock_commit_regs()
3035 * trace_event_buffer_commit()
3036 * trace_event_raw_event_xxx()
3038 # define STACK_SKIP 3
3040 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
3041 struct trace_buffer *buffer,
3042 struct ring_buffer_event *event,
3043 unsigned int trace_ctx,
3044 struct pt_regs *regs)
3046 __buffer_unlock_commit(buffer, event);
3049 * If regs is not set, then skip the necessary functions.
3050 * Note, we can still get here via blktrace, wakeup tracer
3051 * and mmiotrace, but that's ok if they lose a function or
3052 * two. They are not that meaningful.
3054 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3055 ftrace_trace_userstack(tr, buffer, trace_ctx);
3059 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3062 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3063 struct ring_buffer_event *event)
3065 __buffer_unlock_commit(buffer, event);
3069 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3070 parent_ip, unsigned int trace_ctx)
3072 struct trace_event_call *call = &event_function;
3073 struct trace_buffer *buffer = tr->array_buffer.buffer;
3074 struct ring_buffer_event *event;
3075 struct ftrace_entry *entry;
3077 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3081 entry = ring_buffer_event_data(event);
3083 entry->parent_ip = parent_ip;
3085 if (!call_filter_check_discard(call, entry, buffer, event)) {
3086 if (static_branch_unlikely(&trace_function_exports_enabled))
3087 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3088 __buffer_unlock_commit(buffer, event);
3092 #ifdef CONFIG_STACKTRACE
3094 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3095 #define FTRACE_KSTACK_NESTING 4
3097 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3099 struct ftrace_stack {
3100 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3104 struct ftrace_stacks {
3105 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3108 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3109 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3111 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3112 unsigned int trace_ctx,
3113 int skip, struct pt_regs *regs)
3115 struct trace_event_call *call = &event_kernel_stack;
3116 struct ring_buffer_event *event;
3117 unsigned int size, nr_entries;
3118 struct ftrace_stack *fstack;
3119 struct stack_entry *entry;
3123 * Add one, for this function and the call to save_stack_trace().
3124 * If regs is set, then these functions will not be in the way.
3126 #ifndef CONFIG_UNWINDER_ORC
3131 preempt_disable_notrace();
3133 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3135 /* This should never happen. If it does, yell once and skip */
3136 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3140 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3141 * interrupt will either see the value pre increment or post
3142 * increment. If the interrupt happens pre increment it will have
3143 * restored the counter when it returns. We just need a barrier to
3144 * keep gcc from moving things around.
3148 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3149 size = ARRAY_SIZE(fstack->calls);
3152 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3155 nr_entries = stack_trace_save(fstack->calls, size, skip);
3158 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3159 struct_size(entry, caller, nr_entries),
3163 entry = ring_buffer_event_data(event);
3165 entry->size = nr_entries;
3166 memcpy(&entry->caller, fstack->calls,
3167 flex_array_size(entry, caller, nr_entries));
3169 if (!call_filter_check_discard(call, entry, buffer, event))
3170 __buffer_unlock_commit(buffer, event);
3173 /* Again, don't let gcc optimize things here */
3175 __this_cpu_dec(ftrace_stack_reserve);
3176 preempt_enable_notrace();
3180 static inline void ftrace_trace_stack(struct trace_array *tr,
3181 struct trace_buffer *buffer,
3182 unsigned int trace_ctx,
3183 int skip, struct pt_regs *regs)
3185 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3188 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3191 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3194 struct trace_buffer *buffer = tr->array_buffer.buffer;
3196 if (rcu_is_watching()) {
3197 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3201 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3205 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3206 * but if the above rcu_is_watching() failed, then the NMI
3207 * triggered someplace critical, and ct_irq_enter() should
3208 * not be called from NMI.
3210 if (unlikely(in_nmi()))
3213 ct_irq_enter_irqson();
3214 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3215 ct_irq_exit_irqson();
3219 * trace_dump_stack - record a stack back trace in the trace buffer
3220 * @skip: Number of functions to skip (helper handlers)
3222 void trace_dump_stack(int skip)
3224 if (tracing_disabled || tracing_selftest_running)
3227 #ifndef CONFIG_UNWINDER_ORC
3228 /* Skip 1 to skip this function. */
3231 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3232 tracing_gen_ctx(), skip, NULL);
3234 EXPORT_SYMBOL_GPL(trace_dump_stack);
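/*
 * Illustrative use (not a call site in this file): dropping
 *
 *	trace_dump_stack(0);
 *
 * into code being debugged records the current kernel stack into the
 * top-level trace buffer; a non-zero skip trims that many callers off
 * the top of the reported trace.
 */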
3236 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3237 static DEFINE_PER_CPU(int, user_stack_count);
3240 ftrace_trace_userstack(struct trace_array *tr,
3241 struct trace_buffer *buffer, unsigned int trace_ctx)
3243 struct trace_event_call *call = &event_user_stack;
3244 struct ring_buffer_event *event;
3245 struct userstack_entry *entry;
3247 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3251 * NMIs can not handle page faults, even with fix ups.
3252 * Saving the user stack can (and often does) fault.
3254 if (unlikely(in_nmi()))
3258 * prevent recursion, since the user stack tracing may
3259 * trigger other kernel events.
3262 if (__this_cpu_read(user_stack_count))
3265 __this_cpu_inc(user_stack_count);
3267 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3268 sizeof(*entry), trace_ctx);
3270 goto out_drop_count;
3271 entry = ring_buffer_event_data(event);
3273 entry->tgid = current->tgid;
3274 memset(&entry->caller, 0, sizeof(entry->caller));
3276 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3277 if (!call_filter_check_discard(call, entry, buffer, event))
3278 __buffer_unlock_commit(buffer, event);
3281 __this_cpu_dec(user_stack_count);
3285 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3286 static void ftrace_trace_userstack(struct trace_array *tr,
3287 struct trace_buffer *buffer,
3288 unsigned int trace_ctx)
3291 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3293 #endif /* CONFIG_STACKTRACE */
3296 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3297 unsigned long long delta)
3299 entry->bottom_delta_ts = delta & U32_MAX;
3300 entry->top_delta_ts = (delta >> 32);
3303 void trace_last_func_repeats(struct trace_array *tr,
3304 struct trace_func_repeats *last_info,
3305 unsigned int trace_ctx)
3307 struct trace_buffer *buffer = tr->array_buffer.buffer;
3308 struct func_repeats_entry *entry;
3309 struct ring_buffer_event *event;
3312 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3313 sizeof(*entry), trace_ctx);
3317 delta = ring_buffer_event_time_stamp(buffer, event) -
3318 last_info->ts_last_call;
3320 entry = ring_buffer_event_data(event);
3321 entry->ip = last_info->ip;
3322 entry->parent_ip = last_info->parent_ip;
3323 entry->count = last_info->count;
3324 func_repeats_set_delta_ts(entry, delta);
3326 __buffer_unlock_commit(buffer, event);
3329 /* created for use with alloc_percpu */
3330 struct trace_buffer_struct {
3332 char buffer[4][TRACE_BUF_SIZE];
3335 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3338 * This allows for lockless recording. If we're nested too deeply, then
3339 * this returns NULL.
3341 static char *get_trace_buf(void)
3343 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3345 if (!trace_percpu_buffer || buffer->nesting >= 4)
3350 /* Interrupts must see nesting incremented before we use the buffer */
3352 return &buffer->buffer[buffer->nesting - 1][0];
3355 static void put_trace_buf(void)
3357 /* Don't let the decrement of nesting leak before this */
3359 this_cpu_dec(trace_percpu_buffer->nesting);
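/*
 * Reading aid (not in the original): each CPU carries four nested
 * trace_printk() buffers so that an event fired from interrupt or NMI
 * context does not scribble over a buffer the interrupted context is
 * still filling; get_trace_buf() returns NULL once the nesting depth is
 * exhausted, and put_trace_buf() releases one level.
 */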
3362 static int alloc_percpu_trace_buffer(void)
3364 struct trace_buffer_struct __percpu *buffers;
3366 if (trace_percpu_buffer)
3369 buffers = alloc_percpu(struct trace_buffer_struct);
3370 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3373 trace_percpu_buffer = buffers;
3377 static int buffers_allocated;
3379 void trace_printk_init_buffers(void)
3381 if (buffers_allocated)
3384 if (alloc_percpu_trace_buffer())
3387 /* trace_printk() is for debug use only. Don't use it in production. */
3390 pr_warn("**********************************************************\n");
3391 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3393 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3395 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3396 pr_warn("** unsafe for production use. **\n");
3398 pr_warn("** If you see this message and you are not debugging **\n");
3399 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3401 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3402 pr_warn("**********************************************************\n");
3404 /* Expand the buffers to set size */
3405 tracing_update_buffers();
3407 buffers_allocated = 1;
3410 * trace_printk_init_buffers() can be called by modules.
3411 * If that happens, then we need to start cmdline recording
3412 * directly here. If the global_trace.buffer is already
3413 * allocated here, then this was called by module code.
3415 if (global_trace.array_buffer.buffer)
3416 tracing_start_cmdline_record();
3418 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3420 void trace_printk_start_comm(void)
3422 /* Start tracing comms if trace printk is set */
3423 if (!buffers_allocated)
3425 tracing_start_cmdline_record();
3428 static void trace_printk_start_stop_comm(int enabled)
3430 if (!buffers_allocated)
3434 tracing_start_cmdline_record();
3436 tracing_stop_cmdline_record();
3440 * trace_vbprintk - write binary msg to tracing buffer
3441 * @ip: The address of the caller
3442 * @fmt: The string format to write to the buffer
3443 * @args: Arguments for @fmt
3445 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3447 struct trace_event_call *call = &event_bprint;
3448 struct ring_buffer_event *event;
3449 struct trace_buffer *buffer;
3450 struct trace_array *tr = &global_trace;
3451 struct bprint_entry *entry;
3452 unsigned int trace_ctx;
3456 if (unlikely(tracing_selftest_running || tracing_disabled))
3459 /* Don't pollute graph traces with trace_vprintk internals */
3460 pause_graph_tracing();
3462 trace_ctx = tracing_gen_ctx();
3463 preempt_disable_notrace();
3465 tbuffer = get_trace_buf();
3471 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3473 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3476 size = sizeof(*entry) + sizeof(u32) * len;
3477 buffer = tr->array_buffer.buffer;
3478 ring_buffer_nest_start(buffer);
3479 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3483 entry = ring_buffer_event_data(event);
3487 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3488 if (!call_filter_check_discard(call, entry, buffer, event)) {
3489 __buffer_unlock_commit(buffer, event);
3490 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3494 ring_buffer_nest_end(buffer);
3499 preempt_enable_notrace();
3500 unpause_graph_tracing();
3504 EXPORT_SYMBOL_GPL(trace_vbprintk);
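/*
 * Note (an assumption about the callers, not stated in this file):
 * trace_vbprintk() is the binary fast path behind trace_printk() when
 * the format string is a build-time constant, while trace_vprintk()
 * further below handles formats that must be rendered to text at the
 * time of the call.
 */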
3508 __trace_array_vprintk(struct trace_buffer *buffer,
3509 unsigned long ip, const char *fmt, va_list args)
3511 struct trace_event_call *call = &event_print;
3512 struct ring_buffer_event *event;
3514 struct print_entry *entry;
3515 unsigned int trace_ctx;
3518 if (tracing_disabled)
3521 /* Don't pollute graph traces with trace_vprintk internals */
3522 pause_graph_tracing();
3524 trace_ctx = tracing_gen_ctx();
3525 preempt_disable_notrace();
3528 tbuffer = get_trace_buf();
3534 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3536 size = sizeof(*entry) + len + 1;
3537 ring_buffer_nest_start(buffer);
3538 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3542 entry = ring_buffer_event_data(event);
3545 memcpy(&entry->buf, tbuffer, len + 1);
3546 if (!call_filter_check_discard(call, entry, buffer, event)) {
3547 __buffer_unlock_commit(buffer, event);
3548 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3552 ring_buffer_nest_end(buffer);
3556 preempt_enable_notrace();
3557 unpause_graph_tracing();
3563 int trace_array_vprintk(struct trace_array *tr,
3564 unsigned long ip, const char *fmt, va_list args)
3566 if (tracing_selftest_running && tr == &global_trace)
3569 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3573 * trace_array_printk - Print a message to a specific instance
3574 * @tr: The instance trace_array descriptor
3575 * @ip: The instruction pointer that this is called from.
3576 * @fmt: The format to print (printf format)
3578 * If a subsystem sets up its own instance, they have the right to
3579 * printk strings into their tracing instance buffer using this
3580 * function. Note, this function will not write into the top level
3581 * buffer (use trace_printk() for that), as writing into the top level
3582 * buffer should only have events that can be individually disabled.
3583 * trace_printk() is only used for debugging a kernel, and should not
3584 * be ever incorporated in normal use.
3586 * trace_array_printk() can be used, as it will not add noise to the
3587 * top level tracing buffer.
3589 * Note, trace_array_init_printk() must be called on @tr before this
3593 int trace_array_printk(struct trace_array *tr,
3594 unsigned long ip, const char *fmt, ...)
3602 /* This is only allowed for created instances */
3603 if (tr == &global_trace)
3606 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3610 ret = trace_array_vprintk(tr, ip, fmt, ap);
3614 EXPORT_SYMBOL_GPL(trace_array_printk);
3617 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3618 * @tr: The trace array to initialize the buffers for
3620 * As trace_array_printk() only writes into instances, they are OK to
3621 * have in the kernel (unlike trace_printk()). This needs to be called
3622 * before trace_array_printk() can be used on a trace_array.
3624 int trace_array_init_printk(struct trace_array *tr)
3629 /* This is only allowed for created instances */
3630 if (tr == &global_trace)
3633 return alloc_percpu_trace_buffer();
3635 EXPORT_SYMBOL_GPL(trace_array_init_printk);
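/*
 * Illustrative usage for a subsystem-owned instance (the names are made
 * up and the instance-lookup call is an example only; the exact API
 * varies by kernel version):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "request %d handled\n", id);
 */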
3638 int trace_array_printk_buf(struct trace_buffer *buffer,
3639 unsigned long ip, const char *fmt, ...)
3644 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3648 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3654 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3656 return trace_array_vprintk(&global_trace, ip, fmt, args);
3658 EXPORT_SYMBOL_GPL(trace_vprintk);
3660 static void trace_iterator_increment(struct trace_iterator *iter)
3662 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3666 ring_buffer_iter_advance(buf_iter);
3669 static struct trace_entry *
3670 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3671 unsigned long *lost_events)
3673 struct ring_buffer_event *event;
3674 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3677 event = ring_buffer_iter_peek(buf_iter, ts);
3679 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3680 (unsigned long)-1 : 0;
3682 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3687 iter->ent_size = ring_buffer_event_length(event);
3688 return ring_buffer_event_data(event);
3694 static struct trace_entry *
3695 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3696 unsigned long *missing_events, u64 *ent_ts)
3698 struct trace_buffer *buffer = iter->array_buffer->buffer;
3699 struct trace_entry *ent, *next = NULL;
3700 unsigned long lost_events = 0, next_lost = 0;
3701 int cpu_file = iter->cpu_file;
3702 u64 next_ts = 0, ts;
3708 * If we are in a per_cpu trace file, don't bother iterating over
3709 * all CPUs; peek directly.
3711 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3712 if (ring_buffer_empty_cpu(buffer, cpu_file))
3714 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3716 *ent_cpu = cpu_file;
3721 for_each_tracing_cpu(cpu) {
3723 if (ring_buffer_empty_cpu(buffer, cpu))
3726 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3729 * Pick the entry with the smallest timestamp:
3731 if (ent && (!next || ts < next_ts)) {
3735 next_lost = lost_events;
3736 next_size = iter->ent_size;
3740 iter->ent_size = next_size;
3743 *ent_cpu = next_cpu;
3749 *missing_events = next_lost;
3754 #define STATIC_FMT_BUF_SIZE 128
3755 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3757 char *trace_iter_expand_format(struct trace_iterator *iter)
3762 * iter->tr is NULL when used with tp_printk, which makes
3763 * this get called where it is not safe to call krealloc().
3765 if (!iter->tr || iter->fmt == static_fmt_buf)
3768 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3771 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3778 /* Returns true if the string is safe to dereference from an event */
3779 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3782 unsigned long addr = (unsigned long)str;
3783 struct trace_event *trace_event;
3784 struct trace_event_call *event;
3786 /* Ignore strings with no length */
3790 /* OK if part of the event data */
3791 if ((addr >= (unsigned long)iter->ent) &&
3792 (addr < (unsigned long)iter->ent + iter->ent_size))
3795 /* OK if part of the temp seq buffer */
3796 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3797 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3800 /* Core rodata can not be freed */
3801 if (is_kernel_rodata(addr))
3804 if (trace_is_tracepoint_string(str))
3808 * Now this could be a module event, referencing core module
3809 * data, which is OK.
3814 trace_event = ftrace_find_event(iter->ent->type);
3818 event = container_of(trace_event, struct trace_event_call, event);
3819 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3822 /* Would rather have rodata, but this will suffice */
3823 if (within_module_core(addr, event->module))
3829 static const char *show_buffer(struct trace_seq *s)
3831 struct seq_buf *seq = &s->seq;
3833 seq_buf_terminate(seq);
3838 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3840 static int test_can_verify_check(const char *fmt, ...)
3847 * The verifier depends on vsnprintf() modifying the va_list that is
3848 * passed to it, i.e. on the va_list being passed by reference. Some
3849 * architectures (like x86_32) pass it by value, which means that
3850 * vsnprintf() does not modify the caller's va_list, and the verifier
3851 * would then need to understand every value that vsnprintf() could
3852 * consume. If it is passed by value, the verifier is disabled.
3856 vsnprintf(buf, 16, "%d", ap);
3857 ret = va_arg(ap, int);
3863 static void test_can_verify(void)
3865 if (!test_can_verify_check("%d %d", 0, 1)) {
3866 pr_info("trace event string verifier disabled\n");
3867 static_branch_inc(&trace_no_verify);
3872 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3873 * @iter: The iterator that holds the seq buffer and the event being printed
3874 * @fmt: The format used to print the event
3875 * @ap: The va_list holding the data to print from @fmt.
3877 * This writes the data into the @iter->seq buffer using the data from
3878 * @fmt and @ap. If the format has a %s, then the source of the string
3879 * is examined to make sure it is safe to print, otherwise it will
3880 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3883 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3886 const char *p = fmt;
3890 if (WARN_ON_ONCE(!fmt))
3893 if (static_branch_unlikely(&trace_no_verify))
3896 /* Don't bother checking when doing a ftrace_dump() */
3897 if (iter->fmt == static_fmt_buf)
3906 /* We only care about %s and variants */
3907 for (i = 0; p[i]; i++) {
3908 if (i + 1 >= iter->fmt_size) {
3910 * If we can't expand the copy buffer,
3913 if (!trace_iter_expand_format(iter))
3917 if (p[i] == '\\' && p[i+1]) {
3922 /* Need to test cases like %08.*s */
3923 for (j = 1; p[i+j]; j++) {
3924 if (isdigit(p[i+j]) ||
3927 if (p[i+j] == '*') {
3939 /* If no %s found then just print normally */
3943 /* Copy up to the %s, and print that */
3944 strncpy(iter->fmt, p, i);
3945 iter->fmt[i] = '\0';
3946 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3949 * If iter->seq is full, the above call no longer guarantees
3950 * that ap is in sync with fmt processing, and further calls
3951 * to va_arg() can return wrong positional arguments.
3953 * Ensure that ap is no longer used in this case.
3955 if (iter->seq.full) {
3961 len = va_arg(ap, int);
3963 /* The ap now points to the string data of the %s */
3964 str = va_arg(ap, const char *);
3967 * If you hit this warning, it is likely that the
3968 * trace event in question used %s on a string that
3969 * was saved at the time of the event, but may not be
3970 * around when the trace is read. Use __string(),
3971 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3972 * instead. See samples/trace_events/trace-events-sample.h
3975 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3976 "fmt: '%s' current_buffer: '%s'",
3977 fmt, show_buffer(&iter->seq))) {
3980 /* Try to safely read the string */
3982 if (len + 1 > iter->fmt_size)
3983 len = iter->fmt_size - 1;
3986 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3990 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3994 trace_seq_printf(&iter->seq, "(0x%px)", str);
3996 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3998 str = "[UNSAFE-MEMORY]";
3999 strcpy(iter->fmt, "%s");
4001 strncpy(iter->fmt, p + i, j + 1);
4002 iter->fmt[j+1] = '\0';
4005 trace_seq_printf(&iter->seq, iter->fmt, len, str);
4007 trace_seq_printf(&iter->seq, iter->fmt, str);
4013 trace_seq_vprintf(&iter->seq, p, ap);
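/*
 * Reminder of the safe pattern the warning above points at (see
 * samples/trace_events/trace-events-sample.h for the authoritative
 * version): declare the field with __string() in TP_STRUCT__entry(),
 * copy it with __assign_str() in TP_fast_assign(), and print it with
 * __get_str() in TP_printk(), so the string data lives inside the event
 * record itself.
 */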
4016 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
4018 const char *p, *new_fmt;
4021 if (WARN_ON_ONCE(!fmt))
4024 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
4028 new_fmt = q = iter->fmt;
4030 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
4031 if (!trace_iter_expand_format(iter))
4034 q += iter->fmt - new_fmt;
4035 new_fmt = iter->fmt;
4040 /* Replace %p with %px */
4044 } else if (p[0] == 'p' && !isalnum(p[1])) {
4055 #define STATIC_TEMP_BUF_SIZE 128
4056 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4058 /* Find the next real entry, without updating the iterator itself */
4059 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4060 int *ent_cpu, u64 *ent_ts)
4062 /* __find_next_entry will reset ent_size */
4063 int ent_size = iter->ent_size;
4064 struct trace_entry *entry;
4067 * If called from ftrace_dump(), then the iter->temp buffer
4068 * will be the static_temp_buf and not created from kmalloc.
4069 * If the entry size is greater than the buffer, we can
4070 * not save it. Just return NULL in that case. This is only
4071 * used to add markers when two consecutive events' time
4072 * stamps have a large delta. See trace_print_lat_context()
4074 if (iter->temp == static_temp_buf &&
4075 STATIC_TEMP_BUF_SIZE < ent_size)
4079 * The __find_next_entry() may call peek_next_entry(), which may
4080 * call ring_buffer_peek() that may make the contents of iter->ent
4081 * undefined. Need to copy iter->ent now.
4083 if (iter->ent && iter->ent != iter->temp) {
4084 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4085 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4087 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4092 iter->temp_size = iter->ent_size;
4094 memcpy(iter->temp, iter->ent, iter->ent_size);
4095 iter->ent = iter->temp;
4097 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4098 /* Put back the original ent_size */
4099 iter->ent_size = ent_size;
4104 /* Find the next real entry, and increment the iterator to the next entry */
4105 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4107 iter->ent = __find_next_entry(iter, &iter->cpu,
4108 &iter->lost_events, &iter->ts);
4111 trace_iterator_increment(iter);
4113 return iter->ent ? iter : NULL;
4116 static void trace_consume(struct trace_iterator *iter)
4118 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4119 &iter->lost_events);
4122 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4124 struct trace_iterator *iter = m->private;
4128 WARN_ON_ONCE(iter->leftover);
4132 /* can't go backwards */
4137 ent = trace_find_next_entry_inc(iter);
4141 while (ent && iter->idx < i)
4142 ent = trace_find_next_entry_inc(iter);
4149 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4151 struct ring_buffer_iter *buf_iter;
4152 unsigned long entries = 0;
4155 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4157 buf_iter = trace_buffer_iter(iter, cpu);
4161 ring_buffer_iter_reset(buf_iter);
4164 * We could have the case with the max latency tracers
4165 * that a reset never took place on a cpu. This is evident
4166 * by the timestamp being before the start of the buffer.
4168 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4169 if (ts >= iter->array_buffer->time_start)
4172 ring_buffer_iter_advance(buf_iter);
4175 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4179 * The current tracer is copied to avoid a global locking all around.
4182 static void *s_start(struct seq_file *m, loff_t *pos)
4184 struct trace_iterator *iter = m->private;
4185 struct trace_array *tr = iter->tr;
4186 int cpu_file = iter->cpu_file;
4191 mutex_lock(&trace_types_lock);
4192 if (unlikely(tr->current_trace != iter->trace)) {
4193 /* Close iter->trace before switching to the new current tracer */
4194 if (iter->trace->close)
4195 iter->trace->close(iter);
4196 iter->trace = tr->current_trace;
4197 /* Reopen the new current tracer */
4198 if (iter->trace->open)
4199 iter->trace->open(iter);
4201 mutex_unlock(&trace_types_lock);
4203 #ifdef CONFIG_TRACER_MAX_TRACE
4204 if (iter->snapshot && iter->trace->use_max_tr)
4205 return ERR_PTR(-EBUSY);
4208 if (*pos != iter->pos) {
4213 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4214 for_each_tracing_cpu(cpu)
4215 tracing_iter_reset(iter, cpu);
4217 tracing_iter_reset(iter, cpu_file);
4220 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4225 * If we overflowed the seq_file before, then we want
4226 * to just reuse the trace_seq buffer again.
4232 p = s_next(m, p, &l);
4236 trace_event_read_lock();
4237 trace_access_lock(cpu_file);
4241 static void s_stop(struct seq_file *m, void *p)
4243 struct trace_iterator *iter = m->private;
4245 #ifdef CONFIG_TRACER_MAX_TRACE
4246 if (iter->snapshot && iter->trace->use_max_tr)
4250 trace_access_unlock(iter->cpu_file);
4251 trace_event_read_unlock();
4255 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4256 unsigned long *entries, int cpu)
4258 unsigned long count;
4260 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4262 * If this buffer has skipped entries, then we hold all
4263 * entries for the trace and we need to ignore the
4264 * ones before the time stamp.
4266 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4267 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4268 /* total is the same as the entries */
4272 ring_buffer_overrun_cpu(buf->buffer, cpu);
4277 get_total_entries(struct array_buffer *buf,
4278 unsigned long *total, unsigned long *entries)
4286 for_each_tracing_cpu(cpu) {
4287 get_total_entries_cpu(buf, &t, &e, cpu);
4293 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4295 unsigned long total, entries;
4300 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4305 unsigned long trace_total_entries(struct trace_array *tr)
4307 unsigned long total, entries;
4312 get_total_entries(&tr->array_buffer, &total, &entries);
4317 static void print_lat_help_header(struct seq_file *m)
4319 seq_puts(m, "# _------=> CPU# \n"
4320 "# / _-----=> irqs-off/BH-disabled\n"
4321 "# | / _----=> need-resched \n"
4322 "# || / _---=> hardirq/softirq \n"
4323 "# ||| / _--=> preempt-depth \n"
4324 "# |||| / _-=> migrate-disable \n"
4325 "# ||||| / delay \n"
4326 "# cmd pid |||||| time | caller \n"
4327 "# \\ / |||||| \\ | / \n");
4330 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4332 unsigned long total;
4333 unsigned long entries;
4335 get_total_entries(buf, &total, &entries);
4336 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4337 entries, total, num_online_cpus());
4341 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4344 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4346 print_event_info(buf, m);
4348 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4349 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4352 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4355 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4356 static const char space[] = " ";
4357 int prec = tgid ? 12 : 2;
4359 print_event_info(buf, m);
4361 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4362 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4363 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4364 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4365 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4366 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4367 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4368 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4372 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4374 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4375 struct array_buffer *buf = iter->array_buffer;
4376 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4377 struct tracer *type = iter->trace;
4378 unsigned long entries;
4379 unsigned long total;
4380 const char *name = type->name;
4382 get_total_entries(buf, &total, &entries);
4384 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4386 seq_puts(m, "# -----------------------------------"
4387 "---------------------------------\n");
4388 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4389 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4390 nsecs_to_usecs(data->saved_latency),
4394 preempt_model_none() ? "server" :
4395 preempt_model_voluntary() ? "desktop" :
4396 preempt_model_full() ? "preempt" :
4397 preempt_model_rt() ? "preempt_rt" :
4399 /* These are reserved for later use */
4402 seq_printf(m, " #P:%d)\n", num_online_cpus());
4406 seq_puts(m, "# -----------------\n");
4407 seq_printf(m, "# | task: %.16s-%d "
4408 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4409 data->comm, data->pid,
4410 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4411 data->policy, data->rt_priority);
4412 seq_puts(m, "# -----------------\n");
4414 if (data->critical_start) {
4415 seq_puts(m, "# => started at: ");
4416 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4417 trace_print_seq(m, &iter->seq);
4418 seq_puts(m, "\n# => ended at: ");
4419 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4420 trace_print_seq(m, &iter->seq);
4421 seq_puts(m, "\n#\n");
4427 static void test_cpu_buff_start(struct trace_iterator *iter)
4429 struct trace_seq *s = &iter->seq;
4430 struct trace_array *tr = iter->tr;
4432 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4435 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4438 if (cpumask_available(iter->started) &&
4439 cpumask_test_cpu(iter->cpu, iter->started))
4442 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4445 if (cpumask_available(iter->started))
4446 cpumask_set_cpu(iter->cpu, iter->started);
4448 /* Don't print started cpu buffer for the first entry of the trace */
4450 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4454 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4456 struct trace_array *tr = iter->tr;
4457 struct trace_seq *s = &iter->seq;
4458 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4459 struct trace_entry *entry;
4460 struct trace_event *event;
4464 test_cpu_buff_start(iter);
4466 event = ftrace_find_event(entry->type);
4468 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4469 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4470 trace_print_lat_context(iter);
4472 trace_print_context(iter);
4475 if (trace_seq_has_overflowed(s))
4476 return TRACE_TYPE_PARTIAL_LINE;
4479 if (tr->trace_flags & TRACE_ITER_FIELDS)
4480 return print_event_fields(iter, event);
4481 return event->funcs->trace(iter, sym_flags, event);
4484 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4486 return trace_handle_return(s);
4489 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4491 struct trace_array *tr = iter->tr;
4492 struct trace_seq *s = &iter->seq;
4493 struct trace_entry *entry;
4494 struct trace_event *event;
4498 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4499 trace_seq_printf(s, "%d %d %llu ",
4500 entry->pid, iter->cpu, iter->ts);
4502 if (trace_seq_has_overflowed(s))
4503 return TRACE_TYPE_PARTIAL_LINE;
4505 event = ftrace_find_event(entry->type);
4507 return event->funcs->raw(iter, 0, event);
4509 trace_seq_printf(s, "%d ?\n", entry->type);
4511 return trace_handle_return(s);
4514 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4516 struct trace_array *tr = iter->tr;
4517 struct trace_seq *s = &iter->seq;
4518 unsigned char newline = '\n';
4519 struct trace_entry *entry;
4520 struct trace_event *event;
4524 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4525 SEQ_PUT_HEX_FIELD(s, entry->pid);
4526 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4527 SEQ_PUT_HEX_FIELD(s, iter->ts);
4528 if (trace_seq_has_overflowed(s))
4529 return TRACE_TYPE_PARTIAL_LINE;
4532 event = ftrace_find_event(entry->type);
4534 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4535 if (ret != TRACE_TYPE_HANDLED)
4539 SEQ_PUT_FIELD(s, newline);
4541 return trace_handle_return(s);
4544 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4546 struct trace_array *tr = iter->tr;
4547 struct trace_seq *s = &iter->seq;
4548 struct trace_entry *entry;
4549 struct trace_event *event;
4553 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4554 SEQ_PUT_FIELD(s, entry->pid);
4555 SEQ_PUT_FIELD(s, iter->cpu);
4556 SEQ_PUT_FIELD(s, iter->ts);
4557 if (trace_seq_has_overflowed(s))
4558 return TRACE_TYPE_PARTIAL_LINE;
4561 event = ftrace_find_event(entry->type);
4562 return event ? event->funcs->binary(iter, 0, event) :
4566 int trace_empty(struct trace_iterator *iter)
4568 struct ring_buffer_iter *buf_iter;
4571 /* If we are looking at one CPU buffer, only check that one */
4572 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4573 cpu = iter->cpu_file;
4574 buf_iter = trace_buffer_iter(iter, cpu);
4576 if (!ring_buffer_iter_empty(buf_iter))
4579 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4585 for_each_tracing_cpu(cpu) {
4586 buf_iter = trace_buffer_iter(iter, cpu);
4588 if (!ring_buffer_iter_empty(buf_iter))
4591 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4599 /* Called with trace_event_read_lock() held. */
4600 enum print_line_t print_trace_line(struct trace_iterator *iter)
4602 struct trace_array *tr = iter->tr;
4603 unsigned long trace_flags = tr->trace_flags;
4604 enum print_line_t ret;
4606 if (iter->lost_events) {
4607 if (iter->lost_events == (unsigned long)-1)
4608 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4611 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4612 iter->cpu, iter->lost_events);
4613 if (trace_seq_has_overflowed(&iter->seq))
4614 return TRACE_TYPE_PARTIAL_LINE;
4617 if (iter->trace && iter->trace->print_line) {
4618 ret = iter->trace->print_line(iter);
4619 if (ret != TRACE_TYPE_UNHANDLED)
4623 if (iter->ent->type == TRACE_BPUTS &&
4624 trace_flags & TRACE_ITER_PRINTK &&
4625 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4626 return trace_print_bputs_msg_only(iter);
4628 if (iter->ent->type == TRACE_BPRINT &&
4629 trace_flags & TRACE_ITER_PRINTK &&
4630 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4631 return trace_print_bprintk_msg_only(iter);
4633 if (iter->ent->type == TRACE_PRINT &&
4634 trace_flags & TRACE_ITER_PRINTK &&
4635 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4636 return trace_print_printk_msg_only(iter);
4638 if (trace_flags & TRACE_ITER_BIN)
4639 return print_bin_fmt(iter);
4641 if (trace_flags & TRACE_ITER_HEX)
4642 return print_hex_fmt(iter);
4644 if (trace_flags & TRACE_ITER_RAW)
4645 return print_raw_fmt(iter);
4647 return print_trace_fmt(iter);
4650 void trace_latency_header(struct seq_file *m)
4652 struct trace_iterator *iter = m->private;
4653 struct trace_array *tr = iter->tr;
4655 /* print nothing if the buffers are empty */
4656 if (trace_empty(iter))
4659 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4660 print_trace_header(m, iter);
4662 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4663 print_lat_help_header(m);
4666 void trace_default_header(struct seq_file *m)
4668 struct trace_iterator *iter = m->private;
4669 struct trace_array *tr = iter->tr;
4670 unsigned long trace_flags = tr->trace_flags;
4672 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4675 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4676 /* print nothing if the buffers are empty */
4677 if (trace_empty(iter))
4679 print_trace_header(m, iter);
4680 if (!(trace_flags & TRACE_ITER_VERBOSE))
4681 print_lat_help_header(m);
4683 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4684 if (trace_flags & TRACE_ITER_IRQ_INFO)
4685 print_func_help_header_irq(iter->array_buffer,
4688 print_func_help_header(iter->array_buffer, m,
4694 static void test_ftrace_alive(struct seq_file *m)
4696 if (!ftrace_is_dead())
4698 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4699 "# MAY BE MISSING FUNCTION EVENTS\n");
4702 #ifdef CONFIG_TRACER_MAX_TRACE
4703 static void show_snapshot_main_help(struct seq_file *m)
4705 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4706 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4707 "# Takes a snapshot of the main buffer.\n"
4708 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4709 "# (Doesn't have to be '2'; works with any number that\n"
4710 "# is not a '0' or '1')\n");
4713 static void show_snapshot_percpu_help(struct seq_file *m)
4715 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4716 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4717 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4718 "# Takes a snapshot of the main buffer for this cpu.\n");
4720 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4721 "# Must use main snapshot file to allocate.\n");
4723 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4724 "# (Doesn't have to be '2'; works with any number that\n"
4725 "# is not a '0' or '1')\n");
4728 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4730 if (iter->tr->allocated_snapshot)
4731 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4733 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4735 seq_puts(m, "# Snapshot commands:\n");
4736 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4737 show_snapshot_main_help(m);
4739 show_snapshot_percpu_help(m);
4742 /* Should never be called */
4743 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4746 static int s_show(struct seq_file *m, void *v)
4748 struct trace_iterator *iter = v;
4751 if (iter->ent == NULL) {
4753 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4755 test_ftrace_alive(m);
4757 if (iter->snapshot && trace_empty(iter))
4758 print_snapshot_help(m, iter);
4759 else if (iter->trace && iter->trace->print_header)
4760 iter->trace->print_header(m);
4762 trace_default_header(m);
4764 } else if (iter->leftover) {
4766 * If we filled the seq_file buffer earlier, we
4767 * want to just show it now.
4769 ret = trace_print_seq(m, &iter->seq);
4771 /* ret should this time be zero, but you never know */
4772 iter->leftover = ret;
4775 print_trace_line(iter);
4776 ret = trace_print_seq(m, &iter->seq);
4778 * If we overflow the seq_file buffer, then it will
4779 * ask us for this data again at start up.
4781 * ret is 0 if seq_file write succeeded.
4784 iter->leftover = ret;
4791 * Should be used after trace_array_get(), trace_types_lock
4792 * ensures that i_cdev was already initialized.
4794 static inline int tracing_get_cpu(struct inode *inode)
4796 if (inode->i_cdev) /* See trace_create_cpu_file() */
4797 return (long)inode->i_cdev - 1;
4798 return RING_BUFFER_ALL_CPUS;
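/*
 * Illustrative note (an assumption based on the reference above, not a
 * definitive statement): the per-CPU files are expected to stash "cpu + 1"
 * in i_cdev when they are created, so that a NULL i_cdev (the top level
 * files) still means "all CPUs":
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);	// trace_create_cpu_file()
 *	...
 *	cpu = (long)inode->i_cdev - 1;			// per_cpu/cpuN files
 */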
4801 static const struct seq_operations tracer_seq_ops = {
4809 * Note, as iter itself can be allocated and freed in different
4810 * ways, this function is only used to free its content, and not
4811 * the iterator itself. The only requirement for all the allocations
4812 * is that they must zero all fields (kzalloc), as freeing works with
4813 * either allocated content or NULL.
4815 static void free_trace_iter_content(struct trace_iterator *iter)
4817 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4818 if (iter->fmt != static_fmt_buf)
4822 kfree(iter->buffer_iter);
4823 mutex_destroy(&iter->mutex);
4824 free_cpumask_var(iter->started);
4827 static struct trace_iterator *
4828 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4830 struct trace_array *tr = inode->i_private;
4831 struct trace_iterator *iter;
4834 if (tracing_disabled)
4835 return ERR_PTR(-ENODEV);
4837 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4839 return ERR_PTR(-ENOMEM);
4841 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4843 if (!iter->buffer_iter)
4847 * trace_find_next_entry() may need to save off iter->ent.
4848 * It will place it into the iter->temp buffer. As most
4849 * events are less than 128 bytes, allocate a buffer of that size.
4850 * If one is greater, then trace_find_next_entry() will
4851 * allocate a new buffer to adjust for the bigger iter->ent.
4852 * It's not critical if it fails to get allocated here.
4854 iter->temp = kmalloc(128, GFP_KERNEL);
4856 iter->temp_size = 128;
4859 * trace_event_printf() may need to modify the given format
4860 * string to replace %p with %px so that it shows the real address
4861 * instead of the hash value. However, that is only for the event
4862 * tracing; other tracers may not need it. Defer the allocation
4863 * until it is needed.
4868 mutex_lock(&trace_types_lock);
4869 iter->trace = tr->current_trace;
4871 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4876 #ifdef CONFIG_TRACER_MAX_TRACE
4877 /* Currently only the top directory has a snapshot */
4878 if (tr->current_trace->print_max || snapshot)
4879 iter->array_buffer = &tr->max_buffer;
4882 iter->array_buffer = &tr->array_buffer;
4883 iter->snapshot = snapshot;
4885 iter->cpu_file = tracing_get_cpu(inode);
4886 mutex_init(&iter->mutex);
4888 /* Notify the tracer early; before we stop tracing. */
4889 if (iter->trace->open)
4890 iter->trace->open(iter);
4892 /* Annotate start of buffers if we had overruns */
4893 if (ring_buffer_overruns(iter->array_buffer->buffer))
4894 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4896 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4897 if (trace_clocks[tr->clock_id].in_ns)
4898 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4901 * If pause-on-trace is enabled, then stop the trace while
4902 * dumping, unless this is the "snapshot" file
4904 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4905 tracing_stop_tr(tr);
4907 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4908 for_each_tracing_cpu(cpu) {
4909 iter->buffer_iter[cpu] =
4910 ring_buffer_read_prepare(iter->array_buffer->buffer,
4913 ring_buffer_read_prepare_sync();
4914 for_each_tracing_cpu(cpu) {
4915 ring_buffer_read_start(iter->buffer_iter[cpu]);
4916 tracing_iter_reset(iter, cpu);
4919 cpu = iter->cpu_file;
4920 iter->buffer_iter[cpu] =
4921 ring_buffer_read_prepare(iter->array_buffer->buffer,
4923 ring_buffer_read_prepare_sync();
4924 ring_buffer_read_start(iter->buffer_iter[cpu]);
4925 tracing_iter_reset(iter, cpu);
4928 mutex_unlock(&trace_types_lock);
4933 mutex_unlock(&trace_types_lock);
4934 free_trace_iter_content(iter);
4936 seq_release_private(inode, file);
4937 return ERR_PTR(-ENOMEM);
4940 int tracing_open_generic(struct inode *inode, struct file *filp)
4944 ret = tracing_check_open_get_tr(NULL);
4948 filp->private_data = inode->i_private;
4952 bool tracing_is_disabled(void)
4954 return (tracing_disabled) ? true : false;
4958 * Open and update trace_array ref count.
4959 * Must have the current trace_array passed to it.
4961 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4963 struct trace_array *tr = inode->i_private;
4966 ret = tracing_check_open_get_tr(tr);
4970 filp->private_data = inode->i_private;
4976 * The private pointer of the inode is the trace_event_file.
4977 * Update the tr ref count associated to it.
4979 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4981 struct trace_event_file *file = inode->i_private;
4984 ret = tracing_check_open_get_tr(file->tr);
4988 mutex_lock(&event_mutex);
4990 /* Fail if the file is marked for removal */
4991 if (file->flags & EVENT_FILE_FL_FREED) {
4992 trace_array_put(file->tr);
4995 event_file_get(file);
4998 mutex_unlock(&event_mutex);
5002 filp->private_data = inode->i_private;
5007 int tracing_release_file_tr(struct inode *inode, struct file *filp)
5009 struct trace_event_file *file = inode->i_private;
5011 trace_array_put(file->tr);
5012 event_file_put(file);
5017 static int tracing_mark_open(struct inode *inode, struct file *filp)
5019 stream_open(inode, filp);
5020 return tracing_open_generic_tr(inode, filp);
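/*
 * Illustrative sketch (not part of this file): writing a marker from
 * userspace through the trace_marker file opened by tracing_mark_open().
 * Assumes tracefs is mounted at /sys/kernel/tracing.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char msg[] = "hello from userspace\n";
 *		int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, msg, strlen(msg));	// shows up as a print event in "trace"
 *		close(fd);
 *		return 0;
 *	}
 */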
5023 static int tracing_release(struct inode *inode, struct file *file)
5025 struct trace_array *tr = inode->i_private;
5026 struct seq_file *m = file->private_data;
5027 struct trace_iterator *iter;
5030 if (!(file->f_mode & FMODE_READ)) {
5031 trace_array_put(tr);
5035 /* Writes do not use seq_file */
5037 mutex_lock(&trace_types_lock);
5039 for_each_tracing_cpu(cpu) {
5040 if (iter->buffer_iter[cpu])
5041 ring_buffer_read_finish(iter->buffer_iter[cpu]);
5044 if (iter->trace && iter->trace->close)
5045 iter->trace->close(iter);
5047 if (!iter->snapshot && tr->stop_count)
5048 /* reenable tracing if it was previously enabled */
5049 tracing_start_tr(tr);
5051 __trace_array_put(tr);
5053 mutex_unlock(&trace_types_lock);
5055 free_trace_iter_content(iter);
5056 seq_release_private(inode, file);
5061 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
5063 struct trace_array *tr = inode->i_private;
5065 trace_array_put(tr);
5069 static int tracing_single_release_tr(struct inode *inode, struct file *file)
5071 struct trace_array *tr = inode->i_private;
5073 trace_array_put(tr);
5075 return single_release(inode, file);
5078 static int tracing_open(struct inode *inode, struct file *file)
5080 struct trace_array *tr = inode->i_private;
5081 struct trace_iterator *iter;
5084 ret = tracing_check_open_get_tr(tr);
5088 /* If this file was open for write, then erase contents */
5089 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5090 int cpu = tracing_get_cpu(inode);
5091 struct array_buffer *trace_buf = &tr->array_buffer;
5093 #ifdef CONFIG_TRACER_MAX_TRACE
5094 if (tr->current_trace->print_max)
5095 trace_buf = &tr->max_buffer;
5098 if (cpu == RING_BUFFER_ALL_CPUS)
5099 tracing_reset_online_cpus(trace_buf);
5101 tracing_reset_cpu(trace_buf, cpu);
5104 if (file->f_mode & FMODE_READ) {
5105 iter = __tracing_open(inode, file, false);
5107 ret = PTR_ERR(iter);
5108 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5109 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5113 trace_array_put(tr);
5119 * Some tracers are not suitable for instance buffers.
5120 * A tracer is always available for the global array (toplevel)
5121 * or if it explicitly states that it is.
5124 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5126 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5129 /* Find the next tracer that this trace array may use */
5130 static struct tracer *
5131 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5133 while (t && !trace_ok_for_array(t, tr))
5140 t_next(struct seq_file *m, void *v, loff_t *pos)
5142 struct trace_array *tr = m->private;
5143 struct tracer *t = v;
5148 t = get_tracer_for_array(tr, t->next);
5153 static void *t_start(struct seq_file *m, loff_t *pos)
5155 struct trace_array *tr = m->private;
5159 mutex_lock(&trace_types_lock);
5161 t = get_tracer_for_array(tr, trace_types);
5162 for (; t && l < *pos; t = t_next(m, t, &l))
5168 static void t_stop(struct seq_file *m, void *p)
5170 mutex_unlock(&trace_types_lock);
5173 static int t_show(struct seq_file *m, void *v)
5175 struct tracer *t = v;
5180 seq_puts(m, t->name);
5189 static const struct seq_operations show_traces_seq_ops = {
5196 static int show_traces_open(struct inode *inode, struct file *file)
5198 struct trace_array *tr = inode->i_private;
5202 ret = tracing_check_open_get_tr(tr);
5206 ret = seq_open(file, &show_traces_seq_ops);
5208 trace_array_put(tr);
5212 m = file->private_data;
5218 static int show_traces_release(struct inode *inode, struct file *file)
5220 struct trace_array *tr = inode->i_private;
5222 trace_array_put(tr);
5223 return seq_release(inode, file);
5227 tracing_write_stub(struct file *filp, const char __user *ubuf,
5228 size_t count, loff_t *ppos)
5233 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5237 if (file->f_mode & FMODE_READ)
5238 ret = seq_lseek(file, offset, whence);
5240 file->f_pos = ret = 0;
5245 static const struct file_operations tracing_fops = {
5246 .open = tracing_open,
5248 .read_iter = seq_read_iter,
5249 .splice_read = copy_splice_read,
5250 .write = tracing_write_stub,
5251 .llseek = tracing_lseek,
5252 .release = tracing_release,
5255 static const struct file_operations show_traces_fops = {
5256 .open = show_traces_open,
5258 .llseek = seq_lseek,
5259 .release = show_traces_release,
5263 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5264 size_t count, loff_t *ppos)
5266 struct trace_array *tr = file_inode(filp)->i_private;
5270 len = snprintf(NULL, 0, "%*pb\n",
5271 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5272 mask_str = kmalloc(len, GFP_KERNEL);
5276 len = snprintf(mask_str, len, "%*pb\n",
5277 cpumask_pr_args(tr->tracing_cpumask));
5282 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5290 int tracing_set_cpumask(struct trace_array *tr,
5291 cpumask_var_t tracing_cpumask_new)
5298 local_irq_disable();
5299 arch_spin_lock(&tr->max_lock);
5300 for_each_tracing_cpu(cpu) {
5302 * Increase/decrease the disabled counter if we are
5303 * about to flip a bit in the cpumask:
5305 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5306 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5307 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5308 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5309 #ifdef CONFIG_TRACER_MAX_TRACE
5310 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5313 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5314 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5315 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5316 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5317 #ifdef CONFIG_TRACER_MAX_TRACE
5318 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5322 arch_spin_unlock(&tr->max_lock);
5325 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5331 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5332 size_t count, loff_t *ppos)
5334 struct trace_array *tr = file_inode(filp)->i_private;
5335 cpumask_var_t tracing_cpumask_new;
5338 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5341 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5345 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5349 free_cpumask_var(tracing_cpumask_new);
5354 free_cpumask_var(tracing_cpumask_new);
5359 static const struct file_operations tracing_cpumask_fops = {
5360 .open = tracing_open_generic_tr,
5361 .read = tracing_cpumask_read,
5362 .write = tracing_cpumask_write,
5363 .release = tracing_release_generic_tr,
5364 .llseek = generic_file_llseek,
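/*
 * Illustrative sketch (not part of this file): using the tracing_cpumask
 * file backed by the fops above. The mask is printed and parsed as a hex
 * cpumask ("%*pb"), so writing "3" limits tracing to CPUs 0 and 1.
 * Assumes tracefs is mounted at /sys/kernel/tracing.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[64];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/tracing/tracing_cpumask", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "3", 1);			// trace only CPUs 0-1
 *		n = pread(fd, buf, sizeof(buf) - 1, 0);	// read back the mask
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			printf("cpumask: %s", buf);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */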
5367 static int tracing_trace_options_show(struct seq_file *m, void *v)
5369 struct tracer_opt *trace_opts;
5370 struct trace_array *tr = m->private;
5374 mutex_lock(&trace_types_lock);
5375 tracer_flags = tr->current_trace->flags->val;
5376 trace_opts = tr->current_trace->flags->opts;
5378 for (i = 0; trace_options[i]; i++) {
5379 if (tr->trace_flags & (1 << i))
5380 seq_printf(m, "%s\n", trace_options[i]);
5382 seq_printf(m, "no%s\n", trace_options[i]);
5385 for (i = 0; trace_opts[i].name; i++) {
5386 if (tracer_flags & trace_opts[i].bit)
5387 seq_printf(m, "%s\n", trace_opts[i].name);
5389 seq_printf(m, "no%s\n", trace_opts[i].name);
5391 mutex_unlock(&trace_types_lock);
5396 static int __set_tracer_option(struct trace_array *tr,
5397 struct tracer_flags *tracer_flags,
5398 struct tracer_opt *opts, int neg)
5400 struct tracer *trace = tracer_flags->trace;
5403 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5408 tracer_flags->val &= ~opts->bit;
5410 tracer_flags->val |= opts->bit;
5414 /* Try to assign a tracer specific option */
5415 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5417 struct tracer *trace = tr->current_trace;
5418 struct tracer_flags *tracer_flags = trace->flags;
5419 struct tracer_opt *opts = NULL;
5422 for (i = 0; tracer_flags->opts[i].name; i++) {
5423 opts = &tracer_flags->opts[i];
5425 if (strcmp(cmp, opts->name) == 0)
5426 return __set_tracer_option(tr, trace->flags, opts, neg);
5432 /* Some tracers require overwrite to stay enabled */
5433 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5435 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5441 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5445 if ((mask == TRACE_ITER_RECORD_TGID) ||
5446 (mask == TRACE_ITER_RECORD_CMD))
5447 lockdep_assert_held(&event_mutex);
5449 /* do nothing if flag is already set */
5450 if (!!(tr->trace_flags & mask) == !!enabled)
5453 /* Give the tracer a chance to approve the change */
5454 if (tr->current_trace->flag_changed)
5455 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5459 tr->trace_flags |= mask;
5461 tr->trace_flags &= ~mask;
5463 if (mask == TRACE_ITER_RECORD_CMD)
5464 trace_event_enable_cmd_record(enabled);
5466 if (mask == TRACE_ITER_RECORD_TGID) {
5468 tgid_map_max = pid_max;
5469 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5473 * Pairs with smp_load_acquire() in
5474 * trace_find_tgid_ptr() to ensure that if it observes
5475 * the tgid_map we just allocated then it also observes
5476 * the corresponding tgid_map_max value.
5478 smp_store_release(&tgid_map, map);
5481 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5485 trace_event_enable_tgid_record(enabled);
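/*
 * Memory-ordering sketch (illustrative, based on the pairing described
 * above): the publisher fills in tgid_map_max and the array before the
 * release store, so a reader that observes the new pointer via the acquire
 * load is guaranteed to observe both. Reader side is a sketch of
 * trace_find_tgid_ptr(), not a verbatim copy.
 *
 *	// publisher (this function)
 *	tgid_map_max = pid_max;
 *	map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map), GFP_KERNEL);
 *	if (map)
 *		smp_store_release(&tgid_map, map);
 *
 *	// reader (trace_find_tgid_ptr(), sketch)
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (!map || pid > tgid_map_max)
 *		return NULL;
 *	return &map[pid];
 */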
5488 if (mask == TRACE_ITER_EVENT_FORK)
5489 trace_event_follow_fork(tr, enabled);
5491 if (mask == TRACE_ITER_FUNC_FORK)
5492 ftrace_pid_follow_fork(tr, enabled);
5494 if (mask == TRACE_ITER_OVERWRITE) {
5495 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5496 #ifdef CONFIG_TRACER_MAX_TRACE
5497 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5501 if (mask == TRACE_ITER_PRINTK) {
5502 trace_printk_start_stop_comm(enabled);
5503 trace_printk_control(enabled);
5509 int trace_set_options(struct trace_array *tr, char *option)
5514 size_t orig_len = strlen(option);
5517 cmp = strstrip(option);
5519 len = str_has_prefix(cmp, "no");
5525 mutex_lock(&event_mutex);
5526 mutex_lock(&trace_types_lock);
5528 ret = match_string(trace_options, -1, cmp);
5529 /* If no option could be set, test the specific tracer options */
5531 ret = set_tracer_option(tr, cmp, neg);
5533 ret = set_tracer_flag(tr, 1 << ret, !neg);
5535 mutex_unlock(&trace_types_lock);
5536 mutex_unlock(&event_mutex);
5539 * If the first trailing whitespace is replaced with '\0' by strstrip,
5540 * turn it back into a space.
5542 if (orig_len > strlen(option))
5543 option[strlen(option)] = ' ';
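/*
 * Illustrative sketch (not part of this file): the "no" prefix convention
 * handled by trace_set_options(), driven through the trace_options file.
 * Assumes tracefs is mounted at /sys/kernel/tracing; "overwrite" is one of
 * the generic options in trace_options[].
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void set_opt(const char *opt)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
 *
 *		if (fd < 0)
 *			return;
 *		write(fd, opt, strlen(opt));
 *		close(fd);
 *	}
 *
 *	int main(void)
 *	{
 *		set_opt("nooverwrite");	// stop overwriting old events when full
 *		set_opt("overwrite");	// back to the default overwrite behaviour
 *		return 0;
 *	}
 */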
5548 static void __init apply_trace_boot_options(void)
5550 char *buf = trace_boot_options_buf;
5554 option = strsep(&buf, ",");
5560 trace_set_options(&global_trace, option);
5562 /* Put back the comma to allow this to be called again */
5569 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5570 size_t cnt, loff_t *ppos)
5572 struct seq_file *m = filp->private_data;
5573 struct trace_array *tr = m->private;
5577 if (cnt >= sizeof(buf))
5580 if (copy_from_user(buf, ubuf, cnt))
5585 ret = trace_set_options(tr, buf);
5594 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5596 struct trace_array *tr = inode->i_private;
5599 ret = tracing_check_open_get_tr(tr);
5603 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5605 trace_array_put(tr);
5610 static const struct file_operations tracing_iter_fops = {
5611 .open = tracing_trace_options_open,
5613 .llseek = seq_lseek,
5614 .release = tracing_single_release_tr,
5615 .write = tracing_trace_options_write,
5618 static const char readme_msg[] =
5619 "tracing mini-HOWTO:\n\n"
5620 "# echo 0 > tracing_on : quick way to disable tracing\n"
5621 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5622 " Important files:\n"
5623 " trace\t\t\t- The static contents of the buffer\n"
5624 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5625 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5626 " current_tracer\t- function and latency tracers\n"
5627 " available_tracers\t- list of configured tracers for current_tracer\n"
5628 " error_log\t- error log for failed commands (that support it)\n"
5629 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5630 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5631 " trace_clock\t\t- change the clock used to order events\n"
5632 " local: Per cpu clock but may not be synced across CPUs\n"
5633 " global: Synced across CPUs but slows tracing down.\n"
5634 " counter: Not a clock, but just an increment\n"
5635 " uptime: Jiffy counter from time of boot\n"
5636 " perf: Same clock that perf events use\n"
5637 #ifdef CONFIG_X86_64
5638 " x86-tsc: TSC cycle counter\n"
5640 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5641 " delta: Delta difference against a buffer-wide timestamp\n"
5642 " absolute: Absolute (standalone) timestamp\n"
5643 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5644 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5645 " tracing_cpumask\t- Limit which CPUs to trace\n"
5646 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5647 "\t\t\t Remove sub-buffer with rmdir\n"
5648 " trace_options\t\t- Set format or modify how tracing happens\n"
5649 "\t\t\t Disable an option by prefixing 'no' to the\n"
5650 "\t\t\t option name\n"
5651 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5652 #ifdef CONFIG_DYNAMIC_FTRACE
5653 "\n available_filter_functions - list of functions that can be filtered on\n"
5654 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5655 "\t\t\t functions\n"
5656 "\t accepts: func_full_name or glob-matching-pattern\n"
5657 "\t modules: Can select a group via module\n"
5658 "\t Format: :mod:<module-name>\n"
5659 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5660 "\t triggers: a command to perform when function is hit\n"
5661 "\t Format: <function>:<trigger>[:count]\n"
5662 "\t trigger: traceon, traceoff\n"
5663 "\t\t enable_event:<system>:<event>\n"
5664 "\t\t disable_event:<system>:<event>\n"
5665 #ifdef CONFIG_STACKTRACE
5668 #ifdef CONFIG_TRACER_SNAPSHOT
5673 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5674 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5675 "\t The first one will disable tracing every time do_fault is hit\n"
5676 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5677 "\t The first time do trap is hit and it disables tracing, the\n"
5678 "\t counter will decrement to 2. If tracing is already disabled,\n"
5679 "\t the counter will not decrement. It only decrements when the\n"
5680 "\t trigger did work\n"
5681 "\t To remove trigger without count:\n"
5682 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5683 "\t To remove trigger with a count:\n"
5684 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5685 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5686 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5687 "\t modules: Can select a group via module command :mod:\n"
5688 "\t Does not accept triggers\n"
5689 #endif /* CONFIG_DYNAMIC_FTRACE */
5690 #ifdef CONFIG_FUNCTION_TRACER
5691 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5693 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5696 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5697 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5698 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5699 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5701 #ifdef CONFIG_TRACER_SNAPSHOT
5702 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5703 "\t\t\t snapshot buffer. Read the contents for more\n"
5704 "\t\t\t information\n"
5706 #ifdef CONFIG_STACK_TRACER
5707 " stack_trace\t\t- Shows the max stack trace when active\n"
5708 " stack_max_size\t- Shows current max stack size that was traced\n"
5709 "\t\t\t Write into this file to reset the max size (trigger a\n"
5710 "\t\t\t new trace)\n"
5711 #ifdef CONFIG_DYNAMIC_FTRACE
5712 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5715 #endif /* CONFIG_STACK_TRACER */
5716 #ifdef CONFIG_DYNAMIC_EVENTS
5717 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5718 "\t\t\t Write into this file to define/undefine new trace events.\n"
5720 #ifdef CONFIG_KPROBE_EVENTS
5721 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5722 "\t\t\t Write into this file to define/undefine new trace events.\n"
5724 #ifdef CONFIG_UPROBE_EVENTS
5725 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5726 "\t\t\t Write into this file to define/undefine new trace events.\n"
5728 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5729 defined(CONFIG_FPROBE_EVENTS)
5730 "\t accepts: event-definitions (one definition per line)\n"
5731 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5732 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5733 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5735 #ifdef CONFIG_FPROBE_EVENTS
5736 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5737 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5739 #ifdef CONFIG_HIST_TRIGGERS
5740 "\t s:[synthetic/]<event> <field> [<field>]\n"
5742 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5743 "\t -:[<group>/][<event>]\n"
5744 #ifdef CONFIG_KPROBE_EVENTS
5745 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5746 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5748 #ifdef CONFIG_UPROBE_EVENTS
5749 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5751 "\t args: <name>=fetcharg[:type]\n"
5752 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5753 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5754 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5755 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5756 "\t <argname>[->field[->field|.field...]],\n"
5758 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5761 "\t $stack<index>, $stack, $retval, $comm,\n"
5763 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5764 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5765 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5766 "\t symstr, <type>\\[<array-size>\\]\n"
5767 #ifdef CONFIG_HIST_TRIGGERS
5768 "\t field: <stype> <name>;\n"
5769 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5770 "\t [unsigned] char/int/long\n"
5772 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5773 "\t of the <attached-group>/<attached-event>.\n"
5775 " events/\t\t- Directory containing all trace event subsystems:\n"
5776 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5777 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5778 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5780 " filter\t\t- If set, only events passing filter are traced\n"
5781 " events/<system>/<event>/\t- Directory containing control files for\n"
5783 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5784 " filter\t\t- If set, only events passing filter are traced\n"
5785 " trigger\t\t- If set, a command to perform when event is hit\n"
5786 "\t Format: <trigger>[:count][if <filter>]\n"
5787 "\t trigger: traceon, traceoff\n"
5788 "\t enable_event:<system>:<event>\n"
5789 "\t disable_event:<system>:<event>\n"
5790 #ifdef CONFIG_HIST_TRIGGERS
5791 "\t enable_hist:<system>:<event>\n"
5792 "\t disable_hist:<system>:<event>\n"
5794 #ifdef CONFIG_STACKTRACE
5797 #ifdef CONFIG_TRACER_SNAPSHOT
5800 #ifdef CONFIG_HIST_TRIGGERS
5801 "\t\t hist (see below)\n"
5803 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5804 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5805 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5806 "\t events/block/block_unplug/trigger\n"
5807 "\t The first disables tracing every time block_unplug is hit.\n"
5808 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5809 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5810 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5811 "\t Like function triggers, the counter is only decremented if it\n"
5812 "\t enabled or disabled tracing.\n"
5813 "\t To remove a trigger without a count:\n"
5814 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5815 "\t To remove a trigger with a count:\n"
5816 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5817 "\t Filters can be ignored when removing a trigger.\n"
5818 #ifdef CONFIG_HIST_TRIGGERS
5819 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5820 "\t Format: hist:keys=<field1[,field2,...]>\n"
5821 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5822 "\t [:values=<field1[,field2,...]>]\n"
5823 "\t [:sort=<field1[,field2,...]>]\n"
5824 "\t [:size=#entries]\n"
5825 "\t [:pause][:continue][:clear]\n"
5826 "\t [:name=histname1]\n"
5827 "\t [:nohitcount]\n"
5828 "\t [:<handler>.<action>]\n"
5829 "\t [if <filter>]\n\n"
5830 "\t Note, special fields can be used as well:\n"
5831 "\t common_timestamp - to record current timestamp\n"
5832 "\t common_cpu - to record the CPU the event happened on\n"
5834 "\t A hist trigger variable can be:\n"
5835 "\t - a reference to a field e.g. x=current_timestamp,\n"
5836 "\t - a reference to another variable e.g. y=$x,\n"
5837 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5838 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5840 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5841 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5842 "\t variable reference, field or numeric literal.\n"
5844 "\t When a matching event is hit, an entry is added to a hash\n"
5845 "\t table using the key(s) and value(s) named, and the value of a\n"
5846 "\t sum called 'hitcount' is incremented. Keys and values\n"
5847 "\t correspond to fields in the event's format description. Keys\n"
5848 "\t can be any field, or the special string 'common_stacktrace'.\n"
5849 "\t Compound keys consisting of up to two fields can be specified\n"
5850 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5851 "\t fields. Sort keys consisting of up to two fields can be\n"
5852 "\t specified using the 'sort' keyword. The sort direction can\n"
5853 "\t be modified by appending '.descending' or '.ascending' to a\n"
5854 "\t sort field. The 'size' parameter can be used to specify more\n"
5855 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5856 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5857 "\t its histogram data will be shared with other triggers of the\n"
5858 "\t same name, and trigger hits will update this common data.\n\n"
5859 "\t Reading the 'hist' file for the event will dump the hash\n"
5860 "\t table in its entirety to stdout. If there are multiple hist\n"
5861 "\t triggers attached to an event, there will be a table for each\n"
5862 "\t trigger in the output. The table displayed for a named\n"
5863 "\t trigger will be the same as any other instance having the\n"
5864 "\t same name. The default format used to display a given field\n"
5865 "\t can be modified by appending any of the following modifiers\n"
5866 "\t to the field name, as applicable:\n\n"
5867 "\t .hex display a number as a hex value\n"
5868 "\t .sym display an address as a symbol\n"
5869 "\t .sym-offset display an address as a symbol and offset\n"
5870 "\t .execname display a common_pid as a program name\n"
5871 "\t .syscall display a syscall id as a syscall name\n"
5872 "\t .log2 display log2 value rather than raw number\n"
5873 "\t .buckets=size display values in groups of size rather than raw number\n"
5874 "\t .usecs display a common_timestamp in microseconds\n"
5875 "\t .percent display a number of percentage value\n"
5876 "\t .graph display a bar-graph of a value\n\n"
5877 "\t The 'pause' parameter can be used to pause an existing hist\n"
5878 "\t trigger or to start a hist trigger but not log any events\n"
5879 "\t until told to do so. 'continue' can be used to start or\n"
5880 "\t restart a paused hist trigger.\n\n"
5881 "\t The 'clear' parameter will clear the contents of a running\n"
5882 "\t hist trigger and leave its current paused/active state\n"
5884 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5885 "\t raw hitcount in the histogram.\n\n"
5886 "\t The enable_hist and disable_hist triggers can be used to\n"
5887 "\t have one event conditionally start and stop another event's\n"
5888 "\t already-attached hist trigger. The syntax is analogous to\n"
5889 "\t the enable_event and disable_event triggers.\n\n"
5890 "\t Hist trigger handlers and actions are executed whenever a\n"
5891 "\t a histogram entry is added or updated. They take the form:\n\n"
5892 "\t <handler>.<action>\n\n"
5893 "\t The available handlers are:\n\n"
5894 "\t onmatch(matching.event) - invoke on addition or update\n"
5895 "\t onmax(var) - invoke if var exceeds current max\n"
5896 "\t onchange(var) - invoke action if var changes\n\n"
5897 "\t The available actions are:\n\n"
5898 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5899 "\t save(field,...) - save current event fields\n"
5900 #ifdef CONFIG_TRACER_SNAPSHOT
5901 "\t snapshot() - snapshot the trace buffer\n\n"
5903 #ifdef CONFIG_SYNTH_EVENTS
5904 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5905 "\t Write into this file to define/undefine new synthetic events.\n"
5906 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5912 tracing_readme_read(struct file *filp, char __user *ubuf,
5913 size_t cnt, loff_t *ppos)
5915 return simple_read_from_buffer(ubuf, cnt, ppos,
5916 readme_msg, strlen(readme_msg));
5919 static const struct file_operations tracing_readme_fops = {
5920 .open = tracing_open_generic,
5921 .read = tracing_readme_read,
5922 .llseek = generic_file_llseek,
5925 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5929 return trace_find_tgid_ptr(pid);
5932 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5936 return trace_find_tgid_ptr(pid);
5939 static void saved_tgids_stop(struct seq_file *m, void *v)
5943 static int saved_tgids_show(struct seq_file *m, void *v)
5945 int *entry = (int *)v;
5946 int pid = entry - tgid_map;
5952 seq_printf(m, "%d %d\n", pid, tgid);
5956 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5957 .start = saved_tgids_start,
5958 .stop = saved_tgids_stop,
5959 .next = saved_tgids_next,
5960 .show = saved_tgids_show,
5963 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5967 ret = tracing_check_open_get_tr(NULL);
5971 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5975 static const struct file_operations tracing_saved_tgids_fops = {
5976 .open = tracing_saved_tgids_open,
5978 .llseek = seq_lseek,
5979 .release = seq_release,
5982 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5984 unsigned int *ptr = v;
5986 if (*pos || m->count)
5991 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5993 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
6002 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
6008 arch_spin_lock(&trace_cmdline_lock);
6010 v = &savedcmd->map_cmdline_to_pid[0];
6012 v = saved_cmdlines_next(m, v, &l);
6020 static void saved_cmdlines_stop(struct seq_file *m, void *v)
6022 arch_spin_unlock(&trace_cmdline_lock);
6026 static int saved_cmdlines_show(struct seq_file *m, void *v)
6028 char buf[TASK_COMM_LEN];
6029 unsigned int *pid = v;
6031 __trace_find_cmdline(*pid, buf);
6032 seq_printf(m, "%d %s\n", *pid, buf);
6036 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
6037 .start = saved_cmdlines_start,
6038 .next = saved_cmdlines_next,
6039 .stop = saved_cmdlines_stop,
6040 .show = saved_cmdlines_show,
6043 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
6047 ret = tracing_check_open_get_tr(NULL);
6051 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
6054 static const struct file_operations tracing_saved_cmdlines_fops = {
6055 .open = tracing_saved_cmdlines_open,
6057 .llseek = seq_lseek,
6058 .release = seq_release,
6062 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
6063 size_t cnt, loff_t *ppos)
6069 arch_spin_lock(&trace_cmdline_lock);
6070 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6071 arch_spin_unlock(&trace_cmdline_lock);
6074 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6077 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
6079 kfree(s->saved_cmdlines);
6080 kfree(s->map_cmdline_to_pid);
6084 static int tracing_resize_saved_cmdlines(unsigned int val)
6086 struct saved_cmdlines_buffer *s, *savedcmd_temp;
6088 s = kmalloc(sizeof(*s), GFP_KERNEL);
6092 if (allocate_cmdlines_buffer(val, s) < 0) {
6098 arch_spin_lock(&trace_cmdline_lock);
6099 savedcmd_temp = savedcmd;
6101 arch_spin_unlock(&trace_cmdline_lock);
6103 free_saved_cmdlines_buffer(savedcmd_temp);
6109 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6110 size_t cnt, loff_t *ppos)
6115 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6119 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
6120 if (!val || val > PID_MAX_DEFAULT)
6123 ret = tracing_resize_saved_cmdlines((unsigned int)val);
6132 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6133 .open = tracing_open_generic,
6134 .read = tracing_saved_cmdlines_size_read,
6135 .write = tracing_saved_cmdlines_size_write,
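/*
 * Illustrative sketch (not part of this file): resizing the saved cmdlines
 * cache through the file above. The value must be at least 1 and no more
 * than PID_MAX_DEFAULT. Assumes tracefs is mounted at /sys/kernel/tracing.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/saved_cmdlines_size", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "1024", 4);	// keep up to 1024 comm/pid mappings
 *		close(fd);
 *		return 0;
 *	}
 */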
6138 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6139 static union trace_eval_map_item *
6140 update_eval_map(union trace_eval_map_item *ptr)
6142 if (!ptr->map.eval_string) {
6143 if (ptr->tail.next) {
6144 ptr = ptr->tail.next;
6145 /* Set ptr to the next real item (skip head) */
6153 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6155 union trace_eval_map_item *ptr = v;
6158 * Paranoid! If ptr points to end, we don't want to increment past it.
6159 * This really should never happen.
6162 ptr = update_eval_map(ptr);
6163 if (WARN_ON_ONCE(!ptr))
6167 ptr = update_eval_map(ptr);
6172 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6174 union trace_eval_map_item *v;
6177 mutex_lock(&trace_eval_mutex);
6179 v = trace_eval_maps;
6183 while (v && l < *pos) {
6184 v = eval_map_next(m, v, &l);
6190 static void eval_map_stop(struct seq_file *m, void *v)
6192 mutex_unlock(&trace_eval_mutex);
6195 static int eval_map_show(struct seq_file *m, void *v)
6197 union trace_eval_map_item *ptr = v;
6199 seq_printf(m, "%s %ld (%s)\n",
6200 ptr->map.eval_string, ptr->map.eval_value,
6206 static const struct seq_operations tracing_eval_map_seq_ops = {
6207 .start = eval_map_start,
6208 .next = eval_map_next,
6209 .stop = eval_map_stop,
6210 .show = eval_map_show,
6213 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6217 ret = tracing_check_open_get_tr(NULL);
6221 return seq_open(filp, &tracing_eval_map_seq_ops);
6224 static const struct file_operations tracing_eval_map_fops = {
6225 .open = tracing_eval_map_open,
6227 .llseek = seq_lseek,
6228 .release = seq_release,
6231 static inline union trace_eval_map_item *
6232 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6234 /* Return tail of array given the head */
6235 return ptr + ptr->head.length + 1;
6239 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6242 struct trace_eval_map **stop;
6243 struct trace_eval_map **map;
6244 union trace_eval_map_item *map_array;
6245 union trace_eval_map_item *ptr;
6250 * The trace_eval_maps contains the map plus a head and tail item,
6251 * where the head holds the module and length of array, and the
6252 * tail holds a pointer to the next list.
6254 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6256 pr_warn("Unable to allocate trace eval mapping\n");
6260 mutex_lock(&trace_eval_mutex);
6262 if (!trace_eval_maps)
6263 trace_eval_maps = map_array;
6265 ptr = trace_eval_maps;
6267 ptr = trace_eval_jmp_to_tail(ptr);
6268 if (!ptr->tail.next)
6270 ptr = ptr->tail.next;
6273 ptr->tail.next = map_array;
6275 map_array->head.mod = mod;
6276 map_array->head.length = len;
6279 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6280 map_array->map = **map;
6283 memset(map_array, 0, sizeof(*map_array));
6285 mutex_unlock(&trace_eval_mutex);
6288 static void trace_create_eval_file(struct dentry *d_tracer)
6290 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6291 NULL, &tracing_eval_map_fops);
6294 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6295 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6296 static inline void trace_insert_eval_map_file(struct module *mod,
6297 struct trace_eval_map **start, int len) { }
6298 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6300 static void trace_insert_eval_map(struct module *mod,
6301 struct trace_eval_map **start, int len)
6303 struct trace_eval_map **map;
6310 trace_event_eval_update(map, len);
6312 trace_insert_eval_map_file(mod, start, len);
6316 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6317 size_t cnt, loff_t *ppos)
6319 struct trace_array *tr = filp->private_data;
6320 char buf[MAX_TRACER_SIZE+2];
6323 mutex_lock(&trace_types_lock);
6324 r = sprintf(buf, "%s\n", tr->current_trace->name);
6325 mutex_unlock(&trace_types_lock);
6327 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6330 int tracer_init(struct tracer *t, struct trace_array *tr)
6332 tracing_reset_online_cpus(&tr->array_buffer);
6336 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6340 for_each_tracing_cpu(cpu)
6341 per_cpu_ptr(buf->data, cpu)->entries = val;
6344 static void update_buffer_entries(struct array_buffer *buf, int cpu)
6346 if (cpu == RING_BUFFER_ALL_CPUS) {
6347 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
6349 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
6353 #ifdef CONFIG_TRACER_MAX_TRACE
6354 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6355 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6356 struct array_buffer *size_buf, int cpu_id)
6360 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6361 for_each_tracing_cpu(cpu) {
6362 ret = ring_buffer_resize(trace_buf->buffer,
6363 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6366 per_cpu_ptr(trace_buf->data, cpu)->entries =
6367 per_cpu_ptr(size_buf->data, cpu)->entries;
6370 ret = ring_buffer_resize(trace_buf->buffer,
6371 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6373 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6374 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6379 #endif /* CONFIG_TRACER_MAX_TRACE */
6381 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6382 unsigned long size, int cpu)
6387 * If kernel or user changes the size of the ring buffer
6388 * we use the size that was given, and we can forget about
6389 * expanding it later.
6391 ring_buffer_expanded = true;
6393 /* May be called before buffers are initialized */
6394 if (!tr->array_buffer.buffer)
6397 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6401 #ifdef CONFIG_TRACER_MAX_TRACE
6402 if (!tr->current_trace->use_max_tr)
6405 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6407 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6408 &tr->array_buffer, cpu);
6411 * AARGH! We are left with a max buffer of a different
6412 * size than the main buffer!
6413 * The max buffer is our "snapshot" buffer.
6414 * When a tracer needs a snapshot (one of the
6415 * latency tracers), it swaps the max buffer
6416 * with the saved snapshot. We succeeded in
6417 * updating the size of the main buffer, but failed to
6418 * update the size of the max buffer. And when we tried
6419 * to reset the main buffer to the original size, we
6420 * failed there too. This is very unlikely to
6421 * happen, but if it does, warn and kill all tracing.
6425 tracing_disabled = 1;
6430 update_buffer_entries(&tr->max_buffer, cpu);
6433 #endif /* CONFIG_TRACER_MAX_TRACE */
6435 update_buffer_entries(&tr->array_buffer, cpu);
6440 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6441 unsigned long size, int cpu_id)
6445 mutex_lock(&trace_types_lock);
6447 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6448 /* make sure this cpu is enabled in the mask */
6449 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6455 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6460 mutex_unlock(&trace_types_lock);
6467 * tracing_update_buffers - used by tracing facility to expand ring buffers
6469 * To save memory when tracing is never used on a system that has it
6470 * configured in, the ring buffers are set to a minimum size. But once
6471 * a user starts to use the tracing facility, they need to grow
6472 * to their default size.
6474 * This function is to be called when a tracer is about to be used.
6476 int tracing_update_buffers(void)
6480 mutex_lock(&trace_types_lock);
6481 if (!ring_buffer_expanded)
6482 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6483 RING_BUFFER_ALL_CPUS);
6484 mutex_unlock(&trace_types_lock);
6489 struct trace_option_dentry;
6492 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6495 * Used to clear out the tracer before deletion of an instance.
6496 * Must have trace_types_lock held.
6498 static void tracing_set_nop(struct trace_array *tr)
6500 if (tr->current_trace == &nop_trace)
6503 tr->current_trace->enabled--;
6505 if (tr->current_trace->reset)
6506 tr->current_trace->reset(tr);
6508 tr->current_trace = &nop_trace;
6511 static bool tracer_options_updated;
6513 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6515 /* Only enable if the directory has been created already. */
6519 /* Only create trace option files after update_tracer_options finishes */
6520 if (!tracer_options_updated)
6523 create_trace_option_files(tr, t);
6526 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6529 #ifdef CONFIG_TRACER_MAX_TRACE
6534 mutex_lock(&trace_types_lock);
6536 if (!ring_buffer_expanded) {
6537 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6538 RING_BUFFER_ALL_CPUS);
6544 for (t = trace_types; t; t = t->next) {
6545 if (strcmp(t->name, buf) == 0)
6552 if (t == tr->current_trace)
6555 #ifdef CONFIG_TRACER_SNAPSHOT
6556 if (t->use_max_tr) {
6557 local_irq_disable();
6558 arch_spin_lock(&tr->max_lock);
6559 if (tr->cond_snapshot)
6561 arch_spin_unlock(&tr->max_lock);
6567 /* Some tracers won't work on kernel command line */
6568 if (system_state < SYSTEM_RUNNING && t->noboot) {
6569 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6574 /* Some tracers are only allowed for the top level buffer */
6575 if (!trace_ok_for_array(t, tr)) {
6580 /* If trace pipe files are being read, we can't change the tracer */
6581 if (tr->trace_ref) {
6586 trace_branch_disable();
6588 tr->current_trace->enabled--;
6590 if (tr->current_trace->reset)
6591 tr->current_trace->reset(tr);
6593 #ifdef CONFIG_TRACER_MAX_TRACE
6594 had_max_tr = tr->current_trace->use_max_tr;
6596 /* Current trace needs to be nop_trace before synchronize_rcu */
6597 tr->current_trace = &nop_trace;
6599 if (had_max_tr && !t->use_max_tr) {
6601 * We need to make sure that the update_max_tr sees that
6602 * current_trace changed to nop_trace to keep it from
6603 * swapping the buffers after we resize it.
6604 * update_max_tr() is called with interrupts disabled,
6605 * so a synchronize_rcu() is sufficient.
6611 if (t->use_max_tr && !tr->allocated_snapshot) {
6612 ret = tracing_alloc_snapshot_instance(tr);
6617 tr->current_trace = &nop_trace;
6621 ret = tracer_init(t, tr);
6626 tr->current_trace = t;
6627 tr->current_trace->enabled++;
6628 trace_branch_enable(tr);
6630 mutex_unlock(&trace_types_lock);
6636 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6637 size_t cnt, loff_t *ppos)
6639 struct trace_array *tr = filp->private_data;
6640 char buf[MAX_TRACER_SIZE+1];
6647 if (cnt > MAX_TRACER_SIZE)
6648 cnt = MAX_TRACER_SIZE;
6650 if (copy_from_user(buf, ubuf, cnt))
6657 err = tracing_set_tracer(tr, name);
6667 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6668 size_t cnt, loff_t *ppos)
6673 r = snprintf(buf, sizeof(buf), "%ld\n",
6674 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6675 if (r > sizeof(buf))
6677 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6681 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6682 size_t cnt, loff_t *ppos)
6687 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6697 tracing_thresh_read(struct file *filp, char __user *ubuf,
6698 size_t cnt, loff_t *ppos)
6700 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6704 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6705 size_t cnt, loff_t *ppos)
6707 struct trace_array *tr = filp->private_data;
6710 mutex_lock(&trace_types_lock);
6711 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6715 if (tr->current_trace->update_thresh) {
6716 ret = tr->current_trace->update_thresh(tr);
6723 mutex_unlock(&trace_types_lock);
6728 #ifdef CONFIG_TRACER_MAX_TRACE
6731 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6732 size_t cnt, loff_t *ppos)
6734 struct trace_array *tr = filp->private_data;
6736 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6740 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6741 size_t cnt, loff_t *ppos)
6743 struct trace_array *tr = filp->private_data;
6745 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6750 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6752 if (cpu == RING_BUFFER_ALL_CPUS) {
6753 if (cpumask_empty(tr->pipe_cpumask)) {
6754 cpumask_setall(tr->pipe_cpumask);
6757 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6758 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6764 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6766 if (cpu == RING_BUFFER_ALL_CPUS) {
6767 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6768 cpumask_clear(tr->pipe_cpumask);
6770 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6771 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6775 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6777 struct trace_array *tr = inode->i_private;
6778 struct trace_iterator *iter;
6782 ret = tracing_check_open_get_tr(tr);
6786 mutex_lock(&trace_types_lock);
6787 cpu = tracing_get_cpu(inode);
6788 ret = open_pipe_on_cpu(tr, cpu);
6790 goto fail_pipe_on_cpu;
6792 /* create a buffer to store the information to pass to userspace */
6793 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6796 goto fail_alloc_iter;
6799 trace_seq_init(&iter->seq);
6800 iter->trace = tr->current_trace;
6802 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6807 /* trace pipe does not show start of buffer */
6808 cpumask_setall(iter->started);
6810 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6811 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6813 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6814 if (trace_clocks[tr->clock_id].in_ns)
6815 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6818 iter->array_buffer = &tr->array_buffer;
6819 iter->cpu_file = cpu;
6820 mutex_init(&iter->mutex);
6821 filp->private_data = iter;
6823 if (iter->trace->pipe_open)
6824 iter->trace->pipe_open(iter);
6826 nonseekable_open(inode, filp);
6830 mutex_unlock(&trace_types_lock);
6836 close_pipe_on_cpu(tr, cpu);
6838 __trace_array_put(tr);
6839 mutex_unlock(&trace_types_lock);
6843 static int tracing_release_pipe(struct inode *inode, struct file *file)
6845 struct trace_iterator *iter = file->private_data;
6846 struct trace_array *tr = inode->i_private;
6848 mutex_lock(&trace_types_lock);
6852 if (iter->trace->pipe_close)
6853 iter->trace->pipe_close(iter);
6854 close_pipe_on_cpu(tr, iter->cpu_file);
6855 mutex_unlock(&trace_types_lock);
6857 free_trace_iter_content(iter);
6860 trace_array_put(tr);
6866 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6868 struct trace_array *tr = iter->tr;
6870 /* Iterators are static, they should be filled or empty */
6871 if (trace_buffer_iter(iter, iter->cpu_file))
6872 return EPOLLIN | EPOLLRDNORM;
6874 if (tr->trace_flags & TRACE_ITER_BLOCK)
6876 * Always select as readable when in blocking mode
6878 return EPOLLIN | EPOLLRDNORM;
6880 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6881 filp, poll_table, iter->tr->buffer_percent);
6885 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6887 struct trace_iterator *iter = filp->private_data;
6889 return trace_poll(iter, filp, poll_table);
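/*
 * Illustrative sketch (not part of this file): waiting for data on
 * trace_pipe with poll(), which ends up in trace_poll() above.
 * Assumes tracefs is mounted at /sys/kernel/tracing.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		struct pollfd pfd = { .events = POLLIN };
 *		ssize_t n;
 *
 *		pfd.fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY | O_NONBLOCK);
 *		if (pfd.fd < 0)
 *			return 1;
 *
 *		for (;;) {
 *			if (poll(&pfd, 1, -1) <= 0)
 *				break;
 *			n = read(pfd.fd, buf, sizeof(buf));	// consuming read
 *			if (n > 0)
 *				fwrite(buf, 1, n, stdout);
 *		}
 *		close(pfd.fd);
 *		return 0;
 *	}
 */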
6892 /* Must be called with iter->mutex held. */
6893 static int tracing_wait_pipe(struct file *filp)
6895 struct trace_iterator *iter = filp->private_data;
6898 while (trace_empty(iter)) {
6900 if ((filp->f_flags & O_NONBLOCK)) {
6905 * We block until we read something and tracing is disabled.
6906 * We still block if tracing is disabled, but we have never
6907 * read anything. This allows a user to cat this file, and
6908 * then enable tracing. But after we have read something,
6909 * we give an EOF when tracing is again disabled.
6911 * iter->pos will be 0 if we haven't read anything.
6913 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6916 mutex_unlock(&iter->mutex);
6918 ret = wait_on_pipe(iter, 0);
6920 mutex_lock(&iter->mutex);
6933 tracing_read_pipe(struct file *filp, char __user *ubuf,
6934 size_t cnt, loff_t *ppos)
6936 struct trace_iterator *iter = filp->private_data;
6940 * Avoid more than one consumer on a single file descriptor.
6941 * This is just a matter of trace coherency; the ring buffer itself
6944 mutex_lock(&iter->mutex);
6946 /* return any leftover data */
6947 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6951 trace_seq_init(&iter->seq);
6953 if (iter->trace->read) {
6954 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6960 sret = tracing_wait_pipe(filp);
6964 /* stop when tracing is finished */
6965 if (trace_empty(iter)) {
6970 if (cnt >= PAGE_SIZE)
6971 cnt = PAGE_SIZE - 1;
6973 /* reset all but tr, trace, and overruns */
6974 trace_iterator_reset(iter);
6975 cpumask_clear(iter->started);
6976 trace_seq_init(&iter->seq);
6978 trace_event_read_lock();
6979 trace_access_lock(iter->cpu_file);
6980 while (trace_find_next_entry_inc(iter) != NULL) {
6981 enum print_line_t ret;
6982 int save_len = iter->seq.seq.len;
6984 ret = print_trace_line(iter);
6985 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6987 * If one print_trace_line() fills entire trace_seq in one shot,
6988 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6989 * In this case, we need to consume it, otherwise, loop will peek
6990 * this event next time, resulting in an infinite loop.
6992 if (save_len == 0) {
6994 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6995 trace_consume(iter);
6999 /* In other cases, don't print partial lines */
7000 iter->seq.seq.len = save_len;
7003 if (ret != TRACE_TYPE_NO_CONSUME)
7004 trace_consume(iter);
7006 if (trace_seq_used(&iter->seq) >= cnt)
7010 * Setting the full flag means we reached the trace_seq buffer
7011 * size and we should have left via the partial output condition above.
7012 * One of the trace_seq_* functions is not used properly.
7014 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
7017 trace_access_unlock(iter->cpu_file);
7018 trace_event_read_unlock();
7020 /* Now copy what we have to the user */
7021 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
7022 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
7023 trace_seq_init(&iter->seq);
7026 * If there was nothing to send to user, in spite of consuming trace
7027 * entries, go back to wait for more entries.
7033 mutex_unlock(&iter->mutex);
7038 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
7041 __free_page(spd->pages[idx]);
7045 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
7051 /* Seq buffer is page-sized, exactly what we need. */
7053 save_len = iter->seq.seq.len;
7054 ret = print_trace_line(iter);
7056 if (trace_seq_has_overflowed(&iter->seq)) {
7057 iter->seq.seq.len = save_len;
7062 * This should not be hit, because it should only
7063 * be set if the iter->seq overflowed. But check it
7064 * anyway to be safe.
7066 if (ret == TRACE_TYPE_PARTIAL_LINE) {
7067 iter->seq.seq.len = save_len;
7071 count = trace_seq_used(&iter->seq) - save_len;
7074 iter->seq.seq.len = save_len;
7078 if (ret != TRACE_TYPE_NO_CONSUME)
7079 trace_consume(iter);
7081 if (!trace_find_next_entry_inc(iter)) {
7091 static ssize_t tracing_splice_read_pipe(struct file *filp,
7093 struct pipe_inode_info *pipe,
7097 struct page *pages_def[PIPE_DEF_BUFFERS];
7098 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7099 struct trace_iterator *iter = filp->private_data;
7100 struct splice_pipe_desc spd = {
7102 .partial = partial_def,
7103 .nr_pages = 0, /* This gets updated below. */
7104 .nr_pages_max = PIPE_DEF_BUFFERS,
7105 .ops = &default_pipe_buf_ops,
7106 .spd_release = tracing_spd_release_pipe,
7112 if (splice_grow_spd(pipe, &spd))
7115 mutex_lock(&iter->mutex);
7117 if (iter->trace->splice_read) {
7118 ret = iter->trace->splice_read(iter, filp,
7119 ppos, pipe, len, flags);
7124 ret = tracing_wait_pipe(filp);
7128 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7133 trace_event_read_lock();
7134 trace_access_lock(iter->cpu_file);
7136 /* Fill as many pages as possible. */
7137 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7138 spd.pages[i] = alloc_page(GFP_KERNEL);
7142 rem = tracing_fill_pipe_page(rem, iter);
7144 /* Copy the data into the page, so we can start over. */
7145 ret = trace_seq_to_buffer(&iter->seq,
7146 page_address(spd.pages[i]),
7147 trace_seq_used(&iter->seq));
7149 __free_page(spd.pages[i]);
7152 spd.partial[i].offset = 0;
7153 spd.partial[i].len = trace_seq_used(&iter->seq);
7155 trace_seq_init(&iter->seq);
7158 trace_access_unlock(iter->cpu_file);
7159 trace_event_read_unlock();
7160 mutex_unlock(&iter->mutex);
7165 ret = splice_to_pipe(pipe, &spd);
7169 splice_shrink_spd(&spd);
7173 mutex_unlock(&iter->mutex);
7178 tracing_entries_read(struct file *filp, char __user *ubuf,
7179 size_t cnt, loff_t *ppos)
7181 struct inode *inode = file_inode(filp);
7182 struct trace_array *tr = inode->i_private;
7183 int cpu = tracing_get_cpu(inode);
7188 mutex_lock(&trace_types_lock);
7190 if (cpu == RING_BUFFER_ALL_CPUS) {
7191 int cpu, buf_size_same;
7196 /* check if all cpu sizes are same */
7197 for_each_tracing_cpu(cpu) {
7198 /* fill in the size from first enabled cpu */
7200 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7201 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7207 if (buf_size_same) {
7208 if (!ring_buffer_expanded)
7209 r = sprintf(buf, "%lu (expanded: %lu)\n",
7211 trace_buf_size >> 10);
7213 r = sprintf(buf, "%lu\n", size >> 10);
7215 r = sprintf(buf, "X\n");
7217 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7219 mutex_unlock(&trace_types_lock);
7221 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7226 tracing_entries_write(struct file *filp, const char __user *ubuf,
7227 size_t cnt, loff_t *ppos)
7229 struct inode *inode = file_inode(filp);
7230 struct trace_array *tr = inode->i_private;
7234 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7238 /* must have at least 1 entry */
7242 /* value is in KB */
7244 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7254 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7255 size_t cnt, loff_t *ppos)
7257 struct trace_array *tr = filp->private_data;
7260 unsigned long size = 0, expanded_size = 0;
7262 mutex_lock(&trace_types_lock);
7263 for_each_tracing_cpu(cpu) {
7264 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7265 if (!ring_buffer_expanded)
7266 expanded_size += trace_buf_size >> 10;
7268 if (ring_buffer_expanded)
7269 r = sprintf(buf, "%lu\n", size);
7271 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7272 mutex_unlock(&trace_types_lock);
7274 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7278 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7279 size_t cnt, loff_t *ppos)
7282 * There is no need to read what the user has written; this function
7283 * just makes sure that there is no error when "echo" is used
7292 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7294 struct trace_array *tr = inode->i_private;
7296 /* disable tracing ? */
7297 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7298 tracer_tracing_off(tr);
7299 /* resize the ring buffer to 0 */
7300 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7302 trace_array_put(tr);
7308 tracing_mark_write(struct file *filp, const char __user *ubuf,
7309 size_t cnt, loff_t *fpos)
7311 struct trace_array *tr = filp->private_data;
7312 struct ring_buffer_event *event;
7313 enum event_trigger_type tt = ETT_NONE;
7314 struct trace_buffer *buffer;
7315 struct print_entry *entry;
7320 /* Used in tracing_mark_raw_write() as well */
7321 #define FAULTED_STR "<faulted>"
7322 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7324 if (tracing_disabled)
7327 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7330 if (cnt > TRACE_BUF_SIZE)
7331 cnt = TRACE_BUF_SIZE;
7333 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7335 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7337 /* If less than "<faulted>", then make sure we can still add that */
7338 if (cnt < FAULTED_SIZE)
7339 size += FAULTED_SIZE - cnt;
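/*
 * For example, with FAULTED_SIZE == 9 a 3-byte write reserves room for
 * 9 + 2 bytes of buf, so the event can still hold "<faulted>" plus the
 * trailing '\n' and '\0' if the copy from user space below faults.
 */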
7341 buffer = tr->array_buffer.buffer;
7342 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7344 if (unlikely(!event))
7345 /* Ring buffer disabled, return as if not open for write */
7348 entry = ring_buffer_event_data(event);
7349 entry->ip = _THIS_IP_;
7351 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7353 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7359 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7360 /* do not add \n before testing triggers, but add \0 */
7361 entry->buf[cnt] = '\0';
7362 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7365 if (entry->buf[cnt - 1] != '\n') {
7366 entry->buf[cnt] = '\n';
7367 entry->buf[cnt + 1] = '\0';
7369 entry->buf[cnt] = '\0';
7371 if (static_branch_unlikely(&trace_marker_exports_enabled))
7372 ftrace_exports(event, TRACE_EXPORT_MARKER);
7373 __buffer_unlock_commit(buffer, event);
7376 event_triggers_post_call(tr->trace_marker_file, tt);
7381 /* Limit it for now to 3K (including tag) */
7382 #define RAW_DATA_MAX_SIZE (1024*3)
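/*
 * A write to trace_marker_raw must begin with a tag id (an unsigned int)
 * followed by the raw payload. A minimal user-space sketch, illustrative
 * only and not part of this file:
 *
 *	struct { unsigned int id; char data[8]; } rec = { .id = 42, .data = "payload" };
 *	write(fd, &rec, sizeof(rec));
 */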
7385 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7386 size_t cnt, loff_t *fpos)
7388 struct trace_array *tr = filp->private_data;
7389 struct ring_buffer_event *event;
7390 struct trace_buffer *buffer;
7391 struct raw_data_entry *entry;
7396 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7398 if (tracing_disabled)
7401 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7404 /* The marker must at least have a tag id */
7405 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7408 if (cnt > TRACE_BUF_SIZE)
7409 cnt = TRACE_BUF_SIZE;
7411 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7413 size = sizeof(*entry) + cnt;
7414 if (cnt < FAULT_SIZE_ID)
7415 size += FAULT_SIZE_ID - cnt;
7417 buffer = tr->array_buffer.buffer;
7418 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7421 /* Ring buffer disabled, return as if not open for write */
7424 entry = ring_buffer_event_data(event);
7426 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7429 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7434 __buffer_unlock_commit(buffer, event);
7439 static int tracing_clock_show(struct seq_file *m, void *v)
7441 struct trace_array *tr = m->private;
7444 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7446 "%s%s%s%s", i ? " " : "",
7447 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7448 i == tr->clock_id ? "]" : "");
7454 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7458 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7459 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7462 if (i == ARRAY_SIZE(trace_clocks))
7465 mutex_lock(&trace_types_lock);
7469 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7472 * New clock may not be consistent with the previous clock.
7473 * Reset the buffer so that it doesn't have incomparable timestamps.
7475 tracing_reset_online_cpus(&tr->array_buffer);
7477 #ifdef CONFIG_TRACER_MAX_TRACE
7478 if (tr->max_buffer.buffer)
7479 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7480 tracing_reset_online_cpus(&tr->max_buffer);
7483 mutex_unlock(&trace_types_lock);
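/*
 * The clock is normally selected from user space by writing one of the
 * names listed by tracing_clock_show() to the trace_clock file, e.g.
 * (illustrative only): echo global > trace_clock
 */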
7488 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7489 size_t cnt, loff_t *fpos)
7491 struct seq_file *m = filp->private_data;
7492 struct trace_array *tr = m->private;
7494 const char *clockstr;
7497 if (cnt >= sizeof(buf))
7500 if (copy_from_user(buf, ubuf, cnt))
7505 clockstr = strstrip(buf);
7507 ret = tracing_set_clock(tr, clockstr);
7516 static int tracing_clock_open(struct inode *inode, struct file *file)
7518 struct trace_array *tr = inode->i_private;
7521 ret = tracing_check_open_get_tr(tr);
7525 ret = single_open(file, tracing_clock_show, inode->i_private);
7527 trace_array_put(tr);
7532 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7534 struct trace_array *tr = m->private;
7536 mutex_lock(&trace_types_lock);
7538 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7539 seq_puts(m, "delta [absolute]\n");
7541 seq_puts(m, "[delta] absolute\n");
7543 mutex_unlock(&trace_types_lock);
7548 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7550 struct trace_array *tr = inode->i_private;
7553 ret = tracing_check_open_get_tr(tr);
7557 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7559 trace_array_put(tr);
7564 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
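	/*
	 * If @rbe is the per-CPU trace_buffered_event, it has not been
	 * written into the ring buffer yet and so has no ring-buffer
	 * timestamp of its own; fall back to the buffer's current time.
	 */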
7566 if (rbe == this_cpu_read(trace_buffered_event))
7567 return ring_buffer_time_stamp(buffer);
7569 return ring_buffer_event_time_stamp(buffer, rbe);
7573 * Set or disable using the per-CPU trace_buffered_event when possible.
7575 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7579 mutex_lock(&trace_types_lock);
7581 if (set && tr->no_filter_buffering_ref++)
7585 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7590 --tr->no_filter_buffering_ref;
7593 mutex_unlock(&trace_types_lock);
7598 struct ftrace_buffer_info {
7599 struct trace_iterator iter;
7601 unsigned int spare_cpu;
7605 #ifdef CONFIG_TRACER_SNAPSHOT
7606 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7608 struct trace_array *tr = inode->i_private;
7609 struct trace_iterator *iter;
7613 ret = tracing_check_open_get_tr(tr);
7617 if (file->f_mode & FMODE_READ) {
7618 iter = __tracing_open(inode, file, true);
7620 ret = PTR_ERR(iter);
7622 /* Writes still need the seq_file to hold the private data */
7624 m = kzalloc(sizeof(*m), GFP_KERNEL);
7627 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7635 iter->array_buffer = &tr->max_buffer;
7636 iter->cpu_file = tracing_get_cpu(inode);
7638 file->private_data = m;
7642 trace_array_put(tr);
7647 static void tracing_swap_cpu_buffer(void *tr)
7649 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7653 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7656 struct seq_file *m = filp->private_data;
7657 struct trace_iterator *iter = m->private;
7658 struct trace_array *tr = iter->tr;
7662 ret = tracing_update_buffers();
7666 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7670 mutex_lock(&trace_types_lock);
7672 if (tr->current_trace->use_max_tr) {
7677 local_irq_disable();
7678 arch_spin_lock(&tr->max_lock);
7679 if (tr->cond_snapshot)
7681 arch_spin_unlock(&tr->max_lock);
7688 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7692 if (tr->allocated_snapshot)
7696 /* Only allow per-cpu swap if the ring buffer supports it */
7697 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7698 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7703 if (tr->allocated_snapshot)
7704 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7705 &tr->array_buffer, iter->cpu_file);
7707 ret = tracing_alloc_snapshot_instance(tr);
7710 /* Now, we're going to swap */
7711 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7712 local_irq_disable();
7713 update_max_tr(tr, current, smp_processor_id(), NULL);
7716 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7721 if (tr->allocated_snapshot) {
7722 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7723 tracing_reset_online_cpus(&tr->max_buffer);
7725 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7735 mutex_unlock(&trace_types_lock);
7739 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7741 struct seq_file *m = file->private_data;
7744 ret = tracing_release(inode, file);
7746 if (file->f_mode & FMODE_READ)
7749 /* If write only, the seq_file is just a stub */
7757 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7758 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7759 size_t count, loff_t *ppos);
7760 static int tracing_buffers_release(struct inode *inode, struct file *file);
7761 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7762 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7764 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7766 struct ftrace_buffer_info *info;
7769 /* The following checks for tracefs lockdown */
7770 ret = tracing_buffers_open(inode, filp);
7774 info = filp->private_data;
7776 if (info->iter.trace->use_max_tr) {
7777 tracing_buffers_release(inode, filp);
7781 info->iter.snapshot = true;
7782 info->iter.array_buffer = &info->iter.tr->max_buffer;
7787 #endif /* CONFIG_TRACER_SNAPSHOT */
7790 static const struct file_operations tracing_thresh_fops = {
7791 .open = tracing_open_generic,
7792 .read = tracing_thresh_read,
7793 .write = tracing_thresh_write,
7794 .llseek = generic_file_llseek,
7797 #ifdef CONFIG_TRACER_MAX_TRACE
7798 static const struct file_operations tracing_max_lat_fops = {
7799 .open = tracing_open_generic_tr,
7800 .read = tracing_max_lat_read,
7801 .write = tracing_max_lat_write,
7802 .llseek = generic_file_llseek,
7803 .release = tracing_release_generic_tr,
7807 static const struct file_operations set_tracer_fops = {
7808 .open = tracing_open_generic_tr,
7809 .read = tracing_set_trace_read,
7810 .write = tracing_set_trace_write,
7811 .llseek = generic_file_llseek,
7812 .release = tracing_release_generic_tr,
7815 static const struct file_operations tracing_pipe_fops = {
7816 .open = tracing_open_pipe,
7817 .poll = tracing_poll_pipe,
7818 .read = tracing_read_pipe,
7819 .splice_read = tracing_splice_read_pipe,
7820 .release = tracing_release_pipe,
7821 .llseek = no_llseek,
7824 static const struct file_operations tracing_entries_fops = {
7825 .open = tracing_open_generic_tr,
7826 .read = tracing_entries_read,
7827 .write = tracing_entries_write,
7828 .llseek = generic_file_llseek,
7829 .release = tracing_release_generic_tr,
7832 static const struct file_operations tracing_total_entries_fops = {
7833 .open = tracing_open_generic_tr,
7834 .read = tracing_total_entries_read,
7835 .llseek = generic_file_llseek,
7836 .release = tracing_release_generic_tr,
7839 static const struct file_operations tracing_free_buffer_fops = {
7840 .open = tracing_open_generic_tr,
7841 .write = tracing_free_buffer_write,
7842 .release = tracing_free_buffer_release,
7845 static const struct file_operations tracing_mark_fops = {
7846 .open = tracing_mark_open,
7847 .write = tracing_mark_write,
7848 .release = tracing_release_generic_tr,
7851 static const struct file_operations tracing_mark_raw_fops = {
7852 .open = tracing_mark_open,
7853 .write = tracing_mark_raw_write,
7854 .release = tracing_release_generic_tr,
7857 static const struct file_operations trace_clock_fops = {
7858 .open = tracing_clock_open,
7860 .llseek = seq_lseek,
7861 .release = tracing_single_release_tr,
7862 .write = tracing_clock_write,
7865 static const struct file_operations trace_time_stamp_mode_fops = {
7866 .open = tracing_time_stamp_mode_open,
7868 .llseek = seq_lseek,
7869 .release = tracing_single_release_tr,
7872 #ifdef CONFIG_TRACER_SNAPSHOT
7873 static const struct file_operations snapshot_fops = {
7874 .open = tracing_snapshot_open,
7876 .write = tracing_snapshot_write,
7877 .llseek = tracing_lseek,
7878 .release = tracing_snapshot_release,
7881 static const struct file_operations snapshot_raw_fops = {
7882 .open = snapshot_raw_open,
7883 .read = tracing_buffers_read,
7884 .release = tracing_buffers_release,
7885 .splice_read = tracing_buffers_splice_read,
7886 .llseek = no_llseek,
7889 #endif /* CONFIG_TRACER_SNAPSHOT */
7892 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7893 * @filp: The active open file structure
7894 * @ubuf: The userspace provided buffer holding the value to write
7895 * @cnt: The number of bytes to read from @ubuf
7896 * @ppos: The current "file" position
7898 * This function implements the write interface for a struct trace_min_max_param.
7899 * The filp->private_data must point to a trace_min_max_param structure that
7900 * defines where to write the value, the min and the max acceptable values,
7901 * and a lock to protect the write.
7904 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7906 struct trace_min_max_param *param = filp->private_data;
7913 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7918 mutex_lock(param->lock);
7920 if (param->min && val < *param->min)
7923 if (param->max && val > *param->max)
7930 mutex_unlock(param->lock);
7939 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7940 * @filp: The active open file structure
7941 * @ubuf: The userspace provided buffer to read value into
7942 * @cnt: The maximum number of bytes to read
7943 * @ppos: The current "file" position
7945 * This function implements the read interface for a struct trace_min_max_param.
7946 * The filp->private_data must point to a trace_min_max_param struct with valid
7950 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7952 struct trace_min_max_param *param = filp->private_data;
7953 char buf[U64_STR_SIZE];
7962 if (cnt > sizeof(buf))
7965 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7967 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7970 const struct file_operations trace_min_max_fops = {
7971 .open = tracing_open_generic,
7972 .read = trace_min_max_read,
7973 .write = trace_min_max_write,
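/*
 * A sketch of how a file is typically wired to these ops; the names here
 * are hypothetical and for illustration only:
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static u64 my_val, my_min = 1, my_max = 1000;
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */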
7976 #define TRACING_LOG_ERRS_MAX 8
7977 #define TRACING_LOG_LOC_MAX 128
7979 #define CMD_PREFIX " Command: "
7982 const char **errs; /* ptr to loc-specific array of err strings */
7983 u8 type; /* index into errs -> specific err string */
7984 u16 pos; /* caret position */
7988 struct tracing_log_err {
7989 struct list_head list;
7990 struct err_info info;
7991 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7992 char *cmd; /* what caused err */
7995 static DEFINE_MUTEX(tracing_err_log_lock);
7997 static struct tracing_log_err *alloc_tracing_log_err(int len)
7999 struct tracing_log_err *err;
8001 err = kzalloc(sizeof(*err), GFP_KERNEL);
8003 return ERR_PTR(-ENOMEM);
8005 err->cmd = kzalloc(len, GFP_KERNEL);
8008 return ERR_PTR(-ENOMEM);
8014 static void free_tracing_log_err(struct tracing_log_err *err)
8020 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
8023 struct tracing_log_err *err;
8026 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
8027 err = alloc_tracing_log_err(len);
8028 if (PTR_ERR(err) != -ENOMEM)
8029 tr->n_err_log_entries++;
8033 cmd = kzalloc(len, GFP_KERNEL);
8035 return ERR_PTR(-ENOMEM);
8036 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8039 list_del(&err->list);
8045 * err_pos - find the position of a string within a command for error careting
8046 * @cmd: The tracing command that caused the error
8047 * @str: The string to position the caret at within @cmd
8049 * Finds the position of the first occurrence of @str within @cmd. The
8050 * return value can be passed to tracing_log_err() for caret placement
8053 * Returns the index within @cmd of the first occurrence of @str or 0
8054 * if @str was not found.
8056 unsigned int err_pos(char *cmd, const char *str)
8060 if (WARN_ON(!strlen(cmd)))
8063 found = strstr(cmd, str);
8071 * tracing_log_err - write an error to the tracing error log
8072 * @tr: The associated trace array for the error (NULL for top level array)
8073 * @loc: A string describing where the error occurred
8074 * @cmd: The tracing command that caused the error
8075 * @errs: The array of loc-specific static error strings
8076 * @type: The index into errs[], which produces the specific static err string
8077 * @pos: The position the caret should be placed in the cmd
8079 * Writes an error into tracing/error_log of the form:
8081 * <loc>: error: <text>
8085 * tracing/error_log is a small log file containing the last
8086 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
8087 * unless there has been a tracing error, and the error log can be
8088 * cleared, and its memory freed, by writing the empty string to it in
8089 * truncation mode, i.e. echo > tracing/error_log.
8091 * NOTE: the @errs array along with the @type param are used to
8092 * produce a static error string - this string is not copied and saved
8093 * when the error is logged - only a pointer to it is saved. See
8094 * existing callers for examples of how static strings are typically
8095 * defined for use with tracing_log_err().
8097 void tracing_log_err(struct trace_array *tr,
8098 const char *loc, const char *cmd,
8099 const char **errs, u8 type, u16 pos)
8101 struct tracing_log_err *err;
8107 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8109 mutex_lock(&tracing_err_log_lock);
8110 err = get_tracing_log_err(tr, len);
8111 if (PTR_ERR(err) == -ENOMEM) {
8112 mutex_unlock(&tracing_err_log_lock);
8116 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8117 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8119 err->info.errs = errs;
8120 err->info.type = type;
8121 err->info.pos = pos;
8122 err->info.ts = local_clock();
8124 list_add_tail(&err->list, &tr->err_log);
8125 mutex_unlock(&tracing_err_log_lock);
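/*
 * Callers typically pair tracing_log_err() with a static array of error
 * strings and an enum of indexes into it. An illustrative sketch with
 * hypothetical names (not from this file):
 *
 *	static const char *my_cmd_errs[] = { "Duplicate name", "Bad argument" };
 *	enum { MY_ERR_DUP, MY_ERR_ARG };
 *	...
 *	tracing_log_err(tr, "my_cmd", cmd_str, my_cmd_errs,
 *			MY_ERR_DUP, err_pos(cmd_str, name));
 */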
8128 static void clear_tracing_err_log(struct trace_array *tr)
8130 struct tracing_log_err *err, *next;
8132 mutex_lock(&tracing_err_log_lock);
8133 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8134 list_del(&err->list);
8135 free_tracing_log_err(err);
8138 tr->n_err_log_entries = 0;
8139 mutex_unlock(&tracing_err_log_lock);
8142 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8144 struct trace_array *tr = m->private;
8146 mutex_lock(&tracing_err_log_lock);
8148 return seq_list_start(&tr->err_log, *pos);
8151 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8153 struct trace_array *tr = m->private;
8155 return seq_list_next(v, &tr->err_log, pos);
8158 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8160 mutex_unlock(&tracing_err_log_lock);
8163 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8167 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8169 for (i = 0; i < pos; i++)
8174 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8176 struct tracing_log_err *err = v;
8179 const char *err_text = err->info.errs[err->info.type];
8180 u64 sec = err->info.ts;
8183 nsec = do_div(sec, NSEC_PER_SEC);
8184 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8185 err->loc, err_text);
8186 seq_printf(m, "%s", err->cmd);
8187 tracing_err_log_show_pos(m, err->info.pos);
8193 static const struct seq_operations tracing_err_log_seq_ops = {
8194 .start = tracing_err_log_seq_start,
8195 .next = tracing_err_log_seq_next,
8196 .stop = tracing_err_log_seq_stop,
8197 .show = tracing_err_log_seq_show
8200 static int tracing_err_log_open(struct inode *inode, struct file *file)
8202 struct trace_array *tr = inode->i_private;
8205 ret = tracing_check_open_get_tr(tr);
8209 /* If this file was opened for write, then erase contents */
8210 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8211 clear_tracing_err_log(tr);
8213 if (file->f_mode & FMODE_READ) {
8214 ret = seq_open(file, &tracing_err_log_seq_ops);
8216 struct seq_file *m = file->private_data;
8219 trace_array_put(tr);
8225 static ssize_t tracing_err_log_write(struct file *file,
8226 const char __user *buffer,
8227 size_t count, loff_t *ppos)
8232 static int tracing_err_log_release(struct inode *inode, struct file *file)
8234 struct trace_array *tr = inode->i_private;
8236 trace_array_put(tr);
8238 if (file->f_mode & FMODE_READ)
8239 seq_release(inode, file);
8244 static const struct file_operations tracing_err_log_fops = {
8245 .open = tracing_err_log_open,
8246 .write = tracing_err_log_write,
8248 .llseek = tracing_lseek,
8249 .release = tracing_err_log_release,
8252 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8254 struct trace_array *tr = inode->i_private;
8255 struct ftrace_buffer_info *info;
8258 ret = tracing_check_open_get_tr(tr);
8262 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8264 trace_array_put(tr);
8268 mutex_lock(&trace_types_lock);
8271 info->iter.cpu_file = tracing_get_cpu(inode);
8272 info->iter.trace = tr->current_trace;
8273 info->iter.array_buffer = &tr->array_buffer;
8275 /* Force reading ring buffer for first read */
8276 info->read = (unsigned int)-1;
8278 filp->private_data = info;
8282 mutex_unlock(&trace_types_lock);
8284 ret = nonseekable_open(inode, filp);
8286 trace_array_put(tr);
8292 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8294 struct ftrace_buffer_info *info = filp->private_data;
8295 struct trace_iterator *iter = &info->iter;
8297 return trace_poll(iter, filp, poll_table);
8301 tracing_buffers_read(struct file *filp, char __user *ubuf,
8302 size_t count, loff_t *ppos)
8304 struct ftrace_buffer_info *info = filp->private_data;
8305 struct trace_iterator *iter = &info->iter;
8312 #ifdef CONFIG_TRACER_MAX_TRACE
8313 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8318 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8320 if (IS_ERR(info->spare)) {
8321 ret = PTR_ERR(info->spare);
8324 info->spare_cpu = iter->cpu_file;
8330 /* Do we have previous read data to read? */
8331 if (info->read < PAGE_SIZE)
8335 trace_access_lock(iter->cpu_file);
8336 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8340 trace_access_unlock(iter->cpu_file);
8343 if (trace_empty(iter)) {
8344 if ((filp->f_flags & O_NONBLOCK))
8347 ret = wait_on_pipe(iter, 0);
8358 size = PAGE_SIZE - info->read;
8362 ret = copy_to_user(ubuf, info->spare + info->read, size);
8374 static int tracing_buffers_release(struct inode *inode, struct file *file)
8376 struct ftrace_buffer_info *info = file->private_data;
8377 struct trace_iterator *iter = &info->iter;
8379 mutex_lock(&trace_types_lock);
8381 iter->tr->trace_ref--;
8383 __trace_array_put(iter->tr);
8386 /* Make sure the waiters see the new wait_index */
8389 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8392 ring_buffer_free_read_page(iter->array_buffer->buffer,
8393 info->spare_cpu, info->spare);
8396 mutex_unlock(&trace_types_lock);
8402 struct trace_buffer *buffer;
8405 refcount_t refcount;
8408 static void buffer_ref_release(struct buffer_ref *ref)
8410 if (!refcount_dec_and_test(&ref->refcount))
8412 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8416 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8417 struct pipe_buffer *buf)
8419 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8421 buffer_ref_release(ref);
8425 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8426 struct pipe_buffer *buf)
8428 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8430 if (refcount_read(&ref->refcount) > INT_MAX/2)
8433 refcount_inc(&ref->refcount);
8437 /* Pipe buffer operations for a buffer. */
8438 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8439 .release = buffer_pipe_buf_release,
8440 .get = buffer_pipe_buf_get,
8444 * Callback from splice_to_pipe(): release any pages still held in the
8445 * spd in case we errored out while filling the pipe.
8447 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8449 struct buffer_ref *ref =
8450 (struct buffer_ref *)spd->partial[i].private;
8452 buffer_ref_release(ref);
8453 spd->partial[i].private = 0;
8457 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8458 struct pipe_inode_info *pipe, size_t len,
8461 struct ftrace_buffer_info *info = file->private_data;
8462 struct trace_iterator *iter = &info->iter;
8463 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8464 struct page *pages_def[PIPE_DEF_BUFFERS];
8465 struct splice_pipe_desc spd = {
8467 .partial = partial_def,
8468 .nr_pages_max = PIPE_DEF_BUFFERS,
8469 .ops = &buffer_pipe_buf_ops,
8470 .spd_release = buffer_spd_release,
8472 struct buffer_ref *ref;
8476 #ifdef CONFIG_TRACER_MAX_TRACE
8477 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
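	/*
	 * Splice reads work on whole pages: the offset must be page
	 * aligned, and a length that is not a multiple of PAGE_SIZE is
	 * truncated down (sub-page requests are rejected).
	 */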
8481 if (*ppos & (PAGE_SIZE - 1))
8484 if (len & (PAGE_SIZE - 1)) {
8485 if (len < PAGE_SIZE)
8490 if (splice_grow_spd(pipe, &spd))
8494 trace_access_lock(iter->cpu_file);
8495 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8497 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8501 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8507 refcount_set(&ref->refcount, 1);
8508 ref->buffer = iter->array_buffer->buffer;
8509 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8510 if (IS_ERR(ref->page)) {
8511 ret = PTR_ERR(ref->page);
8516 ref->cpu = iter->cpu_file;
8518 r = ring_buffer_read_page(ref->buffer, &ref->page,
8519 len, iter->cpu_file, 1);
8521 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8527 page = virt_to_page(ref->page);
8529 spd.pages[i] = page;
8530 spd.partial[i].len = PAGE_SIZE;
8531 spd.partial[i].offset = 0;
8532 spd.partial[i].private = (unsigned long)ref;
8536 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8539 trace_access_unlock(iter->cpu_file);
8542 /* did we read anything? */
8543 if (!spd.nr_pages) {
8550 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8553 wait_index = READ_ONCE(iter->wait_index);
8555 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8559 /* No need to wait after waking up when tracing is off */
8560 if (!tracer_tracing_is_on(iter->tr))
8563 /* Make sure we see the new wait_index */
8565 if (wait_index != iter->wait_index)
8571 ret = splice_to_pipe(pipe, &spd);
8573 splice_shrink_spd(&spd);
8578 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8579 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8581 struct ftrace_buffer_info *info = file->private_data;
8582 struct trace_iterator *iter = &info->iter;
8585 return -ENOIOCTLCMD;
8587 mutex_lock(&trace_types_lock);
8590 /* Make sure the waiters see the new wait_index */
8593 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8595 mutex_unlock(&trace_types_lock);
8599 static const struct file_operations tracing_buffers_fops = {
8600 .open = tracing_buffers_open,
8601 .read = tracing_buffers_read,
8602 .poll = tracing_buffers_poll,
8603 .release = tracing_buffers_release,
8604 .splice_read = tracing_buffers_splice_read,
8605 .unlocked_ioctl = tracing_buffers_ioctl,
8606 .llseek = no_llseek,
8610 tracing_stats_read(struct file *filp, char __user *ubuf,
8611 size_t count, loff_t *ppos)
8613 struct inode *inode = file_inode(filp);
8614 struct trace_array *tr = inode->i_private;
8615 struct array_buffer *trace_buf = &tr->array_buffer;
8616 int cpu = tracing_get_cpu(inode);
8617 struct trace_seq *s;
8619 unsigned long long t;
8620 unsigned long usec_rem;
8622 s = kmalloc(sizeof(*s), GFP_KERNEL);
8628 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8629 trace_seq_printf(s, "entries: %ld\n", cnt);
8631 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8632 trace_seq_printf(s, "overrun: %ld\n", cnt);
8634 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8635 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8637 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8638 trace_seq_printf(s, "bytes: %ld\n", cnt);
8640 if (trace_clocks[tr->clock_id].in_ns) {
8641 /* local or global for trace_clock */
8642 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8643 usec_rem = do_div(t, USEC_PER_SEC);
8644 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8647 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8648 usec_rem = do_div(t, USEC_PER_SEC);
8649 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8651 /* counter or tsc mode for trace_clock */
8652 trace_seq_printf(s, "oldest event ts: %llu\n",
8653 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8655 trace_seq_printf(s, "now ts: %llu\n",
8656 ring_buffer_time_stamp(trace_buf->buffer));
8659 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8660 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8662 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8663 trace_seq_printf(s, "read events: %ld\n", cnt);
8665 count = simple_read_from_buffer(ubuf, count, ppos,
8666 s->buffer, trace_seq_used(s));
8673 static const struct file_operations tracing_stats_fops = {
8674 .open = tracing_open_generic_tr,
8675 .read = tracing_stats_read,
8676 .llseek = generic_file_llseek,
8677 .release = tracing_release_generic_tr,
8680 #ifdef CONFIG_DYNAMIC_FTRACE
8683 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8684 size_t cnt, loff_t *ppos)
8690 /* 256 should be plenty to hold the amount needed */
8691 buf = kmalloc(256, GFP_KERNEL);
8695 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8696 ftrace_update_tot_cnt,
8697 ftrace_number_of_pages,
8698 ftrace_number_of_groups);
8700 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8705 static const struct file_operations tracing_dyn_info_fops = {
8706 .open = tracing_open_generic,
8707 .read = tracing_read_dyn_info,
8708 .llseek = generic_file_llseek,
8710 #endif /* CONFIG_DYNAMIC_FTRACE */
8712 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8714 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8715 struct trace_array *tr, struct ftrace_probe_ops *ops,
8718 tracing_snapshot_instance(tr);
8722 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8723 struct trace_array *tr, struct ftrace_probe_ops *ops,
8726 struct ftrace_func_mapper *mapper = data;
8730 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8740 tracing_snapshot_instance(tr);
8744 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8745 struct ftrace_probe_ops *ops, void *data)
8747 struct ftrace_func_mapper *mapper = data;
8750 seq_printf(m, "%ps:", (void *)ip);
8752 seq_puts(m, "snapshot");
8755 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8758 seq_printf(m, ":count=%ld\n", *count);
8760 seq_puts(m, ":unlimited\n");
8766 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8767 unsigned long ip, void *init_data, void **data)
8769 struct ftrace_func_mapper *mapper = *data;
8772 mapper = allocate_ftrace_func_mapper();
8778 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8782 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8783 unsigned long ip, void *data)
8785 struct ftrace_func_mapper *mapper = data;
8790 free_ftrace_func_mapper(mapper, NULL);
8794 ftrace_func_mapper_remove_ip(mapper, ip);
8797 static struct ftrace_probe_ops snapshot_probe_ops = {
8798 .func = ftrace_snapshot,
8799 .print = ftrace_snapshot_print,
8802 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8803 .func = ftrace_count_snapshot,
8804 .print = ftrace_snapshot_print,
8805 .init = ftrace_snapshot_init,
8806 .free = ftrace_snapshot_free,
8810 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8811 char *glob, char *cmd, char *param, int enable)
8813 struct ftrace_probe_ops *ops;
8814 void *count = (void *)-1;
8821 /* hash funcs only work with set_ftrace_filter */
8825 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8828 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8833 number = strsep(&param, ":");
8835 if (!strlen(number))
8839 * We use the callback data field (which is a pointer) as our counter.
8842 ret = kstrtoul(number, 0, (unsigned long *)&count);
8847 ret = tracing_alloc_snapshot_instance(tr);
8851 ret = register_ftrace_function_probe(glob, tr, ops, count);
8854 return ret < 0 ? ret : 0;
8857 static struct ftrace_func_command ftrace_snapshot_cmd = {
8859 .func = ftrace_trace_snapshot_callback,
8862 static __init int register_snapshot_cmd(void)
8864 return register_ftrace_command(&ftrace_snapshot_cmd);
8867 static inline __init int register_snapshot_cmd(void) { return 0; }
8868 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8870 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8872 if (WARN_ON(!tr->dir))
8873 return ERR_PTR(-ENODEV);
8875 /* Top directory uses NULL as the parent */
8876 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8879 /* All sub buffers have a descriptor */
8883 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8885 struct dentry *d_tracer;
8888 return tr->percpu_dir;
8890 d_tracer = tracing_get_dentry(tr);
8891 if (IS_ERR(d_tracer))
8894 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8896 MEM_FAIL(!tr->percpu_dir,
8897 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8899 return tr->percpu_dir;
8902 static struct dentry *
8903 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8904 void *data, long cpu, const struct file_operations *fops)
8906 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8908 if (ret) /* See tracing_get_cpu() */
8909 d_inode(ret)->i_cdev = (void *)(cpu + 1);
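	/*
	 * The CPU number is stored biased by one so that CPU 0 is not
	 * mistaken for a NULL i_cdev; tracing_get_cpu() subtracts the
	 * bias again and treats a NULL i_cdev as RING_BUFFER_ALL_CPUS.
	 */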
8914 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8916 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8917 struct dentry *d_cpu;
8918 char cpu_dir[30]; /* 30 characters should be more than enough */
8923 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8924 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8926 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8930 /* per cpu trace_pipe */
8931 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8932 tr, cpu, &tracing_pipe_fops);
8935 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8936 tr, cpu, &tracing_fops);
8938 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8939 tr, cpu, &tracing_buffers_fops);
8941 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8942 tr, cpu, &tracing_stats_fops);
8944 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8945 tr, cpu, &tracing_entries_fops);
8947 #ifdef CONFIG_TRACER_SNAPSHOT
8948 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8949 tr, cpu, &snapshot_fops);
8951 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8952 tr, cpu, &snapshot_raw_fops);
8956 #ifdef CONFIG_FTRACE_SELFTEST
8957 /* Let selftest have access to static functions in this file */
8958 #include "trace_selftest.c"
8962 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8965 struct trace_option_dentry *topt = filp->private_data;
8968 if (topt->flags->val & topt->opt->bit)
8973 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8977 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8980 struct trace_option_dentry *topt = filp->private_data;
8984 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8988 if (val != 0 && val != 1)
8991 if (!!(topt->flags->val & topt->opt->bit) != val) {
8992 mutex_lock(&trace_types_lock);
8993 ret = __set_tracer_option(topt->tr, topt->flags,
8995 mutex_unlock(&trace_types_lock);
9005 static int tracing_open_options(struct inode *inode, struct file *filp)
9007 struct trace_option_dentry *topt = inode->i_private;
9010 ret = tracing_check_open_get_tr(topt->tr);
9014 filp->private_data = inode->i_private;
9018 static int tracing_release_options(struct inode *inode, struct file *file)
9020 struct trace_option_dentry *topt = file->private_data;
9022 trace_array_put(topt->tr);
9026 static const struct file_operations trace_options_fops = {
9027 .open = tracing_open_options,
9028 .read = trace_options_read,
9029 .write = trace_options_write,
9030 .llseek = generic_file_llseek,
9031 .release = tracing_release_options,
9035 * In order to pass in both the trace_array descriptor as well as the index
9036 * to the flag that the trace option file represents, the trace_array
9037 * has a character array of trace_flags_index[], which holds the index
9038 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
9039 * The address of this character array is passed to the flag option file
9040 * read/write callbacks.
9042 * In order to extract both the index and the trace_array descriptor,
9043 * get_tr_index() uses the following algorithm.
9047 * The pointer itself points into the index array, so dereferencing it yields the index (remember index[1] == 1).
9050 * Then, by subtracting that index from the ptr, we get to the start
9051 * of the index array itself:
9053 * ptr - idx == &index[0]
9055 * Then a simple container_of() from that pointer gets us to the
9056 * trace_array descriptor.
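/*
 * A concrete walk-through: if data points at tr->trace_flags_index[5],
 * then *data == 5, data - 5 == &tr->trace_flags_index[0], and
 * container_of() on that address via the trace_flags_index member
 * recovers tr itself.
 */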
9058 static void get_tr_index(void *data, struct trace_array **ptr,
9059 unsigned int *pindex)
9061 *pindex = *(unsigned char *)data;
9063 *ptr = container_of(data - *pindex, struct trace_array,
9068 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
9071 void *tr_index = filp->private_data;
9072 struct trace_array *tr;
9076 get_tr_index(tr_index, &tr, &index);
9078 if (tr->trace_flags & (1 << index))
9083 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
9087 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
9090 void *tr_index = filp->private_data;
9091 struct trace_array *tr;
9096 get_tr_index(tr_index, &tr, &index);
9098 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9102 if (val != 0 && val != 1)
9105 mutex_lock(&event_mutex);
9106 mutex_lock(&trace_types_lock);
9107 ret = set_tracer_flag(tr, 1 << index, val);
9108 mutex_unlock(&trace_types_lock);
9109 mutex_unlock(&event_mutex);
9119 static const struct file_operations trace_options_core_fops = {
9120 .open = tracing_open_generic,
9121 .read = trace_options_core_read,
9122 .write = trace_options_core_write,
9123 .llseek = generic_file_llseek,
9126 struct dentry *trace_create_file(const char *name,
9128 struct dentry *parent,
9130 const struct file_operations *fops)
9134 ret = tracefs_create_file(name, mode, parent, data, fops);
9136 pr_warn("Could not create tracefs '%s' entry\n", name);
9142 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9144 struct dentry *d_tracer;
9149 d_tracer = tracing_get_dentry(tr);
9150 if (IS_ERR(d_tracer))
9153 tr->options = tracefs_create_dir("options", d_tracer);
9155 pr_warn("Could not create tracefs directory 'options'\n");
9163 create_trace_option_file(struct trace_array *tr,
9164 struct trace_option_dentry *topt,
9165 struct tracer_flags *flags,
9166 struct tracer_opt *opt)
9168 struct dentry *t_options;
9170 t_options = trace_options_init_dentry(tr);
9174 topt->flags = flags;
9178 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9179 t_options, topt, &trace_options_fops);
9184 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9186 struct trace_option_dentry *topts;
9187 struct trace_options *tr_topts;
9188 struct tracer_flags *flags;
9189 struct tracer_opt *opts;
9196 flags = tracer->flags;
9198 if (!flags || !flags->opts)
9202 * If this is an instance, only create flags for tracers
9203 * the instance may have.
9205 if (!trace_ok_for_array(tracer, tr))
9208 for (i = 0; i < tr->nr_topts; i++) {
9209 /* Make sure there are no duplicate flags. */
9210 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9216 for (cnt = 0; opts[cnt].name; cnt++)
9219 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9223 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9230 tr->topts = tr_topts;
9231 tr->topts[tr->nr_topts].tracer = tracer;
9232 tr->topts[tr->nr_topts].topts = topts;
9235 for (cnt = 0; opts[cnt].name; cnt++) {
9236 create_trace_option_file(tr, &topts[cnt], flags,
9238 MEM_FAIL(topts[cnt].entry == NULL,
9239 "Failed to create trace option: %s",
9244 static struct dentry *
9245 create_trace_option_core_file(struct trace_array *tr,
9246 const char *option, long index)
9248 struct dentry *t_options;
9250 t_options = trace_options_init_dentry(tr);
9254 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9255 (void *)&tr->trace_flags_index[index],
9256 &trace_options_core_fops);
9259 static void create_trace_options_dir(struct trace_array *tr)
9261 struct dentry *t_options;
9262 bool top_level = tr == &global_trace;
9265 t_options = trace_options_init_dentry(tr);
9269 for (i = 0; trace_options[i]; i++) {
9271 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9272 create_trace_option_core_file(tr, trace_options[i], i);
9277 rb_simple_read(struct file *filp, char __user *ubuf,
9278 size_t cnt, loff_t *ppos)
9280 struct trace_array *tr = filp->private_data;
9284 r = tracer_tracing_is_on(tr);
9285 r = sprintf(buf, "%d\n", r);
9287 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9291 rb_simple_write(struct file *filp, const char __user *ubuf,
9292 size_t cnt, loff_t *ppos)
9294 struct trace_array *tr = filp->private_data;
9295 struct trace_buffer *buffer = tr->array_buffer.buffer;
9299 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9304 mutex_lock(&trace_types_lock);
9305 if (!!val == tracer_tracing_is_on(tr)) {
9306 val = 0; /* do nothing */
9308 tracer_tracing_on(tr);
9309 if (tr->current_trace->start)
9310 tr->current_trace->start(tr);
9312 tracer_tracing_off(tr);
9313 if (tr->current_trace->stop)
9314 tr->current_trace->stop(tr);
9315 /* Wake up any waiters */
9316 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9318 mutex_unlock(&trace_types_lock);
9326 static const struct file_operations rb_simple_fops = {
9327 .open = tracing_open_generic_tr,
9328 .read = rb_simple_read,
9329 .write = rb_simple_write,
9330 .release = tracing_release_generic_tr,
9331 .llseek = default_llseek,
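/*
 * These ops back the per-instance "tracing_on" file created in
 * init_tracer_tracefs() below: writing 0 or 1 (e.g. echo 0 > tracing_on)
 * turns the ring buffer off or on without changing the current tracer.
 */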
9335 buffer_percent_read(struct file *filp, char __user *ubuf,
9336 size_t cnt, loff_t *ppos)
9338 struct trace_array *tr = filp->private_data;
9342 r = tr->buffer_percent;
9343 r = sprintf(buf, "%d\n", r);
9345 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9349 buffer_percent_write(struct file *filp, const char __user *ubuf,
9350 size_t cnt, loff_t *ppos)
9352 struct trace_array *tr = filp->private_data;
9356 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9363 tr->buffer_percent = val;
9370 static const struct file_operations buffer_percent_fops = {
9371 .open = tracing_open_generic_tr,
9372 .read = buffer_percent_read,
9373 .write = buffer_percent_write,
9374 .release = tracing_release_generic_tr,
9375 .llseek = default_llseek,
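/*
 * buffer_percent is the watermark for how full the ring buffer must be
 * before a blocked reader of the per-CPU trace_pipe_raw files is woken:
 * 0 wakes on any new data, 100 waits until the buffer is full. The
 * default of 50 is set in init_tracer_tracefs().
 */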
9378 static struct dentry *trace_instance_dir;
9381 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9384 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9386 enum ring_buffer_flags rb_flags;
9388 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9392 buf->buffer = ring_buffer_alloc(size, rb_flags);
9396 buf->data = alloc_percpu(struct trace_array_cpu);
9398 ring_buffer_free(buf->buffer);
9403 /* Allocate the first page for all buffers */
9404 set_buffer_entries(&tr->array_buffer,
9405 ring_buffer_size(tr->array_buffer.buffer, 0));
9410 static void free_trace_buffer(struct array_buffer *buf)
9413 ring_buffer_free(buf->buffer);
9415 free_percpu(buf->data);
9420 static int allocate_trace_buffers(struct trace_array *tr, int size)
9424 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9428 #ifdef CONFIG_TRACER_MAX_TRACE
9429 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9430 allocate_snapshot ? size : 1);
9431 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9432 free_trace_buffer(&tr->array_buffer);
9435 tr->allocated_snapshot = allocate_snapshot;
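	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line; clear the flag so instances
	 * created later start without one.
	 */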
9437 allocate_snapshot = false;
9443 static void free_trace_buffers(struct trace_array *tr)
9448 free_trace_buffer(&tr->array_buffer);
9450 #ifdef CONFIG_TRACER_MAX_TRACE
9451 free_trace_buffer(&tr->max_buffer);
9455 static void init_trace_flags_index(struct trace_array *tr)
9459 /* Used by the trace options files */
9460 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9461 tr->trace_flags_index[i] = i;
9464 static void __update_tracer_options(struct trace_array *tr)
9468 for (t = trace_types; t; t = t->next)
9469 add_tracer_options(tr, t);
9472 static void update_tracer_options(struct trace_array *tr)
9474 mutex_lock(&trace_types_lock);
9475 tracer_options_updated = true;
9476 __update_tracer_options(tr);
9477 mutex_unlock(&trace_types_lock);
9480 /* Must have trace_types_lock held */
9481 struct trace_array *trace_array_find(const char *instance)
9483 struct trace_array *tr, *found = NULL;
9485 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9486 if (tr->name && strcmp(tr->name, instance) == 0) {
9495 struct trace_array *trace_array_find_get(const char *instance)
9497 struct trace_array *tr;
9499 mutex_lock(&trace_types_lock);
9500 tr = trace_array_find(instance);
9503 mutex_unlock(&trace_types_lock);
9508 static int trace_array_create_dir(struct trace_array *tr)
9512 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9516 ret = event_trace_add_tracer(tr->dir, tr);
9518 tracefs_remove(tr->dir);
9522 init_tracer_tracefs(tr, tr->dir);
9523 __update_tracer_options(tr);
9528 static struct trace_array *trace_array_create(const char *name)
9530 struct trace_array *tr;
9534 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9536 return ERR_PTR(ret);
9538 tr->name = kstrdup(name, GFP_KERNEL);
9542 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9545 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9548 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9550 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9552 raw_spin_lock_init(&tr->start_lock);
9554 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9556 tr->current_trace = &nop_trace;
9558 INIT_LIST_HEAD(&tr->systems);
9559 INIT_LIST_HEAD(&tr->events);
9560 INIT_LIST_HEAD(&tr->hist_vars);
9561 INIT_LIST_HEAD(&tr->err_log);
9563 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9566 if (ftrace_allocate_ftrace_ops(tr) < 0)
9569 ftrace_init_trace_array(tr);
9571 init_trace_flags_index(tr);
9573 if (trace_instance_dir) {
9574 ret = trace_array_create_dir(tr);
9578 __trace_early_add_events(tr);
9580 list_add(&tr->list, &ftrace_trace_arrays);
9587 ftrace_free_ftrace_ops(tr);
9588 free_trace_buffers(tr);
9589 free_cpumask_var(tr->pipe_cpumask);
9590 free_cpumask_var(tr->tracing_cpumask);
9594 return ERR_PTR(ret);
9597 static int instance_mkdir(const char *name)
9599 struct trace_array *tr;
9602 mutex_lock(&event_mutex);
9603 mutex_lock(&trace_types_lock);
9606 if (trace_array_find(name))
9609 tr = trace_array_create(name);
9611 ret = PTR_ERR_OR_ZERO(tr);
9614 mutex_unlock(&trace_types_lock);
9615 mutex_unlock(&event_mutex);
9620 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9621 * @name: The name of the trace array to be looked up/created.
9623 * Returns a pointer to the trace array with the given name, or
9624 * NULL if it cannot be created.
9626 * NOTE: This function increments the reference counter associated with the
9627 * trace array returned. This makes sure it cannot be freed while in use.
9628 * Use trace_array_put() once the trace array is no longer needed.
9629 * If the trace_array is to be freed, trace_array_destroy() needs to
9630 * be called after the trace_array_put(), or simply let user space delete
9631 * it from the tracefs instances directory. But until the
9632 * trace_array_put() is called, user space can not delete it.
9635 struct trace_array *trace_array_get_by_name(const char *name)
9637 struct trace_array *tr;
9639 mutex_lock(&event_mutex);
9640 mutex_lock(&trace_types_lock);
9642 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9643 if (tr->name && strcmp(tr->name, name) == 0)
9647 tr = trace_array_create(name);
9655 mutex_unlock(&trace_types_lock);
9656 mutex_unlock(&event_mutex);
9659 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
9661 static int __remove_instance(struct trace_array *tr)
9665 /* Reference counter for a newly created trace array = 1. */
9666 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9669 list_del(&tr->list);
9671 /* Disable all the flags that were enabled coming in */
9672 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9673 if ((1 << i) & ZEROED_TRACE_FLAGS)
9674 set_tracer_flag(tr, 1 << i, 0);
9677 tracing_set_nop(tr);
9678 clear_ftrace_function_probes(tr);
9679 event_trace_del_tracer(tr);
9680 ftrace_clear_pids(tr);
9681 ftrace_destroy_function_files(tr);
9682 tracefs_remove(tr->dir);
9683 free_percpu(tr->last_func_repeats);
9684 free_trace_buffers(tr);
9685 clear_tracing_err_log(tr);
9687 for (i = 0; i < tr->nr_topts; i++) {
9688 kfree(tr->topts[i].topts);
9692 free_cpumask_var(tr->pipe_cpumask);
9693 free_cpumask_var(tr->tracing_cpumask);
9700 int trace_array_destroy(struct trace_array *this_tr)
9702 struct trace_array *tr;
9708 mutex_lock(&event_mutex);
9709 mutex_lock(&trace_types_lock);
9713 /* Make sure the trace array exists before destroying it. */
9714 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9715 if (tr == this_tr) {
9716 ret = __remove_instance(tr);
9721 mutex_unlock(&trace_types_lock);
9722 mutex_unlock(&event_mutex);
9726 EXPORT_SYMBOL_GPL(trace_array_destroy);
9728 static int instance_rmdir(const char *name)
9730 struct trace_array *tr;
9733 mutex_lock(&event_mutex);
9734 mutex_lock(&trace_types_lock);
9737 tr = trace_array_find(name);
9739 ret = __remove_instance(tr);
9741 mutex_unlock(&trace_types_lock);
9742 mutex_unlock(&event_mutex);
9747 static __init void create_trace_instances(struct dentry *d_tracer)
9749 struct trace_array *tr;
9751 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9754 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9757 mutex_lock(&event_mutex);
9758 mutex_lock(&trace_types_lock);
9760 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9763 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9764 "Failed to create instance directory\n"))
9768 mutex_unlock(&trace_types_lock);
9769 mutex_unlock(&event_mutex);
9773 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9775 struct trace_event_file *file;
9778 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9779 tr, &show_traces_fops);
9781 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9782 tr, &set_tracer_fops);
9784 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9785 tr, &tracing_cpumask_fops);
9787 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9788 tr, &tracing_iter_fops);
9790 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9793 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9794 tr, &tracing_pipe_fops);
9796 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9797 tr, &tracing_entries_fops);
9799 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9800 tr, &tracing_total_entries_fops);
9802 trace_create_file("free_buffer", 0200, d_tracer,
9803 tr, &tracing_free_buffer_fops);
9805 trace_create_file("trace_marker", 0220, d_tracer,
9806 tr, &tracing_mark_fops);
9808 file = __find_event_file(tr, "ftrace", "print");
9809 if (file && file->ef)
9810 eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
9811 file, &event_trigger_fops);
9812 tr->trace_marker_file = file;
9814 trace_create_file("trace_marker_raw", 0220, d_tracer,
9815 tr, &tracing_mark_raw_fops);
9817 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9820 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9821 tr, &rb_simple_fops);
9823 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9824 &trace_time_stamp_mode_fops);
9826 tr->buffer_percent = 50;
9828 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9829 tr, &buffer_percent_fops);
9831 create_trace_options_dir(tr);
9833 #ifdef CONFIG_TRACER_MAX_TRACE
9834 trace_create_maxlat_file(tr, d_tracer);
9837 if (ftrace_create_function_files(tr, d_tracer))
9838 MEM_FAIL(1, "Could not allocate function filter files");
9840 #ifdef CONFIG_TRACER_SNAPSHOT
9841 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9842 tr, &snapshot_fops);
9845 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9846 tr, &tracing_err_log_fops);
9848 for_each_tracing_cpu(cpu)
9849 tracing_init_tracefs_percpu(tr, cpu);
9851 ftrace_init_tracefs(tr, d_tracer);
9854 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9856 struct vfsmount *mnt;
9857 struct file_system_type *type;
9860 * To maintain backward compatibility for tools that mount
9861 * debugfs to get to the tracing facility, tracefs is automatically
9862 * mounted to the debugfs/tracing directory.
9864 type = get_fs_type("tracefs");
9867 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9868 put_filesystem(type);
9877 * tracing_init_dentry - initialize top level trace array
9879 * This is called when creating files or directories in the tracing
9880 * directory. It is called via fs_initcall() by any of the boot up code
9881 * and returns 0 if the top level tracing directory is available (or a negative errno if it is not).
9883 int tracing_init_dentry(void)
9885 struct trace_array *tr = &global_trace;
9887 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9888 pr_warn("Tracing disabled due to lockdown\n");
9892 /* The top level trace array uses NULL as parent */
9896 if (WARN_ON(!tracefs_initialized()))
9900 * As there may still be users that expect the tracing
9901 * files to exist in debugfs/tracing, we must automount
9902 * the tracefs file system there, so older tools still
9903 * work with the newer kernel.
9905 tr->dir = debugfs_create_automount("tracing", NULL,
9906 trace_automount, NULL);
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;
static struct work_struct tracerfs_init_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

static int __init trace_eval_init(void)
{
	INIT_WORK(&eval_map_work, eval_map_work_func);
	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
	if (!eval_map_wq) {
		pr_err("Unable to allocate eval_map_wq\n");
		/* Do work here */
		eval_map_work_func(&eval_map_work);
		return -ENOMEM;
	}

	queue_work(eval_map_wq, &eval_map_work);
	return 0;
}

subsys_initcall(trace_eval_init);

static int __init trace_eval_sync(void)
{
	/* Make sure the eval map updates are finished */
	if (eval_map_wq)
		destroy_workqueue(eval_map_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);
	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */
static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops)
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_DONE;
}
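/*
 * Illustrative sketch (not part of the build): because the two notifiers
 * above register with priority INT_MAX - 1, a callback added with the
 * default priority of 0 only runs after the ftrace buffer has been dumped.
 * The my_panic_cb and my_panic_nb names below are hypothetical.
 *
 *	static int my_panic_cb(struct notifier_block *nb,
 *			       unsigned long ev, void *unused)
 *	{
 *		pr_emerg("running after the ftrace dump\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_panic_nb = {
 *		.notifier_call	= my_panic_cb,
 *		.priority	= 0,
 *	};
 *
 *	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */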
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
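/*
 * Usage sketch (illustrative only): ftrace_dump() is exported for GPL
 * modules, so a driver can dump the ring buffer to the console from a
 * fatal error path.  The function name below is hypothetical.
 *
 *	static void my_driver_fatal_error(void)
 *	{
 *		ftrace_dump(DUMP_ORIG);		dumps only this CPU's buffer
 *	}
 *
 * Passing DUMP_ALL instead dumps the buffers of every CPU.
 */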
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
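/*
 * Usage sketch (illustrative only): command-style tracefs files pass their
 * per-line handler as @createfn.  The my_create_cmd and my_cmd_write names
 * below are hypothetical; in-tree users follow the same shape from their
 * write handlers.
 *
 *	static int my_create_cmd(const char *raw_command)
 *	{
 *		pr_info("got command: %s\n", raw_command);
 *		return 0;
 *	}
 *
 *	static ssize_t my_cmd_write(struct file *file, const char __user *buffer,
 *				    size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       my_create_cmd);
 *	}
 */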
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) == NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif

__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();

	clear_boot_tracer();

	return 0;
}

late_initcall_sync(late_trace_init);