1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 #include "trace_output.h"
58 * On boot up, the ring buffer is set to the minimum size, so that
59 * we do not waste memory on systems that are not using tracing.
61 bool ring_buffer_expanded;
63 #ifdef CONFIG_FTRACE_STARTUP_TEST
65 * We need to change this state when a selftest is running.
66 * A selftest will look into the ring-buffer to count the
67 * entries inserted during the selftest, although some concurrent
68 * insertions into the ring-buffer, such as trace_printk, could occur
69 * at the same time, giving false positive or negative results.
71 static bool __read_mostly tracing_selftest_running;
74 * If boot-time tracing, including tracers/events via the kernel cmdline,
75 * is running, we do not want to run SELFTEST.
77 bool __read_mostly tracing_selftest_disabled;
79 void __init disable_tracing_selftest(const char *reason)
81 if (!tracing_selftest_disabled) {
82 tracing_selftest_disabled = true;
83 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 #define tracing_selftest_running 0
88 #define tracing_selftest_disabled 0
91 /* Pipe tracepoints to printk */
92 static struct trace_iterator *tracepoint_print_iter;
93 int tracepoint_printk;
94 static bool tracepoint_printk_stop_on_boot __initdata;
95 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
97 /* For tracers that don't implement custom flags */
98 static struct tracer_opt dummy_tracer_opt[] = {
103 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
109 * To prevent the comm cache from being overwritten when no
110 * tracing is active, only save the comm when a trace event
111 * occurred.
113 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
116 * Kill all tracing for good (never come back).
117 * It is initialized to 1 but will turn to zero if the initialization
118 * of the tracer is successful. But that is the only place that sets
119 * this back to zero.
121 static int tracing_disabled = 1;
123 cpumask_var_t __read_mostly tracing_buffer_mask;
126 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
128 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
129 * is set, then ftrace_dump is called. This will output the contents
130 * of the ftrace buffers to the console. This is very useful for
131 * capturing traces that lead to crashes and outputting them to a
132 * serial console.
134 * It is off by default, but you can enable it either by specifying
135 * "ftrace_dump_on_oops" on the kernel command line, or by setting
136 * /proc/sys/kernel/ftrace_dump_on_oops
137 * Set 1 if you want to dump the buffers of all CPUs
138 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
141 enum ftrace_dump_mode ftrace_dump_on_oops;
143 /* When set, tracing will stop when a WARN*() is hit */
144 int __disable_trace_on_warning;
146 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
147 /* Map of enums to their values, for "eval_map" file */
148 struct trace_eval_map_head {
150 unsigned long length;
153 union trace_eval_map_item;
155 struct trace_eval_map_tail {
157 * "end" is first and points to NULL as it must be different
158 * from "mod" or "eval_string"
160 union trace_eval_map_item *next;
161 const char *end; /* points to NULL */
164 static DEFINE_MUTEX(trace_eval_mutex);
167 * The trace_eval_maps are saved in an array with two extra elements,
168 * one at the beginning, and one at the end. The beginning item contains
169 * the count of the saved maps (head.length), and the module they
170 * belong to if not built in (head.mod). The ending item contains a
171 * pointer to the next array of saved eval_map items.
173 union trace_eval_map_item {
174 struct trace_eval_map map;
175 struct trace_eval_map_head head;
176 struct trace_eval_map_tail tail;
179 static union trace_eval_map_item *trace_eval_maps;
180 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
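/*
 * Illustrative layout of one block of saved eval maps, based on the
 * description above (not a definition from this file):
 *
 *   [ head: length = N, mod ][ map 0 ][ map 1 ] ... [ map N-1 ][ tail: next ]
 *
 * "tail.next" points to the head item of the next saved block, or is NULL
 * for the last block in the chain.
 */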
182 int tracing_set_tracer(struct trace_array *tr, const char *buf);
183 static void ftrace_trace_userstack(struct trace_array *tr,
184 struct trace_buffer *buffer,
185 unsigned int trace_ctx);
187 #define MAX_TRACER_SIZE 100
188 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
189 static char *default_bootup_tracer;
191 static bool allocate_snapshot;
192 static bool snapshot_at_boot;
194 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_instance_index;
197 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
198 static int boot_snapshot_index;
200 static int __init set_cmdline_ftrace(char *str)
202 strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
203 default_bootup_tracer = bootup_tracer_buf;
204 /* We are using ftrace early, expand it */
205 ring_buffer_expanded = true;
208 __setup("ftrace=", set_cmdline_ftrace);
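/*
 * Example (illustrative): booting with "ftrace=function_graph" stores the
 * tracer name in bootup_tracer_buf and marks the ring buffer as expanded,
 * so the tracer can be started as soon as it registers during boot.
 */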
210 static int __init set_ftrace_dump_on_oops(char *str)
212 if (*str++ != '=' || !*str || !strcmp("1", str)) {
213 ftrace_dump_on_oops = DUMP_ALL;
217 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
218 ftrace_dump_on_oops = DUMP_ORIG;
224 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
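/*
 * Example (illustrative): "ftrace_dump_on_oops" or "ftrace_dump_on_oops=1"
 * selects DUMP_ALL (dump every CPU's buffer), while
 * "ftrace_dump_on_oops=orig_cpu" or "=2" selects DUMP_ORIG (dump only the
 * CPU that triggered the oops).
 */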
226 static int __init stop_trace_on_warning(char *str)
228 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
229 __disable_trace_on_warning = 1;
232 __setup("traceoff_on_warning", stop_trace_on_warning);
234 static int __init boot_alloc_snapshot(char *str)
236 char *slot = boot_snapshot_info + boot_snapshot_index;
237 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
242 if (strlen(str) >= left)
245 ret = snprintf(slot, left, "%s\t", str);
246 boot_snapshot_index += ret;
248 allocate_snapshot = true;
249 /* We also need the main ring buffer expanded */
250 ring_buffer_expanded = true;
254 __setup("alloc_snapshot", boot_alloc_snapshot);
257 static int __init boot_snapshot(char *str)
259 snapshot_at_boot = true;
260 boot_alloc_snapshot(str);
263 __setup("ftrace_boot_snapshot", boot_snapshot);
266 static int __init boot_instance(char *str)
268 char *slot = boot_instance_info + boot_instance_index;
269 int left = sizeof(boot_instance_info) - boot_instance_index;
272 if (strlen(str) >= left)
275 ret = snprintf(slot, left, "%s\t", str);
276 boot_instance_index += ret;
280 __setup("trace_instance=", boot_instance);
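/*
 * Example (illustrative): "trace_instance=foo" queues the instance name
 * "foo" in boot_instance_info; the instance itself is created later in the
 * boot process once the tracing infrastructure is up.
 */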
283 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
285 static int __init set_trace_boot_options(char *str)
287 strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
290 __setup("trace_options=", set_trace_boot_options);
292 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
293 static char *trace_boot_clock __initdata;
295 static int __init set_trace_boot_clock(char *str)
297 strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
298 trace_boot_clock = trace_boot_clock_buf;
301 __setup("trace_clock=", set_trace_boot_clock);
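/*
 * Example (illustrative): "trace_clock=global" records the requested clock
 * name here; it is applied to the tracing buffers later during boot (the
 * same names are accepted at run time via the trace_clock file in tracefs).
 */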
303 static int __init set_tracepoint_printk(char *str)
305 /* Ignore the "tp_printk_stop_on_boot" param */
309 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
310 tracepoint_printk = 1;
313 __setup("tp_printk", set_tracepoint_printk);
315 static int __init set_tracepoint_printk_stop(char *str)
317 tracepoint_printk_stop_on_boot = true;
320 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
322 unsigned long long ns2usecs(u64 nsec)
330 trace_process_export(struct trace_export *export,
331 struct ring_buffer_event *event, int flag)
333 struct trace_entry *entry;
334 unsigned int size = 0;
336 if (export->flags & flag) {
337 entry = ring_buffer_event_data(event);
338 size = ring_buffer_event_length(event);
339 export->write(export, entry, size);
343 static DEFINE_MUTEX(ftrace_export_lock);
345 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
347 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
348 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
349 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
351 static inline void ftrace_exports_enable(struct trace_export *export)
353 if (export->flags & TRACE_EXPORT_FUNCTION)
354 static_branch_inc(&trace_function_exports_enabled);
356 if (export->flags & TRACE_EXPORT_EVENT)
357 static_branch_inc(&trace_event_exports_enabled);
359 if (export->flags & TRACE_EXPORT_MARKER)
360 static_branch_inc(&trace_marker_exports_enabled);
363 static inline void ftrace_exports_disable(struct trace_export *export)
365 if (export->flags & TRACE_EXPORT_FUNCTION)
366 static_branch_dec(&trace_function_exports_enabled);
368 if (export->flags & TRACE_EXPORT_EVENT)
369 static_branch_dec(&trace_event_exports_enabled);
371 if (export->flags & TRACE_EXPORT_MARKER)
372 static_branch_dec(&trace_marker_exports_enabled);
375 static void ftrace_exports(struct ring_buffer_event *event, int flag)
377 struct trace_export *export;
379 preempt_disable_notrace();
381 export = rcu_dereference_raw_check(ftrace_exports_list);
383 trace_process_export(export, event, flag);
384 export = rcu_dereference_raw_check(export->next);
387 preempt_enable_notrace();
391 add_trace_export(struct trace_export **list, struct trace_export *export)
393 rcu_assign_pointer(export->next, *list);
395 * We are adding export to the list, but another
396 * CPU might be walking that list. We need to make sure
397 * the export->next pointer is valid before another CPU sees
398 * the export pointer included in the list.
400 rcu_assign_pointer(*list, export);
404 rm_trace_export(struct trace_export **list, struct trace_export *export)
406 struct trace_export **p;
408 for (p = list; *p != NULL; p = &(*p)->next)
415 rcu_assign_pointer(*p, (*p)->next);
421 add_ftrace_export(struct trace_export **list, struct trace_export *export)
423 ftrace_exports_enable(export);
425 add_trace_export(list, export);
429 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
433 ret = rm_trace_export(list, export);
434 ftrace_exports_disable(export);
439 int register_ftrace_export(struct trace_export *export)
441 if (WARN_ON_ONCE(!export->write))
444 mutex_lock(&ftrace_export_lock);
446 add_ftrace_export(&ftrace_exports_list, export);
448 mutex_unlock(&ftrace_export_lock);
452 EXPORT_SYMBOL_GPL(register_ftrace_export);
454 int unregister_ftrace_export(struct trace_export *export)
458 mutex_lock(&ftrace_export_lock);
460 ret = rm_ftrace_export(&ftrace_exports_list, export);
462 mutex_unlock(&ftrace_export_lock);
466 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
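/*
 * Illustrative (hypothetical) use of the export interface above; the exact
 * callback signature should be taken from <linux/trace.h>:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int len)
 *	{
 *		... push the raw trace entry to some other transport ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write = my_export_write,
 *		.flags = TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */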
468 /* trace_flags holds trace_options default values */
469 #define TRACE_DEFAULT_FLAGS \
470 (FUNCTION_DEFAULT_FLAGS | \
471 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
472 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
473 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
474 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
477 /* trace_options that are only supported by global_trace */
478 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
479 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
481 /* trace_flags that are default zero for instances */
482 #define ZEROED_TRACE_FLAGS \
483 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
486 * The global_trace is the descriptor that holds the top-level tracing
487 * buffers for the live tracing.
489 static struct trace_array global_trace = {
490 .trace_flags = TRACE_DEFAULT_FLAGS,
493 LIST_HEAD(ftrace_trace_arrays);
495 int trace_array_get(struct trace_array *this_tr)
497 struct trace_array *tr;
500 mutex_lock(&trace_types_lock);
501 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
508 mutex_unlock(&trace_types_lock);
513 static void __trace_array_put(struct trace_array *this_tr)
515 WARN_ON(!this_tr->ref);
520 * trace_array_put - Decrement the reference counter for this trace array.
521 * @this_tr : pointer to the trace array
523 * NOTE: Use this when we no longer need the trace array returned by
524 * trace_array_get_by_name(). This ensures the trace array can be later
528 void trace_array_put(struct trace_array *this_tr)
533 mutex_lock(&trace_types_lock);
534 __trace_array_put(this_tr);
535 mutex_unlock(&trace_types_lock);
537 EXPORT_SYMBOL_GPL(trace_array_put);
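/*
 * Illustrative pairing, per the NOTE above: a caller that obtained a
 * reference (for example via trace_array_get_by_name()) drops it with
 * trace_array_put() when done, so the instance can later be destroyed.
 */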
539 int tracing_check_open_get_tr(struct trace_array *tr)
543 ret = security_locked_down(LOCKDOWN_TRACEFS);
547 if (tracing_disabled)
550 if (tr && trace_array_get(tr) < 0)
556 int call_filter_check_discard(struct trace_event_call *call, void *rec,
557 struct trace_buffer *buffer,
558 struct ring_buffer_event *event)
560 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
561 !filter_match_preds(call->filter, rec)) {
562 __trace_event_discard_commit(buffer, event);
570 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
571 * @filtered_pids: The list of pids to check
572 * @search_pid: The PID to find in @filtered_pids
574 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
577 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
579 return trace_pid_list_is_set(filtered_pids, search_pid);
583 * trace_ignore_this_task - should a task be ignored for tracing
584 * @filtered_pids: The list of pids to check
585 * @filtered_no_pids: The list of pids not to be traced
586 * @task: The task that should be ignored if not filtered
588 * Checks if @task should be traced or not from @filtered_pids.
589 * Returns true if @task should *NOT* be traced.
590 * Returns false if @task should be traced.
593 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
594 struct trace_pid_list *filtered_no_pids,
595 struct task_struct *task)
598 * If filtered_no_pids is not empty, and the task's pid is listed
599 * in filtered_no_pids, then return true.
600 * Otherwise, if filtered_pids is empty, that means we can
601 * trace all tasks. If it has content, then only trace pids
602 * within filtered_pids.
605 return (filtered_pids &&
606 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
608 trace_find_filtered_pid(filtered_no_pids, task->pid));
612 * trace_filter_add_remove_task - Add or remove a task from a pid_list
613 * @pid_list: The list to modify
614 * @self: The current task for fork or NULL for exit
615 * @task: The task to add or remove
617 * If adding a task and @self is defined, the task is only added if @self
618 * is also included in @pid_list. This happens on fork, and tasks should
619 * only be added when the parent is listed. If @self is NULL, then the
620 * @task pid will be removed from the list, which would happen on exit
621 * of the task.
623 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
624 struct task_struct *self,
625 struct task_struct *task)
630 /* For forks, we only add if the forking task is listed */
632 if (!trace_find_filtered_pid(pid_list, self->pid))
636 /* "self" is set for forks, and NULL for exits */
638 trace_pid_list_set(pid_list, task->pid);
640 trace_pid_list_clear(pid_list, task->pid);
644 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
645 * @pid_list: The pid list to show
646 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
647 * @pos: The position of the file
649 * This is used by the seq_file "next" operation to iterate the pids
650 * listed in a trace_pid_list structure.
652 * Returns the pid+1 as we want to display pid of zero, but NULL would
653 * stop the iteration.
655 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
657 long pid = (unsigned long)v;
662 /* pid already is +1 of the actual previous bit */
663 if (trace_pid_list_next(pid_list, pid, &next) < 0)
668 /* Return pid + 1 to allow zero to be represented */
669 return (void *)(pid + 1);
673 * trace_pid_start - Used for seq_file to start reading pid lists
674 * @pid_list: The pid list to show
675 * @pos: The position of the file
677 * This is used by the seq_file "start" operation to start the iteration
678 * of listing pids.
680 * Returns the pid+1 as we want to display pid of zero, but NULL would
681 * stop the iteration.
683 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
689 if (trace_pid_list_first(pid_list, &first) < 0)
694 /* Return pid + 1 so that zero can be the exit value */
695 for (pid++; pid && l < *pos;
696 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
702 * trace_pid_show - show the current pid in seq_file processing
703 * @m: The seq_file structure to write into
704 * @v: A void pointer of the pid (+1) value to display
706 * Can be directly used by seq_file operations to display the current
707 * pid value.
709 int trace_pid_show(struct seq_file *m, void *v)
711 unsigned long pid = (unsigned long)v - 1;
713 seq_printf(m, "%lu\n", pid);
717 /* 128 should be much more than enough */
718 #define PID_BUF_SIZE 127
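/*
 * Illustrative example (hypothetical input): writing "123 456" to a pid
 * filter file such as set_event_pid is parsed below one token at a time;
 * pids 123 and 456 are set in a freshly allocated pid list that then
 * replaces the old list in a single step.
 */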
720 int trace_pid_write(struct trace_pid_list *filtered_pids,
721 struct trace_pid_list **new_pid_list,
722 const char __user *ubuf, size_t cnt)
724 struct trace_pid_list *pid_list;
725 struct trace_parser parser;
733 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
737 * Always recreate a new array. The write is an all or nothing
738 * operation. Always create a new array when adding new pids by
739 * the user. If the operation fails, then the current list is
742 pid_list = trace_pid_list_alloc();
744 trace_parser_put(&parser);
749 /* copy the current bits to the new max */
750 ret = trace_pid_list_first(filtered_pids, &pid);
752 trace_pid_list_set(pid_list, pid);
753 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
763 ret = trace_get_user(&parser, ubuf, cnt, &pos);
771 if (!trace_parser_loaded(&parser))
775 if (kstrtoul(parser.buffer, 0, &val))
780 if (trace_pid_list_set(pid_list, pid) < 0) {
786 trace_parser_clear(&parser);
789 trace_parser_put(&parser);
792 trace_pid_list_free(pid_list);
797 /* Cleared the list of pids */
798 trace_pid_list_free(pid_list);
802 *new_pid_list = pid_list;
807 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
811 /* Early boot up does not have a buffer yet */
813 return trace_clock_local();
815 ts = ring_buffer_time_stamp(buf->buffer);
816 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
821 u64 ftrace_now(int cpu)
823 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
827 * tracing_is_enabled - Show if global_trace has been enabled
829 * Shows if the global trace has been enabled or not. It uses the
830 * mirror flag "buffer_disabled" to be used in fast paths such as for
831 * the irqsoff tracer. But it may be inaccurate due to races. If you
832 * need to know the accurate state, use tracing_is_on() which is a little
833 * slower, but accurate.
835 int tracing_is_enabled(void)
838 * For quick access (irqsoff uses this in fast path), just
839 * return the mirror variable of the state of the ring buffer.
840 * It's a little racy, but we don't really care.
843 return !global_trace.buffer_disabled;
847 * trace_buf_size is the size in bytes that is allocated
848 * for a buffer. Note, the number of bytes is always rounded
851 * This number is purposely set to a low number of 16384.
852 * If a dump on oops happens, it is much appreciated not to
853 * have to wait for all that output. Anyway, this is
854 * configurable at both boot time and run time.
856 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
858 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
860 /* trace_types holds a link list of available tracers. */
861 static struct tracer *trace_types __read_mostly;
864 * trace_types_lock is used to protect the trace_types list.
866 DEFINE_MUTEX(trace_types_lock);
869 * serialize the access of the ring buffer
871 * The ring buffer serializes readers, but that is only low-level protection.
872 * The validity of the events (which are returned by ring_buffer_peek() etc.)
873 * is not protected by the ring buffer.
875 * The content of events may become garbage if we allow other processes to
876 * consume these events concurrently:
877 * A) the page of the consumed events may become a normal page
878 * (not a reader page) in the ring buffer, and this page will be rewritten
879 * by the events producer.
880 * B) The page of the consumed events may become a page for splice_read,
881 * and this page will be returned to the system.
883 * These primitives allow multiple processes to access different CPU ring
884 * buffers concurrently.
886 * These primitives don't distinguish read-only and read-consume access.
887 * Multiple read-only accesses are also serialized.
891 static DECLARE_RWSEM(all_cpu_access_lock);
892 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
894 static inline void trace_access_lock(int cpu)
896 if (cpu == RING_BUFFER_ALL_CPUS) {
897 /* gain it for accessing the whole ring buffer. */
898 down_write(&all_cpu_access_lock);
900 /* gain it for accessing a cpu ring buffer. */
902 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
903 down_read(&all_cpu_access_lock);
905 /* Secondly block other access to this @cpu ring buffer. */
906 mutex_lock(&per_cpu(cpu_access_lock, cpu));
910 static inline void trace_access_unlock(int cpu)
912 if (cpu == RING_BUFFER_ALL_CPUS) {
913 up_write(&all_cpu_access_lock);
915 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
916 up_read(&all_cpu_access_lock);
920 static inline void trace_access_lock_init(void)
924 for_each_possible_cpu(cpu)
925 mutex_init(&per_cpu(cpu_access_lock, cpu));
930 static DEFINE_MUTEX(access_lock);
932 static inline void trace_access_lock(int cpu)
935 mutex_lock(&access_lock);
938 static inline void trace_access_unlock(int cpu)
941 mutex_unlock(&access_lock);
944 static inline void trace_access_lock_init(void)
950 #ifdef CONFIG_STACKTRACE
951 static void __ftrace_trace_stack(struct trace_buffer *buffer,
952 unsigned int trace_ctx,
953 int skip, struct pt_regs *regs);
954 static inline void ftrace_trace_stack(struct trace_array *tr,
955 struct trace_buffer *buffer,
956 unsigned int trace_ctx,
957 int skip, struct pt_regs *regs);
960 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
961 unsigned int trace_ctx,
962 int skip, struct pt_regs *regs)
965 static inline void ftrace_trace_stack(struct trace_array *tr,
966 struct trace_buffer *buffer,
967 unsigned long trace_ctx,
968 int skip, struct pt_regs *regs)
974 static __always_inline void
975 trace_event_setup(struct ring_buffer_event *event,
976 int type, unsigned int trace_ctx)
978 struct trace_entry *ent = ring_buffer_event_data(event);
980 tracing_generic_entry_update(ent, type, trace_ctx);
983 static __always_inline struct ring_buffer_event *
984 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
987 unsigned int trace_ctx)
989 struct ring_buffer_event *event;
991 event = ring_buffer_lock_reserve(buffer, len);
993 trace_event_setup(event, type, trace_ctx);
998 void tracer_tracing_on(struct trace_array *tr)
1000 if (tr->array_buffer.buffer)
1001 ring_buffer_record_on(tr->array_buffer.buffer);
1003 * This flag is looked at when buffers haven't been allocated
1004 * yet, or by some tracers (like irqsoff) that just want to
1005 * know if the ring buffer has been disabled, but that can handle
1006 * races where it gets disabled while we still do a record.
1007 * As the check is in the fast path of the tracers, it is more
1008 * important to be fast than accurate.
1010 tr->buffer_disabled = 0;
1011 /* Make the flag seen by readers */
1016 * tracing_on - enable tracing buffers
1018 * This function enables tracing buffers that may have been
1019 * disabled with tracing_off.
1021 void tracing_on(void)
1023 tracer_tracing_on(&global_trace);
1025 EXPORT_SYMBOL_GPL(tracing_on);
1028 static __always_inline void
1029 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1031 __this_cpu_write(trace_taskinfo_save, true);
1033 /* If this is the temp buffer, we need to commit fully */
1034 if (this_cpu_read(trace_buffered_event) == event) {
1035 /* Length is in event->array[0] */
1036 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1037 /* Release the temp buffer */
1038 this_cpu_dec(trace_buffered_event_cnt);
1039 /* ring_buffer_unlock_commit() enables preemption */
1040 preempt_enable_notrace();
1042 ring_buffer_unlock_commit(buffer);
1045 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1046 const char *str, int size)
1048 struct ring_buffer_event *event;
1049 struct trace_buffer *buffer;
1050 struct print_entry *entry;
1051 unsigned int trace_ctx;
1054 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1057 if (unlikely(tracing_selftest_running && tr == &global_trace))
1060 if (unlikely(tracing_disabled))
1063 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1065 trace_ctx = tracing_gen_ctx();
1066 buffer = tr->array_buffer.buffer;
1067 ring_buffer_nest_start(buffer);
1068 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1075 entry = ring_buffer_event_data(event);
1078 memcpy(&entry->buf, str, size);
1080 /* Add a newline if necessary */
1081 if (entry->buf[size - 1] != '\n') {
1082 entry->buf[size] = '\n';
1083 entry->buf[size + 1] = '\0';
1085 entry->buf[size] = '\0';
1087 __buffer_unlock_commit(buffer, event);
1088 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1090 ring_buffer_nest_end(buffer);
1093 EXPORT_SYMBOL_GPL(__trace_array_puts);
1096 * __trace_puts - write a constant string into the trace buffer.
1097 * @ip: The address of the caller
1098 * @str: The constant string to write
1099 * @size: The size of the string.
1101 int __trace_puts(unsigned long ip, const char *str, int size)
1103 return __trace_array_puts(&global_trace, ip, str, size);
1105 EXPORT_SYMBOL_GPL(__trace_puts);
1108 * __trace_bputs - write the pointer to a constant string into trace buffer
1109 * @ip: The address of the caller
1110 * @str: The constant string to write to the buffer to
1112 int __trace_bputs(unsigned long ip, const char *str)
1114 struct ring_buffer_event *event;
1115 struct trace_buffer *buffer;
1116 struct bputs_entry *entry;
1117 unsigned int trace_ctx;
1118 int size = sizeof(struct bputs_entry);
1121 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1124 if (unlikely(tracing_selftest_running || tracing_disabled))
1127 trace_ctx = tracing_gen_ctx();
1128 buffer = global_trace.array_buffer.buffer;
1130 ring_buffer_nest_start(buffer);
1131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1136 entry = ring_buffer_event_data(event);
1140 __buffer_unlock_commit(buffer, event);
1141 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1145 ring_buffer_nest_end(buffer);
1148 EXPORT_SYMBOL_GPL(__trace_bputs);
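/*
 * Illustrative usage: callers normally go through the trace_puts() helper,
 * which uses __trace_bputs() for compile-time constant strings (recording
 * only the pointer) and falls back to __trace_puts() otherwise, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */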
1150 #ifdef CONFIG_TRACER_SNAPSHOT
1151 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1154 struct tracer *tracer = tr->current_trace;
1155 unsigned long flags;
1158 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1159 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1163 if (!tr->allocated_snapshot) {
1164 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1165 trace_array_puts(tr, "*** stopping trace here! ***\n");
1166 tracer_tracing_off(tr);
1170 /* Note, snapshot can not be used when the tracer uses it */
1171 if (tracer->use_max_tr) {
1172 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1173 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1177 local_irq_save(flags);
1178 update_max_tr(tr, current, smp_processor_id(), cond_data);
1179 local_irq_restore(flags);
1182 void tracing_snapshot_instance(struct trace_array *tr)
1184 tracing_snapshot_instance_cond(tr, NULL);
1188 * tracing_snapshot - take a snapshot of the current buffer.
1190 * This causes a swap between the snapshot buffer and the current live
1191 * tracing buffer. You can use this to take snapshots of the live
1192 * trace when some condition is triggered, but continue to trace.
1194 * Note, make sure to allocate the snapshot with either
1195 * a tracing_snapshot_alloc(), or by doing it manually
1196 * with: echo 1 > /sys/kernel/tracing/snapshot
1198 * If the snapshot buffer is not allocated, it will stop tracing.
1199 * Basically making a permanent snapshot.
1201 void tracing_snapshot(void)
1203 struct trace_array *tr = &global_trace;
1205 tracing_snapshot_instance(tr);
1207 EXPORT_SYMBOL_GPL(tracing_snapshot);
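/*
 * Illustrative usage: allocate the snapshot buffer up front, e.g. with
 * tracing_snapshot_alloc() from a context that may sleep (or with
 * "echo 1 > /sys/kernel/tracing/snapshot"), then call tracing_snapshot()
 * at the point of interest to swap the live buffer aside while tracing
 * continues.
 */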
1210 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1211 * @tr: The tracing instance to snapshot
1212 * @cond_data: The data to be tested conditionally, and possibly saved
1214 * This is the same as tracing_snapshot() except that the snapshot is
1215 * conditional - the snapshot will only happen if the
1216 * cond_snapshot.update() implementation receiving the cond_data
1217 * returns true, which means that the trace array's cond_snapshot
1218 * update() operation used the cond_data to determine whether the
1219 * snapshot should be taken, and if it was, presumably saved it along
1220 * with the snapshot.
1222 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1224 tracing_snapshot_instance_cond(tr, cond_data);
1226 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1229 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1230 * @tr: The tracing instance
1232 * When the user enables a conditional snapshot using
1233 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1234 * with the snapshot. This accessor is used to retrieve it.
1236 * Should not be called from cond_snapshot.update(), since it takes
1237 * the tr->max_lock lock, which the code calling
1238 * cond_snapshot.update() has already done.
1240 * Returns the cond_data associated with the trace array's snapshot.
1242 void *tracing_cond_snapshot_data(struct trace_array *tr)
1244 void *cond_data = NULL;
1246 local_irq_disable();
1247 arch_spin_lock(&tr->max_lock);
1249 if (tr->cond_snapshot)
1250 cond_data = tr->cond_snapshot->cond_data;
1252 arch_spin_unlock(&tr->max_lock);
1257 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1259 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1260 struct array_buffer *size_buf, int cpu_id);
1261 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1263 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1267 if (!tr->allocated_snapshot) {
1269 /* allocate spare buffer */
1270 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1271 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1275 tr->allocated_snapshot = true;
1281 static void free_snapshot(struct trace_array *tr)
1284 * We don't free the ring buffer; instead, we resize it because
1285 * the max_tr ring buffer has some state (e.g. ring->clock) and
1286 * we want to preserve it.
1288 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1289 set_buffer_entries(&tr->max_buffer, 1);
1290 tracing_reset_online_cpus(&tr->max_buffer);
1291 tr->allocated_snapshot = false;
1295 * tracing_alloc_snapshot - allocate snapshot buffer.
1297 * This only allocates the snapshot buffer if it isn't already
1298 * allocated - it doesn't also take a snapshot.
1300 * This is meant to be used in cases where the snapshot buffer needs
1301 * to be set up for events that can't sleep but need to be able to
1302 * trigger a snapshot.
1304 int tracing_alloc_snapshot(void)
1306 struct trace_array *tr = &global_trace;
1309 ret = tracing_alloc_snapshot_instance(tr);
1314 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1317 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1319 * This is similar to tracing_snapshot(), but it will allocate the
1320 * snapshot buffer if it isn't already allocated. Use this only
1321 * where it is safe to sleep, as the allocation may sleep.
1323 * This causes a swap between the snapshot buffer and the current live
1324 * tracing buffer. You can use this to take snapshots of the live
1325 * trace when some condition is triggered, but continue to trace.
1327 void tracing_snapshot_alloc(void)
1331 ret = tracing_alloc_snapshot();
1337 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1340 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1341 * @tr: The tracing instance
1342 * @cond_data: User data to associate with the snapshot
1343 * @update: Implementation of the cond_snapshot update function
1345 * Check whether the conditional snapshot for the given instance has
1346 * already been enabled, or if the current tracer is already using a
1347 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1348 * save the cond_data and update function inside.
1350 * Returns 0 if successful, error otherwise.
1352 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1353 cond_update_fn_t update)
1355 struct cond_snapshot *cond_snapshot;
1358 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1362 cond_snapshot->cond_data = cond_data;
1363 cond_snapshot->update = update;
1365 mutex_lock(&trace_types_lock);
1367 ret = tracing_alloc_snapshot_instance(tr);
1371 if (tr->current_trace->use_max_tr) {
1377 * The cond_snapshot can only change to NULL without the
1378 * trace_types_lock. We don't care if we race with it going
1379 * to NULL, but we want to make sure that it's not set to
1380 * something other than NULL when we get here, which we can
1381 * do safely with only holding the trace_types_lock and not
1382 * having to take the max_lock.
1384 if (tr->cond_snapshot) {
1389 local_irq_disable();
1390 arch_spin_lock(&tr->max_lock);
1391 tr->cond_snapshot = cond_snapshot;
1392 arch_spin_unlock(&tr->max_lock);
1395 mutex_unlock(&trace_types_lock);
1400 mutex_unlock(&trace_types_lock);
1401 kfree(cond_snapshot);
1404 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
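/*
 * Illustrative (hypothetical) conditional snapshot setup; see
 * cond_update_fn_t in trace.h for the exact callback type:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return ...;	(take the snapshot only when this is true)
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, my_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, my_data);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */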
1407 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1408 * @tr: The tracing instance
1410 * Check whether the conditional snapshot for the given instance is
1411 * enabled; if so, free the cond_snapshot associated with it,
1412 * otherwise return -EINVAL.
1414 * Returns 0 if successful, error otherwise.
1416 int tracing_snapshot_cond_disable(struct trace_array *tr)
1420 local_irq_disable();
1421 arch_spin_lock(&tr->max_lock);
1423 if (!tr->cond_snapshot)
1426 kfree(tr->cond_snapshot);
1427 tr->cond_snapshot = NULL;
1430 arch_spin_unlock(&tr->max_lock);
1435 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1437 void tracing_snapshot(void)
1439 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1441 EXPORT_SYMBOL_GPL(tracing_snapshot);
1442 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1444 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1446 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1447 int tracing_alloc_snapshot(void)
1449 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1452 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1453 void tracing_snapshot_alloc(void)
1458 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1459 void *tracing_cond_snapshot_data(struct trace_array *tr)
1463 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1464 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1468 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1469 int tracing_snapshot_cond_disable(struct trace_array *tr)
1473 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1474 #define free_snapshot(tr) do { } while (0)
1475 #endif /* CONFIG_TRACER_SNAPSHOT */
1477 void tracer_tracing_off(struct trace_array *tr)
1479 if (tr->array_buffer.buffer)
1480 ring_buffer_record_off(tr->array_buffer.buffer);
1482 * This flag is looked at when buffers haven't been allocated
1483 * yet, or by some tracers (like irqsoff) that just want to
1484 * know if the ring buffer has been disabled, but that can handle
1485 * races where it gets disabled while we still do a record.
1486 * As the check is in the fast path of the tracers, it is more
1487 * important to be fast than accurate.
1489 tr->buffer_disabled = 1;
1490 /* Make the flag seen by readers */
1495 * tracing_off - turn off tracing buffers
1497 * This function stops the tracing buffers from recording data.
1498 * It does not disable any overhead the tracers themselves may
1499 * be causing. This function simply causes all recording to
1500 * the ring buffers to fail.
1502 void tracing_off(void)
1504 tracer_tracing_off(&global_trace);
1506 EXPORT_SYMBOL_GPL(tracing_off);
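/*
 * Illustrative usage from kernel code: let tracing run normally and call
 * tracing_off() when the condition being debugged is detected, so the ring
 * buffer stops recording and preserves the events that led up to it;
 * tracing_on() re-enables recording afterwards.
 */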
1508 void disable_trace_on_warning(void)
1510 if (__disable_trace_on_warning) {
1511 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1512 "Disabling tracing due to warning\n");
1518 * tracer_tracing_is_on - show real state of ring buffer enabled
1519 * @tr : the trace array to know if ring buffer is enabled
1521 * Shows real state of the ring buffer if it is enabled or not.
1523 bool tracer_tracing_is_on(struct trace_array *tr)
1525 if (tr->array_buffer.buffer)
1526 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1527 return !tr->buffer_disabled;
1531 * tracing_is_on - show state of ring buffers enabled
1533 int tracing_is_on(void)
1535 return tracer_tracing_is_on(&global_trace);
1537 EXPORT_SYMBOL_GPL(tracing_is_on);
1539 static int __init set_buf_size(char *str)
1541 unsigned long buf_size;
1545 buf_size = memparse(str, &str);
1547 * nr_entries cannot be zero and the startup
1548 * tests require some buffer space. Therefore
1549 * ensure we have at least 4096 bytes of buffer.
1551 trace_buf_size = max(4096UL, buf_size);
1554 __setup("trace_buf_size=", set_buf_size);
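/*
 * Example (illustrative): "trace_buf_size=1M" requests roughly a 1 MiB
 * buffer (memparse() accepts the usual K/M/G suffixes); anything smaller
 * than 4096 bytes is raised to that minimum, as noted above.
 */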
1556 static int __init set_tracing_thresh(char *str)
1558 unsigned long threshold;
1563 ret = kstrtoul(str, 0, &threshold);
1566 tracing_thresh = threshold * 1000;
1569 __setup("tracing_thresh=", set_tracing_thresh);
1571 unsigned long nsecs_to_usecs(unsigned long nsecs)
1573 return nsecs / 1000;
1577 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1578 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1579 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1580 * of strings in the order that the evals (enum) were defined.
1585 /* These must match the bit positions in trace_iterator_flags */
1586 static const char *trace_options[] = {
1594 int in_ns; /* is this clock in nanoseconds? */
1595 } trace_clocks[] = {
1596 { trace_clock_local, "local", 1 },
1597 { trace_clock_global, "global", 1 },
1598 { trace_clock_counter, "counter", 0 },
1599 { trace_clock_jiffies, "uptime", 0 },
1600 { trace_clock, "perf", 1 },
1601 { ktime_get_mono_fast_ns, "mono", 1 },
1602 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1603 { ktime_get_boot_fast_ns, "boot", 1 },
1604 { ktime_get_tai_fast_ns, "tai", 1 },
1608 bool trace_clock_in_ns(struct trace_array *tr)
1610 if (trace_clocks[tr->clock_id].in_ns)
1617 * trace_parser_get_init - gets the buffer for trace parser
1619 int trace_parser_get_init(struct trace_parser *parser, int size)
1621 memset(parser, 0, sizeof(*parser));
1623 parser->buffer = kmalloc(size, GFP_KERNEL);
1624 if (!parser->buffer)
1627 parser->size = size;
1632 * trace_parser_put - frees the buffer for trace parser
1634 void trace_parser_put(struct trace_parser *parser)
1636 kfree(parser->buffer);
1637 parser->buffer = NULL;
1641 * trace_get_user - reads the user input string separated by space
1642 * (matched by isspace(ch))
1644 * For each string found the 'struct trace_parser' is updated,
1645 * and the function returns.
1647 * Returns number of bytes read.
1649 * See kernel/trace/trace.h for 'struct trace_parser' details.
1651 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1652 size_t cnt, loff_t *ppos)
1659 trace_parser_clear(parser);
1661 ret = get_user(ch, ubuf++);
1669 * If the parser is not finished with the last write,
1670 * continue reading the user input without skipping spaces.
1672 if (!parser->cont) {
1673 /* skip white space */
1674 while (cnt && isspace(ch)) {
1675 ret = get_user(ch, ubuf++);
1684 /* only spaces were written */
1685 if (isspace(ch) || !ch) {
1692 /* read the non-space input */
1693 while (cnt && !isspace(ch) && ch) {
1694 if (parser->idx < parser->size - 1)
1695 parser->buffer[parser->idx++] = ch;
1700 ret = get_user(ch, ubuf++);
1707 /* We either got finished input or we have to wait for another call. */
1708 if (isspace(ch) || !ch) {
1709 parser->buffer[parser->idx] = 0;
1710 parser->cont = false;
1711 } else if (parser->idx < parser->size - 1) {
1712 parser->cont = true;
1713 parser->buffer[parser->idx++] = ch;
1714 /* Make sure the parsed string always terminates with '\0'. */
1715 parser->buffer[parser->idx] = 0;
1728 /* TODO add a seq_buf_to_buffer() */
1729 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1733 if (trace_seq_used(s) <= s->seq.readpos)
1736 len = trace_seq_used(s) - s->seq.readpos;
1739 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1741 s->seq.readpos += cnt;
1745 unsigned long __read_mostly tracing_thresh;
1747 #ifdef CONFIG_TRACER_MAX_TRACE
1748 static const struct file_operations tracing_max_lat_fops;
1750 #ifdef LATENCY_FS_NOTIFY
1752 static struct workqueue_struct *fsnotify_wq;
1754 static void latency_fsnotify_workfn(struct work_struct *work)
1756 struct trace_array *tr = container_of(work, struct trace_array,
1758 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1761 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1763 struct trace_array *tr = container_of(iwork, struct trace_array,
1765 queue_work(fsnotify_wq, &tr->fsnotify_work);
1768 static void trace_create_maxlat_file(struct trace_array *tr,
1769 struct dentry *d_tracer)
1771 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1772 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1773 tr->d_max_latency = trace_create_file("tracing_max_latency",
1776 &tracing_max_lat_fops);
1779 __init static int latency_fsnotify_init(void)
1781 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1782 WQ_UNBOUND | WQ_HIGHPRI, 0);
1784 pr_err("Unable to allocate tr_max_lat_wq\n");
1790 late_initcall_sync(latency_fsnotify_init);
1792 void latency_fsnotify(struct trace_array *tr)
1797 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1798 * possible that we are called from __schedule() or do_idle(), which
1799 * could cause a deadlock.
1801 irq_work_queue(&tr->fsnotify_irqwork);
1804 #else /* !LATENCY_FS_NOTIFY */
1806 #define trace_create_maxlat_file(tr, d_tracer) \
1807 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1808 d_tracer, tr, &tracing_max_lat_fops)
1813 * Copy the new maximum trace into the separate maximum-trace
1814 * structure. (this way the maximum trace is permanently saved,
1815 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1818 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1820 struct array_buffer *trace_buf = &tr->array_buffer;
1821 struct array_buffer *max_buf = &tr->max_buffer;
1822 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1823 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1826 max_buf->time_start = data->preempt_timestamp;
1828 max_data->saved_latency = tr->max_latency;
1829 max_data->critical_start = data->critical_start;
1830 max_data->critical_end = data->critical_end;
1832 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1833 max_data->pid = tsk->pid;
1835 * If tsk == current, then use current_uid(), as that does not use
1836 * RCU. The irq tracer can be called out of RCU scope.
1839 max_data->uid = current_uid();
1841 max_data->uid = task_uid(tsk);
1843 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1844 max_data->policy = tsk->policy;
1845 max_data->rt_priority = tsk->rt_priority;
1847 /* record this task's comm */
1848 tracing_record_cmdline(tsk);
1849 latency_fsnotify(tr);
1853 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1855 * @tsk: the task with the latency
1856 * @cpu: The cpu that initiated the trace.
1857 * @cond_data: User data associated with a conditional snapshot
1859 * Flip the buffers between the @tr and the max_tr and record information
1860 * about which task was the cause of this latency.
1863 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1869 WARN_ON_ONCE(!irqs_disabled());
1871 if (!tr->allocated_snapshot) {
1872 /* Only the nop tracer should hit this when disabling */
1873 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1877 arch_spin_lock(&tr->max_lock);
1879 /* Inherit the recordable setting from array_buffer */
1880 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1881 ring_buffer_record_on(tr->max_buffer.buffer);
1883 ring_buffer_record_off(tr->max_buffer.buffer);
1885 #ifdef CONFIG_TRACER_SNAPSHOT
1886 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1887 arch_spin_unlock(&tr->max_lock);
1891 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1893 __update_max_tr(tr, tsk, cpu);
1895 arch_spin_unlock(&tr->max_lock);
1899 * update_max_tr_single - only copy one trace over, and reset the rest
1901 * @tsk: task with the latency
1902 * @cpu: the cpu of the buffer to copy.
1904 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1907 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1914 WARN_ON_ONCE(!irqs_disabled());
1915 if (!tr->allocated_snapshot) {
1916 /* Only the nop tracer should hit this when disabling */
1917 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1921 arch_spin_lock(&tr->max_lock);
1923 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1925 if (ret == -EBUSY) {
1927 * We failed to swap the buffer due to a commit taking
1928 * place on this CPU. We fail to record, but we reset
1929 * the max trace buffer (no one writes directly to it)
1930 * and flag that it failed.
1931 * Another reason is resize is in progress.
1933 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1934 "Failed to swap buffers due to commit or resize in progress\n");
1937 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1939 __update_max_tr(tr, tsk, cpu);
1940 arch_spin_unlock(&tr->max_lock);
1943 #endif /* CONFIG_TRACER_MAX_TRACE */
1945 static int wait_on_pipe(struct trace_iterator *iter, int full)
1947 /* Iterators are static, they should be filled or empty */
1948 if (trace_buffer_iter(iter, iter->cpu_file))
1951 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1955 #ifdef CONFIG_FTRACE_STARTUP_TEST
1956 static bool selftests_can_run;
1958 struct trace_selftests {
1959 struct list_head list;
1960 struct tracer *type;
1963 static LIST_HEAD(postponed_selftests);
1965 static int save_selftest(struct tracer *type)
1967 struct trace_selftests *selftest;
1969 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1973 selftest->type = type;
1974 list_add(&selftest->list, &postponed_selftests);
1978 static int run_tracer_selftest(struct tracer *type)
1980 struct trace_array *tr = &global_trace;
1981 struct tracer *saved_tracer = tr->current_trace;
1984 if (!type->selftest || tracing_selftest_disabled)
1988 * If a tracer registers early in boot up (before scheduling is
1989 * initialized and such), then do not run its selftests yet.
1990 * Instead, run it a little later in the boot process.
1992 if (!selftests_can_run)
1993 return save_selftest(type);
1995 if (!tracing_is_on()) {
1996 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2002 * Run a selftest on this tracer.
2003 * Here we reset the trace buffer, and set the current
2004 * tracer to be this tracer. The tracer can then run some
2005 * internal tracing to verify that everything is in order.
2006 * If we fail, we do not register this tracer.
2008 tracing_reset_online_cpus(&tr->array_buffer);
2010 tr->current_trace = type;
2012 #ifdef CONFIG_TRACER_MAX_TRACE
2013 if (type->use_max_tr) {
2014 /* If we expanded the buffers, make sure the max is expanded too */
2015 if (ring_buffer_expanded)
2016 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2017 RING_BUFFER_ALL_CPUS);
2018 tr->allocated_snapshot = true;
2022 /* the test is responsible for initializing and enabling */
2023 pr_info("Testing tracer %s: ", type->name);
2024 ret = type->selftest(type, tr);
2025 /* the test is responsible for resetting too */
2026 tr->current_trace = saved_tracer;
2028 printk(KERN_CONT "FAILED!\n");
2029 /* Add the warning after printing 'FAILED' */
2033 /* Only reset on passing, to avoid touching corrupted buffers */
2034 tracing_reset_online_cpus(&tr->array_buffer);
2036 #ifdef CONFIG_TRACER_MAX_TRACE
2037 if (type->use_max_tr) {
2038 tr->allocated_snapshot = false;
2040 /* Shrink the max buffer again */
2041 if (ring_buffer_expanded)
2042 ring_buffer_resize(tr->max_buffer.buffer, 1,
2043 RING_BUFFER_ALL_CPUS);
2047 printk(KERN_CONT "PASSED\n");
2051 static int do_run_tracer_selftest(struct tracer *type)
2056 * Tests can take a long time, especially if they are run one after the
2057 * other, as does happen during bootup when all the tracers are
2058 * registered. This could cause the soft lockup watchdog to trigger.
2062 tracing_selftest_running = true;
2063 ret = run_tracer_selftest(type);
2064 tracing_selftest_running = false;
2069 static __init int init_trace_selftests(void)
2071 struct trace_selftests *p, *n;
2072 struct tracer *t, **last;
2075 selftests_can_run = true;
2077 mutex_lock(&trace_types_lock);
2079 if (list_empty(&postponed_selftests))
2082 pr_info("Running postponed tracer tests:\n");
2084 tracing_selftest_running = true;
2085 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2086 /* This loop can take minutes when sanitizers are enabled, so
2087 * let's make sure we allow RCU processing.
2090 ret = run_tracer_selftest(p->type);
2091 /* If the test fails, then warn and remove from available_tracers */
2093 WARN(1, "tracer: %s failed selftest, disabling\n",
2095 last = &trace_types;
2096 for (t = trace_types; t; t = t->next) {
2107 tracing_selftest_running = false;
2110 mutex_unlock(&trace_types_lock);
2114 core_initcall(init_trace_selftests);
2116 static inline int run_tracer_selftest(struct tracer *type)
2120 static inline int do_run_tracer_selftest(struct tracer *type)
2124 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2126 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2128 static void __init apply_trace_boot_options(void);
2131 * register_tracer - register a tracer with the ftrace system.
2132 * @type: the plugin for the tracer
2134 * Register a new plugin tracer.
2136 int __init register_tracer(struct tracer *type)
2142 pr_info("Tracer must have a name\n");
2146 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2147 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2151 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2152 pr_warn("Can not register tracer %s due to lockdown\n",
2157 mutex_lock(&trace_types_lock);
2159 for (t = trace_types; t; t = t->next) {
2160 if (strcmp(type->name, t->name) == 0) {
2162 pr_info("Tracer %s already registered\n",
2169 if (!type->set_flag)
2170 type->set_flag = &dummy_set_flag;
2172 /* allocate a dummy tracer_flags */
2173 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2178 type->flags->val = 0;
2179 type->flags->opts = dummy_tracer_opt;
2181 if (!type->flags->opts)
2182 type->flags->opts = dummy_tracer_opt;
2184 /* store the tracer for __set_tracer_option */
2185 type->flags->trace = type;
2187 ret = do_run_tracer_selftest(type);
2191 type->next = trace_types;
2193 add_tracer_options(&global_trace, type);
2196 mutex_unlock(&trace_types_lock);
2198 if (ret || !default_bootup_tracer)
2201 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2204 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2205 /* Do we want this tracer to start on bootup? */
2206 tracing_set_tracer(&global_trace, type->name);
2207 default_bootup_tracer = NULL;
2209 apply_trace_boot_options();
2211 /* disable other selftests, since this will break them. */
2212 disable_tracing_selftest("running a tracer");
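/*
 * Illustrative (hypothetical) registration of a tracer plugin; field names
 * are taken from struct tracer in trace.h:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 */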
2218 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2220 struct trace_buffer *buffer = buf->buffer;
2225 ring_buffer_record_disable(buffer);
2227 /* Make sure all commits have finished */
2229 ring_buffer_reset_cpu(buffer, cpu);
2231 ring_buffer_record_enable(buffer);
2234 void tracing_reset_online_cpus(struct array_buffer *buf)
2236 struct trace_buffer *buffer = buf->buffer;
2241 ring_buffer_record_disable(buffer);
2243 /* Make sure all commits have finished */
2246 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2248 ring_buffer_reset_online_cpus(buffer);
2250 ring_buffer_record_enable(buffer);
2253 /* Must have trace_types_lock held */
2254 void tracing_reset_all_online_cpus_unlocked(void)
2256 struct trace_array *tr;
2258 lockdep_assert_held(&trace_types_lock);
2260 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2261 if (!tr->clear_trace)
2263 tr->clear_trace = false;
2264 tracing_reset_online_cpus(&tr->array_buffer);
2265 #ifdef CONFIG_TRACER_MAX_TRACE
2266 tracing_reset_online_cpus(&tr->max_buffer);
2271 void tracing_reset_all_online_cpus(void)
2273 mutex_lock(&trace_types_lock);
2274 tracing_reset_all_online_cpus_unlocked();
2275 mutex_unlock(&trace_types_lock);
2279 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2280 * is the tgid last observed corresponding to pid=i.
2282 static int *tgid_map;
2284 /* The maximum valid index into tgid_map. */
2285 static size_t tgid_map_max;
2287 #define SAVED_CMDLINES_DEFAULT 128
2288 #define NO_CMDLINE_MAP UINT_MAX
2290 * Preemption must be disabled before acquiring trace_cmdline_lock.
2291 * The various trace_arrays' max_lock must be acquired in a context
2292 * where interrupts are disabled.
2294 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2295 struct saved_cmdlines_buffer {
2296 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2297 unsigned *map_cmdline_to_pid;
2298 unsigned cmdline_num;
2300 char *saved_cmdlines;
2302 static struct saved_cmdlines_buffer *savedcmd;
2304 static inline char *get_saved_cmdlines(int idx)
2306 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2309 static inline void set_cmdline(int idx, const char *cmdline)
2311 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2314 static int allocate_cmdlines_buffer(unsigned int val,
2315 struct saved_cmdlines_buffer *s)
2317 s->map_cmdline_to_pid = kmalloc_array(val,
2318 sizeof(*s->map_cmdline_to_pid),
2320 if (!s->map_cmdline_to_pid)
2323 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2324 if (!s->saved_cmdlines) {
2325 kfree(s->map_cmdline_to_pid);
2330 s->cmdline_num = val;
2331 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2332 sizeof(s->map_pid_to_cmdline));
2333 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2334 val * sizeof(*s->map_cmdline_to_pid));
2339 static int trace_create_savedcmd(void)
2343 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2347 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2357 int is_tracing_stopped(void)
2359 return global_trace.stop_count;
2362 static void tracing_start_tr(struct trace_array *tr)
2364 struct trace_buffer *buffer;
2365 unsigned long flags;
2367 if (tracing_disabled)
2370 raw_spin_lock_irqsave(&tr->start_lock, flags);
2371 if (--tr->stop_count) {
2372 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2373 /* Someone screwed up their debugging */
2379 /* Prevent the buffers from switching */
2380 arch_spin_lock(&tr->max_lock);
2382 buffer = tr->array_buffer.buffer;
2384 ring_buffer_record_enable(buffer);
2386 #ifdef CONFIG_TRACER_MAX_TRACE
2387 buffer = tr->max_buffer.buffer;
2389 ring_buffer_record_enable(buffer);
2392 arch_spin_unlock(&tr->max_lock);
2395 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2399 * tracing_start - quick start of the tracer
2401 * If tracing is enabled but was stopped by tracing_stop,
2402 * this will start the tracer back up.
2404 void tracing_start(void)
2407 return tracing_start_tr(&global_trace);
2410 static void tracing_stop_tr(struct trace_array *tr)
2412 struct trace_buffer *buffer;
2413 unsigned long flags;
2415 raw_spin_lock_irqsave(&tr->start_lock, flags);
2416 if (tr->stop_count++)
2419 /* Prevent the buffers from switching */
2420 arch_spin_lock(&tr->max_lock);
2422 buffer = tr->array_buffer.buffer;
2424 ring_buffer_record_disable(buffer);
2426 #ifdef CONFIG_TRACER_MAX_TRACE
2427 buffer = tr->max_buffer.buffer;
2429 ring_buffer_record_disable(buffer);
2432 arch_spin_unlock(&tr->max_lock);
2435 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2439 * tracing_stop - quick stop of the tracer
2441 * Light weight way to stop tracing. Use in conjunction with
2444 void tracing_stop(void)
2446 return tracing_stop_tr(&global_trace);
2449 static int trace_save_cmdline(struct task_struct *tsk)
2453 /* treat recording of idle task as a success */
2457 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2460 * It's not the end of the world if we don't get
2461 * the lock, but we also don't want to spin
2462 * nor do we want to disable interrupts,
2463 * so if we miss here, then better luck next time.
2465 * This is called from within the scheduler and wakeup paths, so
2466 * interrupts should already be disabled and the run queue lock held.
2468 lockdep_assert_preemption_disabled();
2469 if (!arch_spin_trylock(&trace_cmdline_lock))
2472 idx = savedcmd->map_pid_to_cmdline[tpid];
2473 if (idx == NO_CMDLINE_MAP) {
2474 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2476 savedcmd->map_pid_to_cmdline[tpid] = idx;
2477 savedcmd->cmdline_idx = idx;
2480 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2481 set_cmdline(idx, tsk->comm);
2483 arch_spin_unlock(&trace_cmdline_lock);
2488 static void __trace_find_cmdline(int pid, char comm[])
2494 strcpy(comm, "<idle>");
2498 if (WARN_ON_ONCE(pid < 0)) {
2499 strcpy(comm, "<XXX>");
2503 tpid = pid & (PID_MAX_DEFAULT - 1);
2504 map = savedcmd->map_pid_to_cmdline[tpid];
2505 if (map != NO_CMDLINE_MAP) {
2506 tpid = savedcmd->map_cmdline_to_pid[map];
2508 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2512 strcpy(comm, "<...>");
2515 void trace_find_cmdline(int pid, char comm[])
2518 arch_spin_lock(&trace_cmdline_lock);
2520 __trace_find_cmdline(pid, comm);
2522 arch_spin_unlock(&trace_cmdline_lock);
2526 static int *trace_find_tgid_ptr(int pid)
2529 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2530 * if we observe a non-NULL tgid_map then we also observe the correct tgid_map_max.
2533 int *map = smp_load_acquire(&tgid_map);
2535 if (unlikely(!map || pid > tgid_map_max))
2541 int trace_find_tgid(int pid)
2543 int *ptr = trace_find_tgid_ptr(pid);
2545 return ptr ? *ptr : 0;
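/*
 * Note: trace_find_tgid() returns 0 when no tgid can be resolved for @pid,
 * i.e. the tgid_map has not been published yet, @pid is outside the range
 * covered by tgid_map_max, or nothing has been recorded for it.
 */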
2548 static int trace_save_tgid(struct task_struct *tsk)
2552 /* treat recording of idle task as a success */
2556 ptr = trace_find_tgid_ptr(tsk->pid);
2564 static bool tracing_record_taskinfo_skip(int flags)
2566 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2568 if (!__this_cpu_read(trace_taskinfo_save))
2574 * tracing_record_taskinfo - record the task info of a task
2576 * @task: task to record
2577 * @flags: TRACE_RECORD_CMDLINE for recording comm
2578 * TRACE_RECORD_TGID for recording tgid
2580 void tracing_record_taskinfo(struct task_struct *task, int flags)
2584 if (tracing_record_taskinfo_skip(flags))
2588 * Record as much task information as possible. If some fail, continue
2589 * to try to record the others.
2591 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2592 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2594 /* If recording any information failed, retry again soon. */
2598 __this_cpu_write(trace_taskinfo_save, false);
2602 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2604 * @prev: previous task during sched_switch
2605 * @next: next task during sched_switch
2606 * @flags: TRACE_RECORD_CMDLINE for recording comm
2607 * TRACE_RECORD_TGID for recording tgid
2609 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2610 struct task_struct *next, int flags)
2614 if (tracing_record_taskinfo_skip(flags))
2618 * Record as much task information as possible. If some fail, continue
2619 * to try to record the others.
2621 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2622 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2623 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2624 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2626 /* If recording any information failed, retry again soon. */
2630 __this_cpu_write(trace_taskinfo_save, false);
2633 /* Helpers to record a specific task information */
2634 void tracing_record_cmdline(struct task_struct *task)
2636 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2639 void tracing_record_tgid(struct task_struct *task)
2641 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2645 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2646 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2647 * simplifies those functions and keeps them in sync.
2649 enum print_line_t trace_handle_return(struct trace_seq *s)
2651 return trace_seq_has_overflowed(s) ?
2652 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2654 EXPORT_SYMBOL_GPL(trace_handle_return);
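/*
 * Illustrative use in an event output callback (a sketch only, not an
 * event defined in this file):
 *
 *	static enum print_line_t foo_trace_output(struct trace_iterator *iter,
 *						  int flags, struct trace_event *event)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		trace_seq_printf(s, "foo value: %d\n", 42);
 *		return trace_handle_return(s);
 *	}
 */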
2656 static unsigned short migration_disable_value(void)
2658 #if defined(CONFIG_SMP)
2659 return current->migration_disabled;
2665 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2667 unsigned int trace_flags = irqs_status;
2670 pc = preempt_count();
2673 trace_flags |= TRACE_FLAG_NMI;
2674 if (pc & HARDIRQ_MASK)
2675 trace_flags |= TRACE_FLAG_HARDIRQ;
2676 if (in_serving_softirq())
2677 trace_flags |= TRACE_FLAG_SOFTIRQ;
2678 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2679 trace_flags |= TRACE_FLAG_BH_OFF;
2681 if (tif_need_resched())
2682 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2683 if (test_preempt_need_resched())
2684 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2685 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2686 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
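/*
 * Layout of the trace_ctx word built above (derived from the expression,
 * not a separately documented ABI):
 *
 *	bits  0- 3: preempt_count(), clamped to 0xf
 *	bits  4- 7: migration-disable depth, clamped to 0xf
 *	bits 16+  : the TRACE_FLAG_* bits collected above
 */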
2689 struct ring_buffer_event *
2690 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2693 unsigned int trace_ctx)
2695 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2698 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2699 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2700 static int trace_buffered_event_ref;
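/*
 * trace_buffered_event_enable() and trace_buffered_event_disable() below
 * must be called in matched pairs with event_mutex held;
 * trace_buffered_event_ref counts the users so the per-cpu pages are only
 * allocated on the first enable and freed on the last disable.
 */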
2703 * trace_buffered_event_enable - enable buffering events
2705 * When events are being filtered, it is quicker to use a temporary
2706 * buffer to write the event data into if there's a likely chance
2707 * that it will not be committed. Discarding from the ring buffer
2708 * is not as fast as committing, and is much slower than copying into the temp buffer.
2711 * When an event is to be filtered, allocate per-cpu buffers to
2712 * write the event data into; if the event is filtered and discarded
2713 * it is simply dropped, otherwise the entire data is committed to the ring buffer.
2716 void trace_buffered_event_enable(void)
2718 struct ring_buffer_event *event;
2722 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2724 if (trace_buffered_event_ref++)
2727 for_each_tracing_cpu(cpu) {
2728 page = alloc_pages_node(cpu_to_node(cpu),
2729 GFP_KERNEL | __GFP_NORETRY, 0);
2730 /* This is just an optimization and can handle failures */
2732 pr_err("Failed to allocate event buffer\n");
2736 event = page_address(page);
2737 memset(event, 0, sizeof(*event));
2739 per_cpu(trace_buffered_event, cpu) = event;
2742 if (cpu == smp_processor_id() &&
2743 __this_cpu_read(trace_buffered_event) !=
2744 per_cpu(trace_buffered_event, cpu))
2750 static void enable_trace_buffered_event(void *data)
2752 /* Probably not needed, but do it anyway */
2754 this_cpu_dec(trace_buffered_event_cnt);
2757 static void disable_trace_buffered_event(void *data)
2759 this_cpu_inc(trace_buffered_event_cnt);
2763 * trace_buffered_event_disable - disable buffering events
2765 * When a filter is removed, it is faster to not use the buffered
2766 * events, and to commit directly into the ring buffer. Free up
2767 * the temp buffers when there are no more users. This requires
2768 * special synchronization with current events.
2770 void trace_buffered_event_disable(void)
2774 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2776 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2779 if (--trace_buffered_event_ref)
2782 /* For each CPU, set the buffer as used. */
2783 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2786 /* Wait for all current users to finish */
2789 for_each_tracing_cpu(cpu) {
2790 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2791 per_cpu(trace_buffered_event, cpu) = NULL;
2794 * Make sure trace_buffered_event is NULL before clearing
2795 * trace_buffered_event_cnt.
2799 /* Do the work on each cpu */
2800 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2804 static struct trace_buffer *temp_buffer;
2806 struct ring_buffer_event *
2807 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2808 struct trace_event_file *trace_file,
2809 int type, unsigned long len,
2810 unsigned int trace_ctx)
2812 struct ring_buffer_event *entry;
2813 struct trace_array *tr = trace_file->tr;
2816 *current_rb = tr->array_buffer.buffer;
2818 if (!tr->no_filter_buffering_ref &&
2819 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2820 preempt_disable_notrace();
2822 * Filtering is on, so try to use the per cpu buffer first.
2823 * This buffer will simulate a ring_buffer_event,
2824 * where the type_len is zero and the array[0] will
2825 * hold the full length.
2826 * (see include/linux/ring_buffer.h for details on
2827 * how the ring_buffer_event is structured).
2829 * Using a temp buffer during filtering and copying it
2830 * on a matched filter is quicker than writing directly
2831 * into the ring buffer and then discarding it when
2832 * it doesn't match. That is because the discard
2833 * requires several atomic operations to get right.
2834 * Copying on a match and doing nothing on a failed match
2835 * is still quicker than skipping the copy on a match but having
2836 * to discard out of the ring buffer on a failed match.
2838 if ((entry = __this_cpu_read(trace_buffered_event))) {
2839 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2841 val = this_cpu_inc_return(trace_buffered_event_cnt);
2844 * Preemption is disabled, but interrupts and NMIs
2845 * can still come in now. If that happens after
2846 * the above increment, then it will have to go
2847 * back to the old method of allocating the event
2848 * on the ring buffer, and if the filter fails, it
2849 * will have to call ring_buffer_discard_commit()
2852 * Need to also check the unlikely case that the
2853 * length is bigger than the temp buffer size.
2854 * If that happens, then the reserve is pretty much
2855 * guaranteed to fail, as the ring buffer currently
2856 * only allows events less than a page. But that may
2857 * change in the future, so let the ring buffer reserve
2858 * handle the failure in that case.
2860 if (val == 1 && likely(len <= max_len)) {
2861 trace_event_setup(entry, type, trace_ctx);
2862 entry->array[0] = len;
2863 /* Return with preemption disabled */
2866 this_cpu_dec(trace_buffered_event_cnt);
2868 /* __trace_buffer_lock_reserve() disables preemption */
2869 preempt_enable_notrace();
2872 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2875 * If tracing is off, but we have triggers enabled
2876 * we still need to look at the event data. Use the temp_buffer
2877 * to store the trace event for the trigger to use. It's recursion
2878 * safe and will not be recorded anywhere.
2880 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2881 *current_rb = temp_buffer;
2882 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2887 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
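/*
 * Rough calling pattern for the reserve above (an illustrative sketch;
 * the real callers are the generated TRACE_EVENT() helpers):
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *						type, len, trace_ctx);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	... fill in the entry fields, then commit the event ...
 */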
2889 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2890 static DEFINE_MUTEX(tracepoint_printk_mutex);
2892 static void output_printk(struct trace_event_buffer *fbuffer)
2894 struct trace_event_call *event_call;
2895 struct trace_event_file *file;
2896 struct trace_event *event;
2897 unsigned long flags;
2898 struct trace_iterator *iter = tracepoint_print_iter;
2900 /* We should never get here if iter is NULL */
2901 if (WARN_ON_ONCE(!iter))
2904 event_call = fbuffer->trace_file->event_call;
2905 if (!event_call || !event_call->event.funcs ||
2906 !event_call->event.funcs->trace)
2909 file = fbuffer->trace_file;
2910 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2911 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2912 !filter_match_preds(file->filter, fbuffer->entry)))
2915 event = &fbuffer->trace_file->event_call->event;
2917 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2918 trace_seq_init(&iter->seq);
2919 iter->ent = fbuffer->entry;
2920 event_call->event.funcs->trace(iter, 0, event);
2921 trace_seq_putc(&iter->seq, 0);
2922 printk("%s", iter->seq.buffer);
2924 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2927 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2928 void *buffer, size_t *lenp,
2931 int save_tracepoint_printk;
2934 mutex_lock(&tracepoint_printk_mutex);
2935 save_tracepoint_printk = tracepoint_printk;
2937 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2940 * This will force exiting early, as tracepoint_printk
2941 * is always zero when tracepoint_print_iter is not allocated
2943 if (!tracepoint_print_iter)
2944 tracepoint_printk = 0;
2946 if (save_tracepoint_printk == tracepoint_printk)
2949 if (tracepoint_printk)
2950 static_key_enable(&tracepoint_printk_key.key);
2952 static_key_disable(&tracepoint_printk_key.key);
2955 mutex_unlock(&tracepoint_printk_mutex);
2960 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2962 enum event_trigger_type tt = ETT_NONE;
2963 struct trace_event_file *file = fbuffer->trace_file;
2965 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2966 fbuffer->entry, &tt))
2969 if (static_key_false(&tracepoint_printk_key.key))
2970 output_printk(fbuffer);
2972 if (static_branch_unlikely(&trace_event_exports_enabled))
2973 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2975 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2976 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2980 event_triggers_post_call(file, tt);
2983 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2988 * trace_buffer_unlock_commit_regs()
2989 * trace_event_buffer_commit()
2990 * trace_event_raw_event_xxx()
2992 # define STACK_SKIP 3
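/*
 * i.e. skip the three wrapper frames listed above so that a recorded stack
 * trace starts at the tracepoint call site rather than inside the tracing
 * code itself.
 */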
2994 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2995 struct trace_buffer *buffer,
2996 struct ring_buffer_event *event,
2997 unsigned int trace_ctx,
2998 struct pt_regs *regs)
3000 __buffer_unlock_commit(buffer, event);
3003 * If regs is not set, then skip the necessary functions.
3004 * Note, we can still get here via blktrace, wakeup tracer
3005 * and mmiotrace, but that's ok if they lose a function or
3006 * two. They are not that meaningful.
3008 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3009 ftrace_trace_userstack(tr, buffer, trace_ctx);
3013 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3016 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3017 struct ring_buffer_event *event)
3019 __buffer_unlock_commit(buffer, event);
3023 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3024 parent_ip, unsigned int trace_ctx)
3026 struct trace_event_call *call = &event_function;
3027 struct trace_buffer *buffer = tr->array_buffer.buffer;
3028 struct ring_buffer_event *event;
3029 struct ftrace_entry *entry;
3031 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3035 entry = ring_buffer_event_data(event);
3037 entry->parent_ip = parent_ip;
3039 if (!call_filter_check_discard(call, entry, buffer, event)) {
3040 if (static_branch_unlikely(&trace_function_exports_enabled))
3041 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3042 __buffer_unlock_commit(buffer, event);
3046 #ifdef CONFIG_STACKTRACE
3048 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3049 #define FTRACE_KSTACK_NESTING 4
3051 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3053 struct ftrace_stack {
3054 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3058 struct ftrace_stacks {
3059 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3062 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3063 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
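/*
 * ftrace_stack_reserve counts how many stack saves are in flight on this
 * CPU; each nesting context (normal, softirq, irq, NMI) is given its own
 * slot in ftrace_stacks, so a stack dump taken from an interrupt cannot
 * overwrite one still being filled in underneath it.
 */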
3065 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3066 unsigned int trace_ctx,
3067 int skip, struct pt_regs *regs)
3069 struct trace_event_call *call = &event_kernel_stack;
3070 struct ring_buffer_event *event;
3071 unsigned int size, nr_entries;
3072 struct ftrace_stack *fstack;
3073 struct stack_entry *entry;
3077 * Add one, for this function and the call to save_stack_trace().
3078 * If regs is set, then these functions will not be in the way.
3080 #ifndef CONFIG_UNWINDER_ORC
3085 preempt_disable_notrace();
3087 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3089 /* This should never happen. If it does, yell once and skip */
3090 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3094 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3095 * interrupt will either see the value pre increment or post
3096 * increment. If the interrupt happens pre increment it will have
3097 * restored the counter when it returns. We just need a barrier to
3098 * keep gcc from moving things around.
3102 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3103 size = ARRAY_SIZE(fstack->calls);
3106 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3109 nr_entries = stack_trace_save(fstack->calls, size, skip);
3112 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3113 struct_size(entry, caller, nr_entries),
3117 entry = ring_buffer_event_data(event);
3119 entry->size = nr_entries;
3120 memcpy(&entry->caller, fstack->calls,
3121 flex_array_size(entry, caller, nr_entries));
3123 if (!call_filter_check_discard(call, entry, buffer, event))
3124 __buffer_unlock_commit(buffer, event);
3127 /* Again, don't let gcc optimize things here */
3129 __this_cpu_dec(ftrace_stack_reserve);
3130 preempt_enable_notrace();
3134 static inline void ftrace_trace_stack(struct trace_array *tr,
3135 struct trace_buffer *buffer,
3136 unsigned int trace_ctx,
3137 int skip, struct pt_regs *regs)
3139 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3142 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3145 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3148 struct trace_buffer *buffer = tr->array_buffer.buffer;
3150 if (rcu_is_watching()) {
3151 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3155 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3159 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3160 * but if the above rcu_is_watching() failed, then the NMI
3161 * triggered someplace critical, and ct_irq_enter() should
3162 * not be called from NMI.
3164 if (unlikely(in_nmi()))
3167 ct_irq_enter_irqson();
3168 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3169 ct_irq_exit_irqson();
3173 * trace_dump_stack - record a stack back trace in the trace buffer
3174 * @skip: Number of functions to skip (helper handlers)
3176 void trace_dump_stack(int skip)
3178 if (tracing_disabled || tracing_selftest_running)
3181 #ifndef CONFIG_UNWINDER_ORC
3182 /* Skip 1 to skip this function. */
3185 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3186 tracing_gen_ctx(), skip, NULL);
3188 EXPORT_SYMBOL_GPL(trace_dump_stack);
3190 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3191 static DEFINE_PER_CPU(int, user_stack_count);
3194 ftrace_trace_userstack(struct trace_array *tr,
3195 struct trace_buffer *buffer, unsigned int trace_ctx)
3197 struct trace_event_call *call = &event_user_stack;
3198 struct ring_buffer_event *event;
3199 struct userstack_entry *entry;
3201 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3205 * NMIs can not handle page faults, even with fixups.
3206 * Saving the user stack can (and often does) fault.
3208 if (unlikely(in_nmi()))
3212 * prevent recursion, since the user stack tracing may
3213 * trigger other kernel events.
3216 if (__this_cpu_read(user_stack_count))
3219 __this_cpu_inc(user_stack_count);
3221 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3222 sizeof(*entry), trace_ctx);
3224 goto out_drop_count;
3225 entry = ring_buffer_event_data(event);
3227 entry->tgid = current->tgid;
3228 memset(&entry->caller, 0, sizeof(entry->caller));
3230 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3231 if (!call_filter_check_discard(call, entry, buffer, event))
3232 __buffer_unlock_commit(buffer, event);
3235 __this_cpu_dec(user_stack_count);
3239 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3240 static void ftrace_trace_userstack(struct trace_array *tr,
3241 struct trace_buffer *buffer,
3242 unsigned int trace_ctx)
3245 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3247 #endif /* CONFIG_STACKTRACE */
3250 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3251 unsigned long long delta)
3253 entry->bottom_delta_ts = delta & U32_MAX;
3254 entry->top_delta_ts = (delta >> 32);
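/*
 * The 64-bit delta is split across two 32-bit fields; readers are expected
 * to rebuild it as ((u64)top_delta_ts << 32) | bottom_delta_ts.
 */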
3257 void trace_last_func_repeats(struct trace_array *tr,
3258 struct trace_func_repeats *last_info,
3259 unsigned int trace_ctx)
3261 struct trace_buffer *buffer = tr->array_buffer.buffer;
3262 struct func_repeats_entry *entry;
3263 struct ring_buffer_event *event;
3266 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3267 sizeof(*entry), trace_ctx);
3271 delta = ring_buffer_event_time_stamp(buffer, event) -
3272 last_info->ts_last_call;
3274 entry = ring_buffer_event_data(event);
3275 entry->ip = last_info->ip;
3276 entry->parent_ip = last_info->parent_ip;
3277 entry->count = last_info->count;
3278 func_repeats_set_delta_ts(entry, delta);
3280 __buffer_unlock_commit(buffer, event);
3283 /* created for use with alloc_percpu */
3284 struct trace_buffer_struct {
3286 char buffer[4][TRACE_BUF_SIZE];
3289 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3292 * This allows for lockless recording. If we're nested too deeply, then
3293 * this returns NULL.
3295 static char *get_trace_buf(void)
3297 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3299 if (!trace_percpu_buffer || buffer->nesting >= 4)
3304 /* Interrupts must see nesting incremented before we use the buffer */
3306 return &buffer->buffer[buffer->nesting - 1][0];
3309 static void put_trace_buf(void)
3311 /* Don't let the decrement of nesting leak before this */
3313 this_cpu_dec(trace_percpu_buffer->nesting);
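/*
 * get_trace_buf()/put_trace_buf() must be used in pairs with preemption
 * disabled, roughly as trace_vbprintk() below does:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format into tbuffer ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 *
 * Up to four nested users per CPU are supported; a fifth gets NULL back.
 */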
3316 static int alloc_percpu_trace_buffer(void)
3318 struct trace_buffer_struct __percpu *buffers;
3320 if (trace_percpu_buffer)
3323 buffers = alloc_percpu(struct trace_buffer_struct);
3324 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3327 trace_percpu_buffer = buffers;
3331 static int buffers_allocated;
3333 void trace_printk_init_buffers(void)
3335 if (buffers_allocated)
3338 if (alloc_percpu_trace_buffer())
3341 /* trace_printk() is for debug use only. Don't use it in production. */
3344 pr_warn("**********************************************************\n");
3345 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3347 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3349 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3350 pr_warn("** unsafe for production use. **\n");
3352 pr_warn("** If you see this message and you are not debugging **\n");
3353 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3355 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3356 pr_warn("**********************************************************\n");
3358 /* Expand the buffers to set size */
3359 tracing_update_buffers();
3361 buffers_allocated = 1;
3364 * trace_printk_init_buffers() can be called by modules.
3365 * If that happens, then we need to start cmdline recording
3366 * directly here. If the global_trace.buffer is already
3367 * allocated here, then this was called by module code.
3369 if (global_trace.array_buffer.buffer)
3370 tracing_start_cmdline_record();
3372 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3374 void trace_printk_start_comm(void)
3376 /* Start tracing comms if trace printk is set */
3377 if (!buffers_allocated)
3379 tracing_start_cmdline_record();
3382 static void trace_printk_start_stop_comm(int enabled)
3384 if (!buffers_allocated)
3388 tracing_start_cmdline_record();
3390 tracing_stop_cmdline_record();
3394 * trace_vbprintk - write binary msg to tracing buffer
3395 * @ip: The address of the caller
3396 * @fmt: The string format to write to the buffer
3397 * @args: Arguments for @fmt
3399 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3401 struct trace_event_call *call = &event_bprint;
3402 struct ring_buffer_event *event;
3403 struct trace_buffer *buffer;
3404 struct trace_array *tr = &global_trace;
3405 struct bprint_entry *entry;
3406 unsigned int trace_ctx;
3410 if (unlikely(tracing_selftest_running || tracing_disabled))
3413 /* Don't pollute graph traces with trace_vprintk internals */
3414 pause_graph_tracing();
3416 trace_ctx = tracing_gen_ctx();
3417 preempt_disable_notrace();
3419 tbuffer = get_trace_buf();
3425 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3427 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3430 size = sizeof(*entry) + sizeof(u32) * len;
3431 buffer = tr->array_buffer.buffer;
3432 ring_buffer_nest_start(buffer);
3433 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3437 entry = ring_buffer_event_data(event);
3441 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3442 if (!call_filter_check_discard(call, entry, buffer, event)) {
3443 __buffer_unlock_commit(buffer, event);
3444 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3448 ring_buffer_nest_end(buffer);
3453 preempt_enable_notrace();
3454 unpause_graph_tracing();
3458 EXPORT_SYMBOL_GPL(trace_vbprintk);
3462 __trace_array_vprintk(struct trace_buffer *buffer,
3463 unsigned long ip, const char *fmt, va_list args)
3465 struct trace_event_call *call = &event_print;
3466 struct ring_buffer_event *event;
3468 struct print_entry *entry;
3469 unsigned int trace_ctx;
3472 if (tracing_disabled)
3475 /* Don't pollute graph traces with trace_vprintk internals */
3476 pause_graph_tracing();
3478 trace_ctx = tracing_gen_ctx();
3479 preempt_disable_notrace();
3482 tbuffer = get_trace_buf();
3488 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3490 size = sizeof(*entry) + len + 1;
3491 ring_buffer_nest_start(buffer);
3492 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3496 entry = ring_buffer_event_data(event);
3499 memcpy(&entry->buf, tbuffer, len + 1);
3500 if (!call_filter_check_discard(call, entry, buffer, event)) {
3501 __buffer_unlock_commit(buffer, event);
3502 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3506 ring_buffer_nest_end(buffer);
3510 preempt_enable_notrace();
3511 unpause_graph_tracing();
3517 int trace_array_vprintk(struct trace_array *tr,
3518 unsigned long ip, const char *fmt, va_list args)
3520 if (tracing_selftest_running && tr == &global_trace)
3523 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3527 * trace_array_printk - Print a message to a specific instance
3528 * @tr: The instance trace_array descriptor
3529 * @ip: The instruction pointer that this is called from.
3530 * @fmt: The format to print (printf format)
3532 * If a subsystem sets up its own instance, it has the right to
3533 * printk strings into its tracing instance buffer using this
3534 * function. Note, this function will not write into the top level
3535 * buffer (use trace_printk() for that), as the top level buffer
3536 * should only contain events that can be individually disabled.
3537 * trace_printk() is only used for debugging a kernel, and should
3538 * never be incorporated into normal use.
3540 * trace_array_printk() can be used, as it will not add noise to the
3541 * top level tracing buffer.
3543 * Note, trace_array_init_printk() must be called on @tr before this
3547 int trace_array_printk(struct trace_array *tr,
3548 unsigned long ip, const char *fmt, ...)
3556 /* This is only allowed for created instances */
3557 if (tr == &global_trace)
3560 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3564 ret = trace_array_vprintk(tr, ip, fmt, ap);
3568 EXPORT_SYMBOL_GPL(trace_array_printk);
3571 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3572 * @tr: The trace array to initialize the buffers for
3574 * As trace_array_printk() only writes into instances, they are OK to
3575 * have in the kernel (unlike trace_printk()). This needs to be called
3576 * before trace_array_printk() can be used on a trace_array.
3578 int trace_array_init_printk(struct trace_array *tr)
3583 /* This is only allowed for created instances */
3584 if (tr == &global_trace)
3587 return alloc_percpu_trace_buffer();
3589 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3592 int trace_array_printk_buf(struct trace_buffer *buffer,
3593 unsigned long ip, const char *fmt, ...)
3598 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3602 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3608 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3610 return trace_array_vprintk(&global_trace, ip, fmt, args);
3612 EXPORT_SYMBOL_GPL(trace_vprintk);
3614 static void trace_iterator_increment(struct trace_iterator *iter)
3616 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3620 ring_buffer_iter_advance(buf_iter);
3623 static struct trace_entry *
3624 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3625 unsigned long *lost_events)
3627 struct ring_buffer_event *event;
3628 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3631 event = ring_buffer_iter_peek(buf_iter, ts);
3633 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3634 (unsigned long)-1 : 0;
3636 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3641 iter->ent_size = ring_buffer_event_length(event);
3642 return ring_buffer_event_data(event);
3648 static struct trace_entry *
3649 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3650 unsigned long *missing_events, u64 *ent_ts)
3652 struct trace_buffer *buffer = iter->array_buffer->buffer;
3653 struct trace_entry *ent, *next = NULL;
3654 unsigned long lost_events = 0, next_lost = 0;
3655 int cpu_file = iter->cpu_file;
3656 u64 next_ts = 0, ts;
3662 * If we are in a per_cpu trace file, don't bother by iterating over
3663 * all cpu and peek directly.
3665 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3666 if (ring_buffer_empty_cpu(buffer, cpu_file))
3668 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3670 *ent_cpu = cpu_file;
3675 for_each_tracing_cpu(cpu) {
3677 if (ring_buffer_empty_cpu(buffer, cpu))
3680 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3683 * Pick the entry with the smallest timestamp:
3685 if (ent && (!next || ts < next_ts)) {
3689 next_lost = lost_events;
3690 next_size = iter->ent_size;
3694 iter->ent_size = next_size;
3697 *ent_cpu = next_cpu;
3703 *missing_events = next_lost;
3708 #define STATIC_FMT_BUF_SIZE 128
3709 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3711 char *trace_iter_expand_format(struct trace_iterator *iter)
3716 * iter->tr is NULL when used with tp_printk, which makes
3717 * this get called where it is not safe to call krealloc().
3719 if (!iter->tr || iter->fmt == static_fmt_buf)
3722 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3725 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3732 /* Returns true if the string is safe to dereference from an event */
3733 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3736 unsigned long addr = (unsigned long)str;
3737 struct trace_event *trace_event;
3738 struct trace_event_call *event;
3740 /* Ignore strings with no length */
3744 /* OK if part of the event data */
3745 if ((addr >= (unsigned long)iter->ent) &&
3746 (addr < (unsigned long)iter->ent + iter->ent_size))
3749 /* OK if part of the temp seq buffer */
3750 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3751 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3754 /* Core rodata can not be freed */
3755 if (is_kernel_rodata(addr))
3758 if (trace_is_tracepoint_string(str))
3762 * Now this could be a module event, referencing core module
3763 * data, which is OK.
3768 trace_event = ftrace_find_event(iter->ent->type);
3772 event = container_of(trace_event, struct trace_event_call, event);
3773 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3776 /* Would rather have rodata, but this will suffice */
3777 if (within_module_core(addr, event->module))
3783 static const char *show_buffer(struct trace_seq *s)
3785 struct seq_buf *seq = &s->seq;
3787 seq_buf_terminate(seq);
3792 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3794 static int test_can_verify_check(const char *fmt, ...)
3801 * The verifier depends on vsnprintf() modifying the va_list
3802 * passed to it, where it is sent by reference. Some architectures
3803 * (like x86_32) pass it by value, which means that vsnprintf()
3804 * does not modify the va_list passed to it, and the verifier
3805 * would then need to be able to understand all the values that
3806 * vsnprintf can use. If it is passed by value, the verifier is disabled.
3810 vsnprintf(buf, 16, "%d", ap);
3811 ret = va_arg(ap, int);
3817 static void test_can_verify(void)
3819 if (!test_can_verify_check("%d %d", 0, 1)) {
3820 pr_info("trace event string verifier disabled\n");
3821 static_branch_inc(&trace_no_verify);
3826 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3827 * @iter: The iterator that holds the seq buffer and the event being printed
3828 * @fmt: The format used to print the event
3829 * @ap: The va_list holding the data to print from @fmt.
3831 * This writes the data into the @iter->seq buffer using the data from
3832 * @fmt and @ap. If the format has a %s, then the source of the string
3833 * is examined to make sure it is safe to print, otherwise it will
3834 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3837 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3840 const char *p = fmt;
3844 if (WARN_ON_ONCE(!fmt))
3847 if (static_branch_unlikely(&trace_no_verify))
3850 /* Don't bother checking when doing a ftrace_dump() */
3851 if (iter->fmt == static_fmt_buf)
3860 /* We only care about %s and variants */
3861 for (i = 0; p[i]; i++) {
3862 if (i + 1 >= iter->fmt_size) {
3864 * If we can't expand the copy buffer,
3867 if (!trace_iter_expand_format(iter))
3871 if (p[i] == '\\' && p[i+1]) {
3876 /* Need to test cases like %08.*s */
3877 for (j = 1; p[i+j]; j++) {
3878 if (isdigit(p[i+j]) ||
3881 if (p[i+j] == '*') {
3893 /* If no %s found then just print normally */
3897 /* Copy up to the %s, and print that */
3898 strncpy(iter->fmt, p, i);
3899 iter->fmt[i] = '\0';
3900 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3903 * If iter->seq is full, the above call no longer guarantees
3904 * that ap is in sync with fmt processing, and further calls
3905 * to va_arg() can return wrong positional arguments.
3907 * Ensure that ap is no longer used in this case.
3909 if (iter->seq.full) {
3915 len = va_arg(ap, int);
3917 /* The ap now points to the string data of the %s */
3918 str = va_arg(ap, const char *);
3921 * If you hit this warning, it is likely that the
3922 * trace event in question used %s on a string that
3923 * was saved at the time of the event, but may not be
3924 * around when the trace is read. Use __string(),
3925 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3926 * macro instead. See samples/trace_events/trace-events-sample.h
3929 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3930 "fmt: '%s' current_buffer: '%s'",
3931 fmt, show_buffer(&iter->seq))) {
3934 /* Try to safely read the string */
3936 if (len + 1 > iter->fmt_size)
3937 len = iter->fmt_size - 1;
3940 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3944 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3948 trace_seq_printf(&iter->seq, "(0x%px)", str);
3950 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3952 str = "[UNSAFE-MEMORY]";
3953 strcpy(iter->fmt, "%s");
3955 strncpy(iter->fmt, p + i, j + 1);
3956 iter->fmt[j+1] = '\0';
3959 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3961 trace_seq_printf(&iter->seq, iter->fmt, str);
3967 trace_seq_vprintf(&iter->seq, p, ap);
3970 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3972 const char *p, *new_fmt;
3975 if (WARN_ON_ONCE(!fmt))
3978 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3982 new_fmt = q = iter->fmt;
3984 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3985 if (!trace_iter_expand_format(iter))
3988 q += iter->fmt - new_fmt;
3989 new_fmt = iter->fmt;
3994 /* Replace %p with %px */
3998 } else if (p[0] == 'p' && !isalnum(p[1])) {
4009 #define STATIC_TEMP_BUF_SIZE 128
4010 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4012 /* Find the next real entry, without updating the iterator itself */
4013 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4014 int *ent_cpu, u64 *ent_ts)
4016 /* __find_next_entry will reset ent_size */
4017 int ent_size = iter->ent_size;
4018 struct trace_entry *entry;
4021 * If called from ftrace_dump(), then the iter->temp buffer
4022 * will be the static_temp_buf and not created from kmalloc.
4023 * If the entry size is greater than the buffer, we can
4024 * not save it. Just return NULL in that case. This is only
4025 * used to add markers when two consecutive events' time
4026 * stamps have a large delta. See trace_print_lat_context()
4028 if (iter->temp == static_temp_buf &&
4029 STATIC_TEMP_BUF_SIZE < ent_size)
4033 * The __find_next_entry() may call peek_next_entry(), which may
4034 * call ring_buffer_peek() that may make the contents of iter->ent
4035 * undefined. Need to copy iter->ent now.
4037 if (iter->ent && iter->ent != iter->temp) {
4038 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4039 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4041 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4046 iter->temp_size = iter->ent_size;
4048 memcpy(iter->temp, iter->ent, iter->ent_size);
4049 iter->ent = iter->temp;
4051 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4052 /* Put back the original ent_size */
4053 iter->ent_size = ent_size;
4058 /* Find the next real entry, and increment the iterator to the next entry */
4059 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4061 iter->ent = __find_next_entry(iter, &iter->cpu,
4062 &iter->lost_events, &iter->ts);
4065 trace_iterator_increment(iter);
4067 return iter->ent ? iter : NULL;
4070 static void trace_consume(struct trace_iterator *iter)
4072 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4073 &iter->lost_events);
4076 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4078 struct trace_iterator *iter = m->private;
4082 WARN_ON_ONCE(iter->leftover);
4086 /* can't go backwards */
4091 ent = trace_find_next_entry_inc(iter);
4095 while (ent && iter->idx < i)
4096 ent = trace_find_next_entry_inc(iter);
4103 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4105 struct ring_buffer_iter *buf_iter;
4106 unsigned long entries = 0;
4109 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4111 buf_iter = trace_buffer_iter(iter, cpu);
4115 ring_buffer_iter_reset(buf_iter);
4118 * With the max latency tracers, it is possible that a reset
4119 * never took place on a cpu. This is evident by the
4120 * timestamp being before the start of the buffer.
4122 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4123 if (ts >= iter->array_buffer->time_start)
4126 ring_buffer_iter_advance(buf_iter);
4129 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4133 * The current tracer is copied to avoid global locking all around.
4136 static void *s_start(struct seq_file *m, loff_t *pos)
4138 struct trace_iterator *iter = m->private;
4139 struct trace_array *tr = iter->tr;
4140 int cpu_file = iter->cpu_file;
4145 mutex_lock(&trace_types_lock);
4146 if (unlikely(tr->current_trace != iter->trace)) {
4147 /* Close iter->trace before switching to the new current tracer */
4148 if (iter->trace->close)
4149 iter->trace->close(iter);
4150 iter->trace = tr->current_trace;
4151 /* Reopen the new current tracer */
4152 if (iter->trace->open)
4153 iter->trace->open(iter);
4155 mutex_unlock(&trace_types_lock);
4157 #ifdef CONFIG_TRACER_MAX_TRACE
4158 if (iter->snapshot && iter->trace->use_max_tr)
4159 return ERR_PTR(-EBUSY);
4162 if (*pos != iter->pos) {
4167 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4168 for_each_tracing_cpu(cpu)
4169 tracing_iter_reset(iter, cpu);
4171 tracing_iter_reset(iter, cpu_file);
4174 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4179 * If we overflowed the seq_file before, then we want
4180 * to just reuse the trace_seq buffer again.
4186 p = s_next(m, p, &l);
4190 trace_event_read_lock();
4191 trace_access_lock(cpu_file);
4195 static void s_stop(struct seq_file *m, void *p)
4197 struct trace_iterator *iter = m->private;
4199 #ifdef CONFIG_TRACER_MAX_TRACE
4200 if (iter->snapshot && iter->trace->use_max_tr)
4204 trace_access_unlock(iter->cpu_file);
4205 trace_event_read_unlock();
4209 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4210 unsigned long *entries, int cpu)
4212 unsigned long count;
4214 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4216 * If this buffer has skipped entries, then we hold all
4217 * entries for the trace and we need to ignore the
4218 * ones before the time stamp.
4220 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4221 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4222 /* total is the same as the entries */
4226 ring_buffer_overrun_cpu(buf->buffer, cpu);
4231 get_total_entries(struct array_buffer *buf,
4232 unsigned long *total, unsigned long *entries)
4240 for_each_tracing_cpu(cpu) {
4241 get_total_entries_cpu(buf, &t, &e, cpu);
4247 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4249 unsigned long total, entries;
4254 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4259 unsigned long trace_total_entries(struct trace_array *tr)
4261 unsigned long total, entries;
4266 get_total_entries(&tr->array_buffer, &total, &entries);
4271 static void print_lat_help_header(struct seq_file *m)
4273 seq_puts(m, "# _------=> CPU# \n"
4274 "# / _-----=> irqs-off/BH-disabled\n"
4275 "# | / _----=> need-resched \n"
4276 "# || / _---=> hardirq/softirq \n"
4277 "# ||| / _--=> preempt-depth \n"
4278 "# |||| / _-=> migrate-disable \n"
4279 "# ||||| / delay \n"
4280 "# cmd pid |||||| time | caller \n"
4281 "# \\ / |||||| \\ | / \n");
4284 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4286 unsigned long total;
4287 unsigned long entries;
4289 get_total_entries(buf, &total, &entries);
4290 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4291 entries, total, num_online_cpus());
4295 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4298 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4300 print_event_info(buf, m);
4302 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4303 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4306 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4309 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4310 static const char space[] = " ";
4311 int prec = tgid ? 12 : 2;
4313 print_event_info(buf, m);
4315 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4316 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4317 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4318 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4319 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4320 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4321 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4322 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4326 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4328 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4329 struct array_buffer *buf = iter->array_buffer;
4330 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4331 struct tracer *type = iter->trace;
4332 unsigned long entries;
4333 unsigned long total;
4334 const char *name = type->name;
4336 get_total_entries(buf, &total, &entries);
4338 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4340 seq_puts(m, "# -----------------------------------"
4341 "---------------------------------\n");
4342 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4343 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4344 nsecs_to_usecs(data->saved_latency),
4348 preempt_model_none() ? "server" :
4349 preempt_model_voluntary() ? "desktop" :
4350 preempt_model_full() ? "preempt" :
4351 preempt_model_rt() ? "preempt_rt" :
4353 /* These are reserved for later use */
4356 seq_printf(m, " #P:%d)\n", num_online_cpus());
4360 seq_puts(m, "# -----------------\n");
4361 seq_printf(m, "# | task: %.16s-%d "
4362 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4363 data->comm, data->pid,
4364 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4365 data->policy, data->rt_priority);
4366 seq_puts(m, "# -----------------\n");
4368 if (data->critical_start) {
4369 seq_puts(m, "# => started at: ");
4370 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4371 trace_print_seq(m, &iter->seq);
4372 seq_puts(m, "\n# => ended at: ");
4373 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4374 trace_print_seq(m, &iter->seq);
4375 seq_puts(m, "\n#\n");
4381 static void test_cpu_buff_start(struct trace_iterator *iter)
4383 struct trace_seq *s = &iter->seq;
4384 struct trace_array *tr = iter->tr;
4386 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4389 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4392 if (cpumask_available(iter->started) &&
4393 cpumask_test_cpu(iter->cpu, iter->started))
4396 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4399 if (cpumask_available(iter->started))
4400 cpumask_set_cpu(iter->cpu, iter->started);
4402 /* Don't print started cpu buffer for the first entry of the trace */
4404 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4408 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4410 struct trace_array *tr = iter->tr;
4411 struct trace_seq *s = &iter->seq;
4412 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4413 struct trace_entry *entry;
4414 struct trace_event *event;
4418 test_cpu_buff_start(iter);
4420 event = ftrace_find_event(entry->type);
4422 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4423 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4424 trace_print_lat_context(iter);
4426 trace_print_context(iter);
4429 if (trace_seq_has_overflowed(s))
4430 return TRACE_TYPE_PARTIAL_LINE;
4433 if (tr->trace_flags & TRACE_ITER_FIELDS)
4434 return print_event_fields(iter, event);
4435 return event->funcs->trace(iter, sym_flags, event);
4438 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4440 return trace_handle_return(s);
4443 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4445 struct trace_array *tr = iter->tr;
4446 struct trace_seq *s = &iter->seq;
4447 struct trace_entry *entry;
4448 struct trace_event *event;
4452 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4453 trace_seq_printf(s, "%d %d %llu ",
4454 entry->pid, iter->cpu, iter->ts);
4456 if (trace_seq_has_overflowed(s))
4457 return TRACE_TYPE_PARTIAL_LINE;
4459 event = ftrace_find_event(entry->type);
4461 return event->funcs->raw(iter, 0, event);
4463 trace_seq_printf(s, "%d ?\n", entry->type);
4465 return trace_handle_return(s);
4468 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4470 struct trace_array *tr = iter->tr;
4471 struct trace_seq *s = &iter->seq;
4472 unsigned char newline = '\n';
4473 struct trace_entry *entry;
4474 struct trace_event *event;
4478 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4479 SEQ_PUT_HEX_FIELD(s, entry->pid);
4480 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4481 SEQ_PUT_HEX_FIELD(s, iter->ts);
4482 if (trace_seq_has_overflowed(s))
4483 return TRACE_TYPE_PARTIAL_LINE;
4486 event = ftrace_find_event(entry->type);
4488 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4489 if (ret != TRACE_TYPE_HANDLED)
4493 SEQ_PUT_FIELD(s, newline);
4495 return trace_handle_return(s);
4498 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4500 struct trace_array *tr = iter->tr;
4501 struct trace_seq *s = &iter->seq;
4502 struct trace_entry *entry;
4503 struct trace_event *event;
4507 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4508 SEQ_PUT_FIELD(s, entry->pid);
4509 SEQ_PUT_FIELD(s, iter->cpu);
4510 SEQ_PUT_FIELD(s, iter->ts);
4511 if (trace_seq_has_overflowed(s))
4512 return TRACE_TYPE_PARTIAL_LINE;
4515 event = ftrace_find_event(entry->type);
4516 return event ? event->funcs->binary(iter, 0, event) :
4520 int trace_empty(struct trace_iterator *iter)
4522 struct ring_buffer_iter *buf_iter;
4525 /* If we are looking at one CPU buffer, only check that one */
4526 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4527 cpu = iter->cpu_file;
4528 buf_iter = trace_buffer_iter(iter, cpu);
4530 if (!ring_buffer_iter_empty(buf_iter))
4533 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4539 for_each_tracing_cpu(cpu) {
4540 buf_iter = trace_buffer_iter(iter, cpu);
4542 if (!ring_buffer_iter_empty(buf_iter))
4545 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4553 /* Called with trace_event_read_lock() held. */
4554 enum print_line_t print_trace_line(struct trace_iterator *iter)
4556 struct trace_array *tr = iter->tr;
4557 unsigned long trace_flags = tr->trace_flags;
4558 enum print_line_t ret;
4560 if (iter->lost_events) {
4561 if (iter->lost_events == (unsigned long)-1)
4562 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4565 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4566 iter->cpu, iter->lost_events);
4567 if (trace_seq_has_overflowed(&iter->seq))
4568 return TRACE_TYPE_PARTIAL_LINE;
4571 if (iter->trace && iter->trace->print_line) {
4572 ret = iter->trace->print_line(iter);
4573 if (ret != TRACE_TYPE_UNHANDLED)
4577 if (iter->ent->type == TRACE_BPUTS &&
4578 trace_flags & TRACE_ITER_PRINTK &&
4579 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4580 return trace_print_bputs_msg_only(iter);
4582 if (iter->ent->type == TRACE_BPRINT &&
4583 trace_flags & TRACE_ITER_PRINTK &&
4584 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4585 return trace_print_bprintk_msg_only(iter);
4587 if (iter->ent->type == TRACE_PRINT &&
4588 trace_flags & TRACE_ITER_PRINTK &&
4589 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4590 return trace_print_printk_msg_only(iter);
4592 if (trace_flags & TRACE_ITER_BIN)
4593 return print_bin_fmt(iter);
4595 if (trace_flags & TRACE_ITER_HEX)
4596 return print_hex_fmt(iter);
4598 if (trace_flags & TRACE_ITER_RAW)
4599 return print_raw_fmt(iter);
4601 return print_trace_fmt(iter);
4604 void trace_latency_header(struct seq_file *m)
4606 struct trace_iterator *iter = m->private;
4607 struct trace_array *tr = iter->tr;
4609 /* print nothing if the buffers are empty */
4610 if (trace_empty(iter))
4613 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4614 print_trace_header(m, iter);
4616 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4617 print_lat_help_header(m);
4620 void trace_default_header(struct seq_file *m)
4622 struct trace_iterator *iter = m->private;
4623 struct trace_array *tr = iter->tr;
4624 unsigned long trace_flags = tr->trace_flags;
4626 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4629 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4630 /* print nothing if the buffers are empty */
4631 if (trace_empty(iter))
4633 print_trace_header(m, iter);
4634 if (!(trace_flags & TRACE_ITER_VERBOSE))
4635 print_lat_help_header(m);
4637 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4638 if (trace_flags & TRACE_ITER_IRQ_INFO)
4639 print_func_help_header_irq(iter->array_buffer,
4642 print_func_help_header(iter->array_buffer, m,
4648 static void test_ftrace_alive(struct seq_file *m)
4650 if (!ftrace_is_dead())
4652 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4653 "# MAY BE MISSING FUNCTION EVENTS\n");
4656 #ifdef CONFIG_TRACER_MAX_TRACE
4657 static void show_snapshot_main_help(struct seq_file *m)
4659 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4660 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4661 "# Takes a snapshot of the main buffer.\n"
4662 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4663 "# (Doesn't have to be '2' works with any number that\n"
4664 "# is not a '0' or '1')\n");
4667 static void show_snapshot_percpu_help(struct seq_file *m)
4669 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4670 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4671 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4672 "# Takes a snapshot of the main buffer for this cpu.\n");
4674 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4675 "# Must use main snapshot file to allocate.\n");
4677 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4678 "# (Doesn't have to be '2' works with any number that\n"
4679 "# is not a '0' or '1')\n");
4682 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4684 if (iter->tr->allocated_snapshot)
4685 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4687 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4689 seq_puts(m, "# Snapshot commands:\n");
4690 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4691 show_snapshot_main_help(m);
4693 show_snapshot_percpu_help(m);
4696 /* Should never be called */
4697 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4700 static int s_show(struct seq_file *m, void *v)
4702 struct trace_iterator *iter = v;
4705 if (iter->ent == NULL) {
4707 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4709 test_ftrace_alive(m);
4711 if (iter->snapshot && trace_empty(iter))
4712 print_snapshot_help(m, iter);
4713 else if (iter->trace && iter->trace->print_header)
4714 iter->trace->print_header(m);
4716 trace_default_header(m);
4718 } else if (iter->leftover) {
4720 * If we filled the seq_file buffer earlier, we
4721 * want to just show it now.
4723 ret = trace_print_seq(m, &iter->seq);
4725 /* ret should this time be zero, but you never know */
4726 iter->leftover = ret;
4729 print_trace_line(iter);
4730 ret = trace_print_seq(m, &iter->seq);
4732 * If we overflow the seq_file buffer, then it will
4733 * ask us for this data again at start up.
4735 * ret is 0 if seq_file write succeeded.
4738 iter->leftover = ret;
4745 * Should be used after trace_array_get(); trace_types_lock
4746 * ensures that i_cdev was already initialized.
4748 static inline int tracing_get_cpu(struct inode *inode)
4750 if (inode->i_cdev) /* See trace_create_cpu_file() */
4751 return (long)inode->i_cdev - 1;
4752 return RING_BUFFER_ALL_CPUS;
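/*
 * Note: per-cpu files store cpu + 1 in i_cdev (hence the "- 1" above), so
 * a NULL i_cdev unambiguously means "trace all CPUs".
 */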
4755 static const struct seq_operations tracer_seq_ops = {
4763 * Note, as iter itself can be allocated and freed in different
4764 * ways, this function is only used to free its content, and not
4765 * the iterator itself. The only requirement on all the allocations
4766 * is that they zero all fields (kzalloc), as freeing works with
4767 * either allocated content or NULL.
4769 static void free_trace_iter_content(struct trace_iterator *iter)
4771 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4772 if (iter->fmt != static_fmt_buf)
4776 kfree(iter->buffer_iter);
4777 mutex_destroy(&iter->mutex);
4778 free_cpumask_var(iter->started);
4781 static struct trace_iterator *
4782 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4784 struct trace_array *tr = inode->i_private;
4785 struct trace_iterator *iter;
4788 if (tracing_disabled)
4789 return ERR_PTR(-ENODEV);
4791 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4793 return ERR_PTR(-ENOMEM);
4795 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4797 if (!iter->buffer_iter)
4801 * trace_find_next_entry() may need to save off iter->ent.
4802 * It will place it into the iter->temp buffer. As most
4803 * events are less than 128 bytes, allocate a buffer of that size.
4804 * If one is greater, then trace_find_next_entry() will
4805 * allocate a new buffer to adjust for the bigger iter->ent.
4806 * It's not critical if it fails to get allocated here.
4808 iter->temp = kmalloc(128, GFP_KERNEL);
4810 iter->temp_size = 128;
4813 * trace_event_printf() may need to modify the given format
4814 * string to replace %p with %px so that it shows the real address
4815 * instead of a hash value. However, that is only needed for event
4816 * tracing; other tracers may not need it. Defer the allocation
4817 * until it is needed.
4822 mutex_lock(&trace_types_lock);
4823 iter->trace = tr->current_trace;
4825 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4830 #ifdef CONFIG_TRACER_MAX_TRACE
4831 /* Currently only the top directory has a snapshot */
4832 if (tr->current_trace->print_max || snapshot)
4833 iter->array_buffer = &tr->max_buffer;
4836 iter->array_buffer = &tr->array_buffer;
4837 iter->snapshot = snapshot;
4839 iter->cpu_file = tracing_get_cpu(inode);
4840 mutex_init(&iter->mutex);
4842 /* Notify the tracer early; before we stop tracing. */
4843 if (iter->trace->open)
4844 iter->trace->open(iter);
4846 /* Annotate start of buffers if we had overruns */
4847 if (ring_buffer_overruns(iter->array_buffer->buffer))
4848 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4850 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4851 if (trace_clocks[tr->clock_id].in_ns)
4852 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4855 * If pause-on-trace is enabled, then stop the trace while
4856 * dumping, unless this is the "snapshot" file
4858 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4859 tracing_stop_tr(tr);
4861 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4862 for_each_tracing_cpu(cpu) {
4863 iter->buffer_iter[cpu] =
4864 ring_buffer_read_prepare(iter->array_buffer->buffer,
4867 ring_buffer_read_prepare_sync();
4868 for_each_tracing_cpu(cpu) {
4869 ring_buffer_read_start(iter->buffer_iter[cpu]);
4870 tracing_iter_reset(iter, cpu);
4873 cpu = iter->cpu_file;
4874 iter->buffer_iter[cpu] =
4875 ring_buffer_read_prepare(iter->array_buffer->buffer,
4877 ring_buffer_read_prepare_sync();
4878 ring_buffer_read_start(iter->buffer_iter[cpu]);
4879 tracing_iter_reset(iter, cpu);
4882 mutex_unlock(&trace_types_lock);
4887 mutex_unlock(&trace_types_lock);
4888 free_trace_iter_content(iter);
4890 seq_release_private(inode, file);
4891 return ERR_PTR(-ENOMEM);
4894 int tracing_open_generic(struct inode *inode, struct file *filp)
4898 ret = tracing_check_open_get_tr(NULL);
4902 filp->private_data = inode->i_private;
4906 bool tracing_is_disabled(void)
4908 return (tracing_disabled) ? true : false;
4912 * Open and update trace_array ref count.
4913 * Must have the current trace_array passed to it.
4915 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4917 struct trace_array *tr = inode->i_private;
4920 ret = tracing_check_open_get_tr(tr);
4924 filp->private_data = inode->i_private;
4930 * The private pointer of the inode is the trace_event_file.
4931 * Update the tr ref count associated with it.
4933 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4935 struct trace_event_file *file = inode->i_private;
4938 ret = tracing_check_open_get_tr(file->tr);
4942 mutex_lock(&event_mutex);
4944 /* Fail if the file is marked for removal */
4945 if (file->flags & EVENT_FILE_FL_FREED) {
4946 trace_array_put(file->tr);
4949 event_file_get(file);
4952 mutex_unlock(&event_mutex);
4956 filp->private_data = inode->i_private;
4961 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4963 struct trace_event_file *file = inode->i_private;
4965 trace_array_put(file->tr);
4966 event_file_put(file);
4971 static int tracing_mark_open(struct inode *inode, struct file *filp)
4973 stream_open(inode, filp);
4974 return tracing_open_generic_tr(inode, filp);
4977 static int tracing_release(struct inode *inode, struct file *file)
4979 struct trace_array *tr = inode->i_private;
4980 struct seq_file *m = file->private_data;
4981 struct trace_iterator *iter;
4984 if (!(file->f_mode & FMODE_READ)) {
4985 trace_array_put(tr);
4989 /* Writes do not use seq_file */
4991 mutex_lock(&trace_types_lock);
4993 for_each_tracing_cpu(cpu) {
4994 if (iter->buffer_iter[cpu])
4995 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4998 if (iter->trace && iter->trace->close)
4999 iter->trace->close(iter);
5001 if (!iter->snapshot && tr->stop_count)
5002 /* re-enable tracing if it was previously enabled */
5003 tracing_start_tr(tr);
5005 __trace_array_put(tr);
5007 mutex_unlock(&trace_types_lock);
5009 free_trace_iter_content(iter);
5010 seq_release_private(inode, file);
5015 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
5017 struct trace_array *tr = inode->i_private;
5019 trace_array_put(tr);
5023 static int tracing_single_release_tr(struct inode *inode, struct file *file)
5025 struct trace_array *tr = inode->i_private;
5027 trace_array_put(tr);
5029 return single_release(inode, file);
5032 static int tracing_open(struct inode *inode, struct file *file)
5034 struct trace_array *tr = inode->i_private;
5035 struct trace_iterator *iter;
5038 ret = tracing_check_open_get_tr(tr);
5042 /* If this file was open for write, then erase contents */
5043 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5044 int cpu = tracing_get_cpu(inode);
5045 struct array_buffer *trace_buf = &tr->array_buffer;
5047 #ifdef CONFIG_TRACER_MAX_TRACE
5048 if (tr->current_trace->print_max)
5049 trace_buf = &tr->max_buffer;
5052 if (cpu == RING_BUFFER_ALL_CPUS)
5053 tracing_reset_online_cpus(trace_buf);
5055 tracing_reset_cpu(trace_buf, cpu);
5058 if (file->f_mode & FMODE_READ) {
5059 iter = __tracing_open(inode, file, false);
5061 ret = PTR_ERR(iter);
5062 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5063 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5067 trace_array_put(tr);
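/*
 * Illustrative usage of the "trace" file handled above (a sketch; the
 * path assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *   cat /sys/kernel/tracing/trace     # read the static buffer contents
 *   echo > /sys/kernel/tracing/trace  # a truncating open clears the buffer
 */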
5073 * Some tracers are not suitable for instance buffers.
5074 * A tracer is always available for the global array (toplevel)
5075 * or if it explicitly states that it is.
5078 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5080 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5083 /* Find the next tracer that this trace array may use */
5084 static struct tracer *
5085 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5087 while (t && !trace_ok_for_array(t, tr))
5094 t_next(struct seq_file *m, void *v, loff_t *pos)
5096 struct trace_array *tr = m->private;
5097 struct tracer *t = v;
5102 t = get_tracer_for_array(tr, t->next);
5107 static void *t_start(struct seq_file *m, loff_t *pos)
5109 struct trace_array *tr = m->private;
5113 mutex_lock(&trace_types_lock);
5115 t = get_tracer_for_array(tr, trace_types);
5116 for (; t && l < *pos; t = t_next(m, t, &l))
5122 static void t_stop(struct seq_file *m, void *p)
5124 mutex_unlock(&trace_types_lock);
5127 static int t_show(struct seq_file *m, void *v)
5129 struct tracer *t = v;
5134 seq_puts(m, t->name);
5143 static const struct seq_operations show_traces_seq_ops = {
5150 static int show_traces_open(struct inode *inode, struct file *file)
5152 struct trace_array *tr = inode->i_private;
5156 ret = tracing_check_open_get_tr(tr);
5160 ret = seq_open(file, &show_traces_seq_ops);
5162 trace_array_put(tr);
5166 m = file->private_data;
5172 static int show_traces_release(struct inode *inode, struct file *file)
5174 struct trace_array *tr = inode->i_private;
5176 trace_array_put(tr);
5177 return seq_release(inode, file);
5181 tracing_write_stub(struct file *filp, const char __user *ubuf,
5182 size_t count, loff_t *ppos)
5187 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5191 if (file->f_mode & FMODE_READ)
5192 ret = seq_lseek(file, offset, whence);
5194 file->f_pos = ret = 0;
5199 static const struct file_operations tracing_fops = {
5200 .open = tracing_open,
5202 .read_iter = seq_read_iter,
5203 .splice_read = copy_splice_read,
5204 .write = tracing_write_stub,
5205 .llseek = tracing_lseek,
5206 .release = tracing_release,
5209 static const struct file_operations show_traces_fops = {
5210 .open = show_traces_open,
5212 .llseek = seq_lseek,
5213 .release = show_traces_release,
5217 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5218 size_t count, loff_t *ppos)
5220 struct trace_array *tr = file_inode(filp)->i_private;
5224 len = snprintf(NULL, 0, "%*pb\n",
5225 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5226 mask_str = kmalloc(len, GFP_KERNEL);
5230 len = snprintf(mask_str, len, "%*pb\n",
5231 cpumask_pr_args(tr->tracing_cpumask));
5236 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5244 int tracing_set_cpumask(struct trace_array *tr,
5245 cpumask_var_t tracing_cpumask_new)
5252 local_irq_disable();
5253 arch_spin_lock(&tr->max_lock);
5254 for_each_tracing_cpu(cpu) {
5256 * Increase/decrease the disabled counter if we are
5257 * about to flip a bit in the cpumask:
5259 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5260 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5261 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5262 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5263 #ifdef CONFIG_TRACER_MAX_TRACE
5264 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5267 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5268 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5269 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5270 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5271 #ifdef CONFIG_TRACER_MAX_TRACE
5272 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5276 arch_spin_unlock(&tr->max_lock);
5279 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5285 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5286 size_t count, loff_t *ppos)
5288 struct trace_array *tr = file_inode(filp)->i_private;
5289 cpumask_var_t tracing_cpumask_new;
5292 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5295 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5299 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5303 free_cpumask_var(tracing_cpumask_new);
5308 free_cpumask_var(tracing_cpumask_new);
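/*
 * Illustrative usage of tracing_cpumask (a sketch; the path assumes
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 3 > /sys/kernel/tracing/tracing_cpumask   # trace only CPUs 0 and 1
 *
 * The hex mask is parsed by cpumask_parse_user() above and applied by
 * tracing_set_cpumask(), which disables recording on CPUs removed from
 * the mask and re-enables it on CPUs that were added.
 */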
5313 static const struct file_operations tracing_cpumask_fops = {
5314 .open = tracing_open_generic_tr,
5315 .read = tracing_cpumask_read,
5316 .write = tracing_cpumask_write,
5317 .release = tracing_release_generic_tr,
5318 .llseek = generic_file_llseek,
5321 static int tracing_trace_options_show(struct seq_file *m, void *v)
5323 struct tracer_opt *trace_opts;
5324 struct trace_array *tr = m->private;
5328 mutex_lock(&trace_types_lock);
5329 tracer_flags = tr->current_trace->flags->val;
5330 trace_opts = tr->current_trace->flags->opts;
5332 for (i = 0; trace_options[i]; i++) {
5333 if (tr->trace_flags & (1 << i))
5334 seq_printf(m, "%s\n", trace_options[i]);
5336 seq_printf(m, "no%s\n", trace_options[i]);
5339 for (i = 0; trace_opts[i].name; i++) {
5340 if (tracer_flags & trace_opts[i].bit)
5341 seq_printf(m, "%s\n", trace_opts[i].name);
5343 seq_printf(m, "no%s\n", trace_opts[i].name);
5345 mutex_unlock(&trace_types_lock);
5350 static int __set_tracer_option(struct trace_array *tr,
5351 struct tracer_flags *tracer_flags,
5352 struct tracer_opt *opts, int neg)
5354 struct tracer *trace = tracer_flags->trace;
5357 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5362 tracer_flags->val &= ~opts->bit;
5364 tracer_flags->val |= opts->bit;
5368 /* Try to assign a tracer specific option */
5369 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5371 struct tracer *trace = tr->current_trace;
5372 struct tracer_flags *tracer_flags = trace->flags;
5373 struct tracer_opt *opts = NULL;
5376 for (i = 0; tracer_flags->opts[i].name; i++) {
5377 opts = &tracer_flags->opts[i];
5379 if (strcmp(cmp, opts->name) == 0)
5380 return __set_tracer_option(tr, trace->flags, opts, neg);
5386 /* Some tracers require overwrite to stay enabled */
5387 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5389 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5395 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5399 if ((mask == TRACE_ITER_RECORD_TGID) ||
5400 (mask == TRACE_ITER_RECORD_CMD))
5401 lockdep_assert_held(&event_mutex);
5403 /* do nothing if flag is already set */
5404 if (!!(tr->trace_flags & mask) == !!enabled)
5407 /* Give the tracer a chance to approve the change */
5408 if (tr->current_trace->flag_changed)
5409 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5413 tr->trace_flags |= mask;
5415 tr->trace_flags &= ~mask;
5417 if (mask == TRACE_ITER_RECORD_CMD)
5418 trace_event_enable_cmd_record(enabled);
5420 if (mask == TRACE_ITER_RECORD_TGID) {
5422 tgid_map_max = pid_max;
5423 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5427 * Pairs with smp_load_acquire() in
5428 * trace_find_tgid_ptr() to ensure that if it observes
5429 * the tgid_map we just allocated then it also observes
5430 * the corresponding tgid_map_max value.
5432 smp_store_release(&tgid_map, map);
5435 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5439 trace_event_enable_tgid_record(enabled);
5442 if (mask == TRACE_ITER_EVENT_FORK)
5443 trace_event_follow_fork(tr, enabled);
5445 if (mask == TRACE_ITER_FUNC_FORK)
5446 ftrace_pid_follow_fork(tr, enabled);
5448 if (mask == TRACE_ITER_OVERWRITE) {
5449 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5450 #ifdef CONFIG_TRACER_MAX_TRACE
5451 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5455 if (mask == TRACE_ITER_PRINTK) {
5456 trace_printk_start_stop_comm(enabled);
5457 trace_printk_control(enabled);
5463 int trace_set_options(struct trace_array *tr, char *option)
5468 size_t orig_len = strlen(option);
5471 cmp = strstrip(option);
5473 len = str_has_prefix(cmp, "no");
5479 mutex_lock(&event_mutex);
5480 mutex_lock(&trace_types_lock);
5482 ret = match_string(trace_options, -1, cmp);
5483 /* If no option could be set, test the specific tracer options */
5485 ret = set_tracer_option(tr, cmp, neg);
5487 ret = set_tracer_flag(tr, 1 << ret, !neg);
5489 mutex_unlock(&trace_types_lock);
5490 mutex_unlock(&event_mutex);
5493 * If the first trailing whitespace is replaced with '\0' by strstrip,
5494 * turn it back into a space.
5496 if (orig_len > strlen(option))
5497 option[strlen(option)] = ' ';
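/*
 * Illustrative option strings accepted by trace_set_options() above
 * (a sketch; the path assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo overwrite   > /sys/kernel/tracing/trace_options   # set a global flag
 *   echo nooverwrite > /sys/kernel/tracing/trace_options   # clear it again
 *
 * A leading "no" negates the option; names that do not match a global
 * flag are handed to the current tracer via set_tracer_option().
 */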
5502 static void __init apply_trace_boot_options(void)
5504 char *buf = trace_boot_options_buf;
5508 option = strsep(&buf, ",");
5514 trace_set_options(&global_trace, option);
5516 /* Put back the comma to allow this to be called again */
5523 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5524 size_t cnt, loff_t *ppos)
5526 struct seq_file *m = filp->private_data;
5527 struct trace_array *tr = m->private;
5531 if (cnt >= sizeof(buf))
5534 if (copy_from_user(buf, ubuf, cnt))
5539 ret = trace_set_options(tr, buf);
5548 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5550 struct trace_array *tr = inode->i_private;
5553 ret = tracing_check_open_get_tr(tr);
5557 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5559 trace_array_put(tr);
5564 static const struct file_operations tracing_iter_fops = {
5565 .open = tracing_trace_options_open,
5567 .llseek = seq_lseek,
5568 .release = tracing_single_release_tr,
5569 .write = tracing_trace_options_write,
5572 static const char readme_msg[] =
5573 "tracing mini-HOWTO:\n\n"
5574 "# echo 0 > tracing_on : quick way to disable tracing\n"
5575 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5576 " Important files:\n"
5577 " trace\t\t\t- The static contents of the buffer\n"
5578 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5579 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5580 " current_tracer\t- function and latency tracers\n"
5581 " available_tracers\t- list of configured tracers for current_tracer\n"
5582 " error_log\t- error log for failed commands (that support it)\n"
5583 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5584 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5585 " trace_clock\t\t- change the clock used to order events\n"
5586 " local: Per cpu clock but may not be synced across CPUs\n"
5587 " global: Synced across CPUs but slows tracing down.\n"
5588 " counter: Not a clock, but just an increment\n"
5589 " uptime: Jiffy counter from time of boot\n"
5590 " perf: Same clock that perf events use\n"
5591 #ifdef CONFIG_X86_64
5592 " x86-tsc: TSC cycle counter\n"
5594 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5595 " delta: Delta difference against a buffer-wide timestamp\n"
5596 " absolute: Absolute (standalone) timestamp\n"
5597 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5598 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5599 " tracing_cpumask\t- Limit which CPUs to trace\n"
5600 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5601 "\t\t\t Remove sub-buffer with rmdir\n"
5602 " trace_options\t\t- Set format or modify how tracing happens\n"
5603 "\t\t\t Disable an option by prefixing 'no' to the\n"
5604 "\t\t\t option name\n"
5605 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5606 #ifdef CONFIG_DYNAMIC_FTRACE
5607 "\n available_filter_functions - list of functions that can be filtered on\n"
5608 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5609 "\t\t\t functions\n"
5610 "\t accepts: func_full_name or glob-matching-pattern\n"
5611 "\t modules: Can select a group via module\n"
5612 "\t Format: :mod:<module-name>\n"
5613 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5614 "\t triggers: a command to perform when function is hit\n"
5615 "\t Format: <function>:<trigger>[:count]\n"
5616 "\t trigger: traceon, traceoff\n"
5617 "\t\t enable_event:<system>:<event>\n"
5618 "\t\t disable_event:<system>:<event>\n"
5619 #ifdef CONFIG_STACKTRACE
5622 #ifdef CONFIG_TRACER_SNAPSHOT
5627 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5628 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5629 "\t The first one will disable tracing every time do_fault is hit\n"
5630 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5631 "\t The first time do trap is hit and it disables tracing, the\n"
5632 "\t counter will decrement to 2. If tracing is already disabled,\n"
5633 "\t the counter will not decrement. It only decrements when the\n"
5634 "\t trigger did work\n"
5635 "\t To remove trigger without count:\n"
5636 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5637 "\t To remove trigger with a count:\n"
5638 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5639 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5640 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5641 "\t modules: Can select a group via module command :mod:\n"
5642 "\t Does not accept triggers\n"
5643 #endif /* CONFIG_DYNAMIC_FTRACE */
5644 #ifdef CONFIG_FUNCTION_TRACER
5645 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5647 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5650 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5651 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5652 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5653 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5655 #ifdef CONFIG_TRACER_SNAPSHOT
5656 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5657 "\t\t\t snapshot buffer. Read the contents for more\n"
5658 "\t\t\t information\n"
5660 #ifdef CONFIG_STACK_TRACER
5661 " stack_trace\t\t- Shows the max stack trace when active\n"
5662 " stack_max_size\t- Shows current max stack size that was traced\n"
5663 "\t\t\t Write into this file to reset the max size (trigger a\n"
5664 "\t\t\t new trace)\n"
5665 #ifdef CONFIG_DYNAMIC_FTRACE
5666 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5669 #endif /* CONFIG_STACK_TRACER */
5670 #ifdef CONFIG_DYNAMIC_EVENTS
5671 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5672 "\t\t\t Write into this file to define/undefine new trace events.\n"
5674 #ifdef CONFIG_KPROBE_EVENTS
5675 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5676 "\t\t\t Write into this file to define/undefine new trace events.\n"
5678 #ifdef CONFIG_UPROBE_EVENTS
5679 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5680 "\t\t\t Write into this file to define/undefine new trace events.\n"
5682 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5683 defined(CONFIG_FPROBE_EVENTS)
5684 "\t accepts: event-definitions (one definition per line)\n"
5685 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5686 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5687 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5689 #ifdef CONFIG_FPROBE_EVENTS
5690 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5691 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5693 #ifdef CONFIG_HIST_TRIGGERS
5694 "\t s:[synthetic/]<event> <field> [<field>]\n"
5696 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5697 "\t -:[<group>/][<event>]\n"
5698 #ifdef CONFIG_KPROBE_EVENTS
5699 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5700 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5702 #ifdef CONFIG_UPROBE_EVENTS
5703 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5705 "\t args: <name>=fetcharg[:type]\n"
5706 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5707 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5708 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5709 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5710 "\t <argname>[->field[->field|.field...]],\n"
5712 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5715 "\t $stack<index>, $stack, $retval, $comm,\n"
5717 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5718 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5719 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5720 "\t symstr, <type>\\[<array-size>\\]\n"
5721 #ifdef CONFIG_HIST_TRIGGERS
5722 "\t field: <stype> <name>;\n"
5723 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5724 "\t [unsigned] char/int/long\n"
5726 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5727 "\t of the <attached-group>/<attached-event>.\n"
5729 " events/\t\t- Directory containing all trace event subsystems:\n"
5730 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5731 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5732 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5734 " filter\t\t- If set, only events passing filter are traced\n"
5735 " events/<system>/<event>/\t- Directory containing control files for\n"
5737 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5738 " filter\t\t- If set, only events passing filter are traced\n"
5739 " trigger\t\t- If set, a command to perform when event is hit\n"
5740 "\t Format: <trigger>[:count][if <filter>]\n"
5741 "\t trigger: traceon, traceoff\n"
5742 "\t enable_event:<system>:<event>\n"
5743 "\t disable_event:<system>:<event>\n"
5744 #ifdef CONFIG_HIST_TRIGGERS
5745 "\t enable_hist:<system>:<event>\n"
5746 "\t disable_hist:<system>:<event>\n"
5748 #ifdef CONFIG_STACKTRACE
5751 #ifdef CONFIG_TRACER_SNAPSHOT
5754 #ifdef CONFIG_HIST_TRIGGERS
5755 "\t\t hist (see below)\n"
5757 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5758 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5759 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5760 "\t events/block/block_unplug/trigger\n"
5761 "\t The first disables tracing every time block_unplug is hit.\n"
5762 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5763 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5764 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5765 "\t Like function triggers, the counter is only decremented if it\n"
5766 "\t enabled or disabled tracing.\n"
5767 "\t To remove a trigger without a count:\n"
5768 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5769 "\t To remove a trigger with a count:\n"
5770 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5771 "\t Filters can be ignored when removing a trigger.\n"
5772 #ifdef CONFIG_HIST_TRIGGERS
5773 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5774 "\t Format: hist:keys=<field1[,field2,...]>\n"
5775 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5776 "\t [:values=<field1[,field2,...]>]\n"
5777 "\t [:sort=<field1[,field2,...]>]\n"
5778 "\t [:size=#entries]\n"
5779 "\t [:pause][:continue][:clear]\n"
5780 "\t [:name=histname1]\n"
5781 "\t [:nohitcount]\n"
5782 "\t [:<handler>.<action>]\n"
5783 "\t [if <filter>]\n\n"
5784 "\t Note, special fields can be used as well:\n"
5785 "\t common_timestamp - to record current timestamp\n"
5786 "\t common_cpu - to record the CPU the event happened on\n"
5788 "\t A hist trigger variable can be:\n"
5789 "\t - a reference to a field e.g. x=current_timestamp,\n"
5790 "\t - a reference to another variable e.g. y=$x,\n"
5791 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5792 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5794 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5795 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5796 "\t variable reference, field or numeric literal.\n"
5798 "\t When a matching event is hit, an entry is added to a hash\n"
5799 "\t table using the key(s) and value(s) named, and the value of a\n"
5800 "\t sum called 'hitcount' is incremented. Keys and values\n"
5801 "\t correspond to fields in the event's format description. Keys\n"
5802 "\t can be any field, or the special string 'common_stacktrace'.\n"
5803 "\t Compound keys consisting of up to two fields can be specified\n"
5804 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5805 "\t fields. Sort keys consisting of up to two fields can be\n"
5806 "\t specified using the 'sort' keyword. The sort direction can\n"
5807 "\t be modified by appending '.descending' or '.ascending' to a\n"
5808 "\t sort field. The 'size' parameter can be used to specify more\n"
5809 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5810 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5811 "\t its histogram data will be shared with other triggers of the\n"
5812 "\t same name, and trigger hits will update this common data.\n\n"
5813 "\t Reading the 'hist' file for the event will dump the hash\n"
5814 "\t table in its entirety to stdout. If there are multiple hist\n"
5815 "\t triggers attached to an event, there will be a table for each\n"
5816 "\t trigger in the output. The table displayed for a named\n"
5817 "\t trigger will be the same as any other instance having the\n"
5818 "\t same name. The default format used to display a given field\n"
5819 "\t can be modified by appending any of the following modifiers\n"
5820 "\t to the field name, as applicable:\n\n"
5821 "\t .hex display a number as a hex value\n"
5822 "\t .sym display an address as a symbol\n"
5823 "\t .sym-offset display an address as a symbol and offset\n"
5824 "\t .execname display a common_pid as a program name\n"
5825 "\t .syscall display a syscall id as a syscall name\n"
5826 "\t .log2 display log2 value rather than raw number\n"
5827 "\t .buckets=size display values in groups of size rather than raw number\n"
5828 "\t .usecs display a common_timestamp in microseconds\n"
5829 "\t .percent display a number of percentage value\n"
5830 "\t .graph display a bar-graph of a value\n\n"
5831 "\t The 'pause' parameter can be used to pause an existing hist\n"
5832 "\t trigger or to start a hist trigger but not log any events\n"
5833 "\t until told to do so. 'continue' can be used to start or\n"
5834 "\t restart a paused hist trigger.\n\n"
5835 "\t The 'clear' parameter will clear the contents of a running\n"
5836 "\t hist trigger and leave its current paused/active state\n"
5838 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5839 "\t raw hitcount in the histogram.\n\n"
5840 "\t The enable_hist and disable_hist triggers can be used to\n"
5841 "\t have one event conditionally start and stop another event's\n"
5842 "\t already-attached hist trigger. The syntax is analogous to\n"
5843 "\t the enable_event and disable_event triggers.\n\n"
5844 "\t Hist trigger handlers and actions are executed whenever a\n"
5845 "\t a histogram entry is added or updated. They take the form:\n\n"
5846 "\t <handler>.<action>\n\n"
5847 "\t The available handlers are:\n\n"
5848 "\t onmatch(matching.event) - invoke on addition or update\n"
5849 "\t onmax(var) - invoke if var exceeds current max\n"
5850 "\t onchange(var) - invoke action if var changes\n\n"
5851 "\t The available actions are:\n\n"
5852 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5853 "\t save(field,...) - save current event fields\n"
5854 #ifdef CONFIG_TRACER_SNAPSHOT
5855 "\t snapshot() - snapshot the trace buffer\n\n"
5857 #ifdef CONFIG_SYNTH_EVENTS
5858 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5859 "\t Write into this file to define/undefine new synthetic events.\n"
5860 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5866 tracing_readme_read(struct file *filp, char __user *ubuf,
5867 size_t cnt, loff_t *ppos)
5869 return simple_read_from_buffer(ubuf, cnt, ppos,
5870 readme_msg, strlen(readme_msg));
5873 static const struct file_operations tracing_readme_fops = {
5874 .open = tracing_open_generic,
5875 .read = tracing_readme_read,
5876 .llseek = generic_file_llseek,
5879 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5883 return trace_find_tgid_ptr(pid);
5886 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5890 return trace_find_tgid_ptr(pid);
5893 static void saved_tgids_stop(struct seq_file *m, void *v)
5897 static int saved_tgids_show(struct seq_file *m, void *v)
5899 int *entry = (int *)v;
5900 int pid = entry - tgid_map;
5906 seq_printf(m, "%d %d\n", pid, tgid);
5910 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5911 .start = saved_tgids_start,
5912 .stop = saved_tgids_stop,
5913 .next = saved_tgids_next,
5914 .show = saved_tgids_show,
5917 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5921 ret = tracing_check_open_get_tr(NULL);
5925 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5929 static const struct file_operations tracing_saved_tgids_fops = {
5930 .open = tracing_saved_tgids_open,
5932 .llseek = seq_lseek,
5933 .release = seq_release,
5936 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5938 unsigned int *ptr = v;
5940 if (*pos || m->count)
5945 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5947 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5956 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5962 arch_spin_lock(&trace_cmdline_lock);
5964 v = &savedcmd->map_cmdline_to_pid[0];
5966 v = saved_cmdlines_next(m, v, &l);
5974 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5976 arch_spin_unlock(&trace_cmdline_lock);
5980 static int saved_cmdlines_show(struct seq_file *m, void *v)
5982 char buf[TASK_COMM_LEN];
5983 unsigned int *pid = v;
5985 __trace_find_cmdline(*pid, buf);
5986 seq_printf(m, "%d %s\n", *pid, buf);
5990 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5991 .start = saved_cmdlines_start,
5992 .next = saved_cmdlines_next,
5993 .stop = saved_cmdlines_stop,
5994 .show = saved_cmdlines_show,
5997 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
6001 ret = tracing_check_open_get_tr(NULL);
6005 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
6008 static const struct file_operations tracing_saved_cmdlines_fops = {
6009 .open = tracing_saved_cmdlines_open,
6011 .llseek = seq_lseek,
6012 .release = seq_release,
6016 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
6017 size_t cnt, loff_t *ppos)
6023 arch_spin_lock(&trace_cmdline_lock);
6024 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6025 arch_spin_unlock(&trace_cmdline_lock);
6028 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6031 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
6033 kfree(s->saved_cmdlines);
6034 kfree(s->map_cmdline_to_pid);
6038 static int tracing_resize_saved_cmdlines(unsigned int val)
6040 struct saved_cmdlines_buffer *s, *savedcmd_temp;
6042 s = kmalloc(sizeof(*s), GFP_KERNEL);
6046 if (allocate_cmdlines_buffer(val, s) < 0) {
6052 arch_spin_lock(&trace_cmdline_lock);
6053 savedcmd_temp = savedcmd;
6055 arch_spin_unlock(&trace_cmdline_lock);
6057 free_saved_cmdlines_buffer(savedcmd_temp);
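/*
 * Illustrative usage of saved_cmdlines_size (a sketch; the path assumes
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * The write handler below parses the value with kstrtoul_from_user() and
 * resizes the comm-pid cache via tracing_resize_saved_cmdlines(); the old
 * buffer is only freed after the new one has been installed.
 */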
6063 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6064 size_t cnt, loff_t *ppos)
6069 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6073 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
6074 if (!val || val > PID_MAX_DEFAULT)
6077 ret = tracing_resize_saved_cmdlines((unsigned int)val);
6086 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6087 .open = tracing_open_generic,
6088 .read = tracing_saved_cmdlines_size_read,
6089 .write = tracing_saved_cmdlines_size_write,
6092 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6093 static union trace_eval_map_item *
6094 update_eval_map(union trace_eval_map_item *ptr)
6096 if (!ptr->map.eval_string) {
6097 if (ptr->tail.next) {
6098 ptr = ptr->tail.next;
6099 /* Set ptr to the next real item (skip head) */
6107 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6109 union trace_eval_map_item *ptr = v;
6112 * Paranoid! If ptr points to end, we don't want to increment past it.
6113 * This really should never happen.
6116 ptr = update_eval_map(ptr);
6117 if (WARN_ON_ONCE(!ptr))
6121 ptr = update_eval_map(ptr);
6126 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6128 union trace_eval_map_item *v;
6131 mutex_lock(&trace_eval_mutex);
6133 v = trace_eval_maps;
6137 while (v && l < *pos) {
6138 v = eval_map_next(m, v, &l);
6144 static void eval_map_stop(struct seq_file *m, void *v)
6146 mutex_unlock(&trace_eval_mutex);
6149 static int eval_map_show(struct seq_file *m, void *v)
6151 union trace_eval_map_item *ptr = v;
6153 seq_printf(m, "%s %ld (%s)\n",
6154 ptr->map.eval_string, ptr->map.eval_value,
6160 static const struct seq_operations tracing_eval_map_seq_ops = {
6161 .start = eval_map_start,
6162 .next = eval_map_next,
6163 .stop = eval_map_stop,
6164 .show = eval_map_show,
6167 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6171 ret = tracing_check_open_get_tr(NULL);
6175 return seq_open(filp, &tracing_eval_map_seq_ops);
6178 static const struct file_operations tracing_eval_map_fops = {
6179 .open = tracing_eval_map_open,
6181 .llseek = seq_lseek,
6182 .release = seq_release,
6185 static inline union trace_eval_map_item *
6186 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6188 /* Return tail of array given the head */
6189 return ptr + ptr->head.length + 1;
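/*
 * Layout of one block in the trace_eval_maps list (len + 2 items, built
 * by trace_insert_eval_map_file() below):
 *
 *   [ head: mod, length ] [ map 0 ] ... [ map len - 1 ] [ tail: next ]
 *
 * trace_eval_jmp_to_tail() above therefore skips head.length + 1 items to
 * reach the tail, whose ->next pointer chains the blocks together.
 */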
6193 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6196 struct trace_eval_map **stop;
6197 struct trace_eval_map **map;
6198 union trace_eval_map_item *map_array;
6199 union trace_eval_map_item *ptr;
6204 * The trace_eval_maps contains the map plus a head and tail item,
6205 * where the head holds the module and length of array, and the
6206 * tail holds a pointer to the next list.
6208 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6210 pr_warn("Unable to allocate trace eval mapping\n");
6214 mutex_lock(&trace_eval_mutex);
6216 if (!trace_eval_maps)
6217 trace_eval_maps = map_array;
6219 ptr = trace_eval_maps;
6221 ptr = trace_eval_jmp_to_tail(ptr);
6222 if (!ptr->tail.next)
6224 ptr = ptr->tail.next;
6227 ptr->tail.next = map_array;
6229 map_array->head.mod = mod;
6230 map_array->head.length = len;
6233 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6234 map_array->map = **map;
6237 memset(map_array, 0, sizeof(*map_array));
6239 mutex_unlock(&trace_eval_mutex);
6242 static void trace_create_eval_file(struct dentry *d_tracer)
6244 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6245 NULL, &tracing_eval_map_fops);
6248 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6249 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6250 static inline void trace_insert_eval_map_file(struct module *mod,
6251 struct trace_eval_map **start, int len) { }
6252 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6254 static void trace_insert_eval_map(struct module *mod,
6255 struct trace_eval_map **start, int len)
6257 struct trace_eval_map **map;
6264 trace_event_eval_update(map, len);
6266 trace_insert_eval_map_file(mod, start, len);
6270 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6271 size_t cnt, loff_t *ppos)
6273 struct trace_array *tr = filp->private_data;
6274 char buf[MAX_TRACER_SIZE+2];
6277 mutex_lock(&trace_types_lock);
6278 r = sprintf(buf, "%s\n", tr->current_trace->name);
6279 mutex_unlock(&trace_types_lock);
6281 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6284 int tracer_init(struct tracer *t, struct trace_array *tr)
6286 tracing_reset_online_cpus(&tr->array_buffer);
6290 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6294 for_each_tracing_cpu(cpu)
6295 per_cpu_ptr(buf->data, cpu)->entries = val;
6298 static void update_buffer_entries(struct array_buffer *buf, int cpu)
6300 if (cpu == RING_BUFFER_ALL_CPUS) {
6301 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
6303 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
6307 #ifdef CONFIG_TRACER_MAX_TRACE
6308 /* resize @tr's buffer to the size of @size_tr's entries */
6309 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6310 struct array_buffer *size_buf, int cpu_id)
6314 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6315 for_each_tracing_cpu(cpu) {
6316 ret = ring_buffer_resize(trace_buf->buffer,
6317 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6320 per_cpu_ptr(trace_buf->data, cpu)->entries =
6321 per_cpu_ptr(size_buf->data, cpu)->entries;
6324 ret = ring_buffer_resize(trace_buf->buffer,
6325 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6327 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6328 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6333 #endif /* CONFIG_TRACER_MAX_TRACE */
6335 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6336 unsigned long size, int cpu)
6341 * If the kernel or the user changes the size of the ring buffer,
6342 * we use the size that was given, and we can forget about
6343 * expanding it later.
6345 ring_buffer_expanded = true;
6347 /* May be called before buffers are initialized */
6348 if (!tr->array_buffer.buffer)
6351 /* Do not allow tracing while resizing the ring buffer */
6352 tracing_stop_tr(tr);
6354 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6358 #ifdef CONFIG_TRACER_MAX_TRACE
6359 if (!tr->current_trace->use_max_tr)
6362 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6364 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6365 &tr->array_buffer, cpu);
6368 * AARGH! We are left with different
6369 * size max buffer!!!!
6370 * The max buffer is our "snapshot" buffer.
6371 * When a tracer needs a snapshot (one of the
6372 * latency tracers), it swaps the max buffer
6373 * with the saved snapshot. We succeeded in
6374 * updating the size of the main buffer, but failed to
6375 * update the size of the max buffer. But when we tried
6376 * to reset the main buffer to the original size, we
6377 * failed there too. This is very unlikely to
6378 * happen, but if it does, warn and kill all
6382 tracing_disabled = 1;
6387 update_buffer_entries(&tr->max_buffer, cpu);
6390 #endif /* CONFIG_TRACER_MAX_TRACE */
6392 update_buffer_entries(&tr->array_buffer, cpu);
6394 tracing_start_tr(tr);
6398 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6399 unsigned long size, int cpu_id)
6403 mutex_lock(&trace_types_lock);
6405 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6406 /* make sure, this cpu is enabled in the mask */
6407 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6413 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6418 mutex_unlock(&trace_types_lock);
6425 * tracing_update_buffers - used by tracing facility to expand ring buffers
6427 * To save memory when tracing is never used on a system that has it
6428 * configured in, the ring buffers are set to a minimum size. But once
6429 * a user starts to use the tracing facility, they need to grow
6430 * to their default size.
6432 * This function is to be called when a tracer is about to be used.
6434 int tracing_update_buffers(void)
6438 mutex_lock(&trace_types_lock);
6439 if (!ring_buffer_expanded)
6440 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6441 RING_BUFFER_ALL_CPUS);
6442 mutex_unlock(&trace_types_lock);
6447 struct trace_option_dentry;
6450 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6453 * Used to clear out the tracer before deletion of an instance.
6454 * Must have trace_types_lock held.
6456 static void tracing_set_nop(struct trace_array *tr)
6458 if (tr->current_trace == &nop_trace)
6461 tr->current_trace->enabled--;
6463 if (tr->current_trace->reset)
6464 tr->current_trace->reset(tr);
6466 tr->current_trace = &nop_trace;
6469 static bool tracer_options_updated;
6471 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6473 /* Only enable if the directory has been created already. */
6477 /* Only create trace option files after update_tracer_options finish */
6478 if (!tracer_options_updated)
6481 create_trace_option_files(tr, t);
6484 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6487 #ifdef CONFIG_TRACER_MAX_TRACE
6492 mutex_lock(&trace_types_lock);
6494 if (!ring_buffer_expanded) {
6495 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6496 RING_BUFFER_ALL_CPUS);
6502 for (t = trace_types; t; t = t->next) {
6503 if (strcmp(t->name, buf) == 0)
6510 if (t == tr->current_trace)
6513 #ifdef CONFIG_TRACER_SNAPSHOT
6514 if (t->use_max_tr) {
6515 local_irq_disable();
6516 arch_spin_lock(&tr->max_lock);
6517 if (tr->cond_snapshot)
6519 arch_spin_unlock(&tr->max_lock);
6525 /* Some tracers won't work on kernel command line */
6526 if (system_state < SYSTEM_RUNNING && t->noboot) {
6527 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6532 /* Some tracers are only allowed for the top level buffer */
6533 if (!trace_ok_for_array(t, tr)) {
6538 /* If trace pipe files are being read, we can't change the tracer */
6539 if (tr->trace_ref) {
6544 trace_branch_disable();
6546 tr->current_trace->enabled--;
6548 if (tr->current_trace->reset)
6549 tr->current_trace->reset(tr);
6551 #ifdef CONFIG_TRACER_MAX_TRACE
6552 had_max_tr = tr->current_trace->use_max_tr;
6554 /* Current trace needs to be nop_trace before synchronize_rcu */
6555 tr->current_trace = &nop_trace;
6557 if (had_max_tr && !t->use_max_tr) {
6559 * We need to make sure that the update_max_tr sees that
6560 * current_trace changed to nop_trace to keep it from
6561 * swapping the buffers after we resize it.
6562 * The update_max_tr is called with interrupts disabled,
6563 * so a synchronize_rcu() is sufficient.
6569 if (t->use_max_tr && !tr->allocated_snapshot) {
6570 ret = tracing_alloc_snapshot_instance(tr);
6575 tr->current_trace = &nop_trace;
6579 ret = tracer_init(t, tr);
6584 tr->current_trace = t;
6585 tr->current_trace->enabled++;
6586 trace_branch_enable(tr);
6588 mutex_unlock(&trace_types_lock);
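/*
 * Illustrative usage of current_tracer (a sketch; the tracer name depends
 * on the kernel configuration and the path assumes tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   cat /sys/kernel/tracing/available_tracers
 *   echo function > /sys/kernel/tracing/current_tracer
 *
 * The write handler below copies the name from user space and hands it to
 * tracing_set_tracer() above.
 */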
6594 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6595 size_t cnt, loff_t *ppos)
6597 struct trace_array *tr = filp->private_data;
6598 char buf[MAX_TRACER_SIZE+1];
6605 if (cnt > MAX_TRACER_SIZE)
6606 cnt = MAX_TRACER_SIZE;
6608 if (copy_from_user(buf, ubuf, cnt))
6615 err = tracing_set_tracer(tr, name);
6625 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6626 size_t cnt, loff_t *ppos)
6631 r = snprintf(buf, sizeof(buf), "%ld\n",
6632 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6633 if (r > sizeof(buf))
6635 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6639 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6640 size_t cnt, loff_t *ppos)
6645 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6655 tracing_thresh_read(struct file *filp, char __user *ubuf,
6656 size_t cnt, loff_t *ppos)
6658 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6662 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6663 size_t cnt, loff_t *ppos)
6665 struct trace_array *tr = filp->private_data;
6668 mutex_lock(&trace_types_lock);
6669 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6673 if (tr->current_trace->update_thresh) {
6674 ret = tr->current_trace->update_thresh(tr);
6681 mutex_unlock(&trace_types_lock);
6686 #ifdef CONFIG_TRACER_MAX_TRACE
6689 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6690 size_t cnt, loff_t *ppos)
6692 struct trace_array *tr = filp->private_data;
6694 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6698 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6699 size_t cnt, loff_t *ppos)
6701 struct trace_array *tr = filp->private_data;
6703 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6708 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6710 if (cpu == RING_BUFFER_ALL_CPUS) {
6711 if (cpumask_empty(tr->pipe_cpumask)) {
6712 cpumask_setall(tr->pipe_cpumask);
6715 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6716 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6722 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6724 if (cpu == RING_BUFFER_ALL_CPUS) {
6725 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6726 cpumask_clear(tr->pipe_cpumask);
6728 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6729 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
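/*
 * The pipe_cpumask handling above makes trace_pipe readers mutually
 * exclusive: the global trace_pipe can only be opened while no per-CPU
 * pipe is open (and then claims every CPU), while a per-CPU trace_pipe
 * only claims its own CPU.
 */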
6733 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6735 struct trace_array *tr = inode->i_private;
6736 struct trace_iterator *iter;
6740 ret = tracing_check_open_get_tr(tr);
6744 mutex_lock(&trace_types_lock);
6745 cpu = tracing_get_cpu(inode);
6746 ret = open_pipe_on_cpu(tr, cpu);
6748 goto fail_pipe_on_cpu;
6750 /* create a buffer to store the information to pass to userspace */
6751 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6754 goto fail_alloc_iter;
6757 trace_seq_init(&iter->seq);
6758 iter->trace = tr->current_trace;
6760 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6765 /* trace pipe does not show start of buffer */
6766 cpumask_setall(iter->started);
6768 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6769 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6771 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6772 if (trace_clocks[tr->clock_id].in_ns)
6773 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6776 iter->array_buffer = &tr->array_buffer;
6777 iter->cpu_file = cpu;
6778 mutex_init(&iter->mutex);
6779 filp->private_data = iter;
6781 if (iter->trace->pipe_open)
6782 iter->trace->pipe_open(iter);
6784 nonseekable_open(inode, filp);
6788 mutex_unlock(&trace_types_lock);
6794 close_pipe_on_cpu(tr, cpu);
6796 __trace_array_put(tr);
6797 mutex_unlock(&trace_types_lock);
6801 static int tracing_release_pipe(struct inode *inode, struct file *file)
6803 struct trace_iterator *iter = file->private_data;
6804 struct trace_array *tr = inode->i_private;
6806 mutex_lock(&trace_types_lock);
6810 if (iter->trace->pipe_close)
6811 iter->trace->pipe_close(iter);
6812 close_pipe_on_cpu(tr, iter->cpu_file);
6813 mutex_unlock(&trace_types_lock);
6815 free_trace_iter_content(iter);
6818 trace_array_put(tr);
6824 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6826 struct trace_array *tr = iter->tr;
6828 /* Iterators are static, they should be filled or empty */
6829 if (trace_buffer_iter(iter, iter->cpu_file))
6830 return EPOLLIN | EPOLLRDNORM;
6832 if (tr->trace_flags & TRACE_ITER_BLOCK)
6834 * Always select as readable when in blocking mode
6836 return EPOLLIN | EPOLLRDNORM;
6838 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6839 filp, poll_table, iter->tr->buffer_percent);
6843 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6845 struct trace_iterator *iter = filp->private_data;
6847 return trace_poll(iter, filp, poll_table);
6850 /* Must be called with iter->mutex held. */
6851 static int tracing_wait_pipe(struct file *filp)
6853 struct trace_iterator *iter = filp->private_data;
6856 while (trace_empty(iter)) {
6858 if ((filp->f_flags & O_NONBLOCK)) {
6863 * We block until we read something and tracing is disabled.
6864 * We still block if tracing is disabled, but we have never
6865 * read anything. This allows a user to cat this file, and
6866 * then enable tracing. But after we have read something,
6867 * we give an EOF when tracing is again disabled.
6869 * iter->pos will be 0 if we haven't read anything.
6871 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6874 mutex_unlock(&iter->mutex);
6876 ret = wait_on_pipe(iter, 0);
6878 mutex_lock(&iter->mutex);
6891 tracing_read_pipe(struct file *filp, char __user *ubuf,
6892 size_t cnt, loff_t *ppos)
6894 struct trace_iterator *iter = filp->private_data;
6898 * Avoid more than one consumer on a single file descriptor
6899 * This is just a matter of trace coherency; the ring buffer itself
6902 mutex_lock(&iter->mutex);
6904 /* return any leftover data */
6905 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6909 trace_seq_init(&iter->seq);
6911 if (iter->trace->read) {
6912 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6918 sret = tracing_wait_pipe(filp);
6922 /* stop when tracing is finished */
6923 if (trace_empty(iter)) {
6928 if (cnt >= PAGE_SIZE)
6929 cnt = PAGE_SIZE - 1;
6931 /* reset all but tr, trace, and overruns */
6932 trace_iterator_reset(iter);
6933 cpumask_clear(iter->started);
6934 trace_seq_init(&iter->seq);
6936 trace_event_read_lock();
6937 trace_access_lock(iter->cpu_file);
6938 while (trace_find_next_entry_inc(iter) != NULL) {
6939 enum print_line_t ret;
6940 int save_len = iter->seq.seq.len;
6942 ret = print_trace_line(iter);
6943 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6945 * If one print_trace_line() fills entire trace_seq in one shot,
6946 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6947 * In this case, we need to consume it, otherwise the loop will peek
6948 * this event next time, resulting in an infinite loop.
6950 if (save_len == 0) {
6952 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6953 trace_consume(iter);
6957 /* In other cases, don't print partial lines */
6958 iter->seq.seq.len = save_len;
6961 if (ret != TRACE_TYPE_NO_CONSUME)
6962 trace_consume(iter);
6964 if (trace_seq_used(&iter->seq) >= cnt)
6968 * Setting the full flag means we reached the trace_seq buffer
6969 * size and we should leave by partial output condition above.
6970 * One of the trace_seq_* functions is not used properly.
6972 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6975 trace_access_unlock(iter->cpu_file);
6976 trace_event_read_unlock();
6978 /* Now copy what we have to the user */
6979 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6980 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6981 trace_seq_init(&iter->seq);
6984 * If there was nothing to send to user, in spite of consuming trace
6985 * entries, go back to wait for more entries.
6991 mutex_unlock(&iter->mutex);
6996 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6999 __free_page(spd->pages[idx]);
7003 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
7009 /* Seq buffer is page-sized, exactly what we need. */
7011 save_len = iter->seq.seq.len;
7012 ret = print_trace_line(iter);
7014 if (trace_seq_has_overflowed(&iter->seq)) {
7015 iter->seq.seq.len = save_len;
7020 * This should not be hit, because it should only
7021 * be set if the iter->seq overflowed. But check it
7022 * anyway to be safe.
7024 if (ret == TRACE_TYPE_PARTIAL_LINE) {
7025 iter->seq.seq.len = save_len;
7029 count = trace_seq_used(&iter->seq) - save_len;
7032 iter->seq.seq.len = save_len;
7036 if (ret != TRACE_TYPE_NO_CONSUME)
7037 trace_consume(iter);
7039 if (!trace_find_next_entry_inc(iter)) {
7049 static ssize_t tracing_splice_read_pipe(struct file *filp,
7051 struct pipe_inode_info *pipe,
7055 struct page *pages_def[PIPE_DEF_BUFFERS];
7056 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7057 struct trace_iterator *iter = filp->private_data;
7058 struct splice_pipe_desc spd = {
7060 .partial = partial_def,
7061 .nr_pages = 0, /* This gets updated below. */
7062 .nr_pages_max = PIPE_DEF_BUFFERS,
7063 .ops = &default_pipe_buf_ops,
7064 .spd_release = tracing_spd_release_pipe,
7070 if (splice_grow_spd(pipe, &spd))
7073 mutex_lock(&iter->mutex);
7075 if (iter->trace->splice_read) {
7076 ret = iter->trace->splice_read(iter, filp,
7077 ppos, pipe, len, flags);
7082 ret = tracing_wait_pipe(filp);
7086 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7091 trace_event_read_lock();
7092 trace_access_lock(iter->cpu_file);
7094 /* Fill as many pages as possible. */
7095 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7096 spd.pages[i] = alloc_page(GFP_KERNEL);
7100 rem = tracing_fill_pipe_page(rem, iter);
7102 /* Copy the data into the page, so we can start over. */
7103 ret = trace_seq_to_buffer(&iter->seq,
7104 page_address(spd.pages[i]),
7105 trace_seq_used(&iter->seq));
7107 __free_page(spd.pages[i]);
7110 spd.partial[i].offset = 0;
7111 spd.partial[i].len = trace_seq_used(&iter->seq);
7113 trace_seq_init(&iter->seq);
7116 trace_access_unlock(iter->cpu_file);
7117 trace_event_read_unlock();
7118 mutex_unlock(&iter->mutex);
7123 ret = splice_to_pipe(pipe, &spd);
7127 splice_shrink_spd(&spd);
7131 mutex_unlock(&iter->mutex);
7136 tracing_entries_read(struct file *filp, char __user *ubuf,
7137 size_t cnt, loff_t *ppos)
7139 struct inode *inode = file_inode(filp);
7140 struct trace_array *tr = inode->i_private;
7141 int cpu = tracing_get_cpu(inode);
7146 mutex_lock(&trace_types_lock);
7148 if (cpu == RING_BUFFER_ALL_CPUS) {
7149 int cpu, buf_size_same;
7154 /* check if all cpu sizes are the same */
7155 for_each_tracing_cpu(cpu) {
7156 /* fill in the size from the first enabled cpu */
7158 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7159 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7165 if (buf_size_same) {
7166 if (!ring_buffer_expanded)
7167 r = sprintf(buf, "%lu (expanded: %lu)\n",
7169 trace_buf_size >> 10);
7171 r = sprintf(buf, "%lu\n", size >> 10);
7173 r = sprintf(buf, "X\n");
7175 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7177 mutex_unlock(&trace_types_lock);
7179 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
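/*
 * Illustrative usage of buffer_size_kb (a sketch; the path assumes
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * The write handler below interprets the value in KB and resizes either
 * all per-CPU buffers or the single CPU encoded in the inode by
 * tracing_get_cpu().
 */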
7184 tracing_entries_write(struct file *filp, const char __user *ubuf,
7185 size_t cnt, loff_t *ppos)
7187 struct inode *inode = file_inode(filp);
7188 struct trace_array *tr = inode->i_private;
7192 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7196 /* must have at least 1 entry */
7200 /* value is in KB */
7202 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7212 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7213 size_t cnt, loff_t *ppos)
7215 struct trace_array *tr = filp->private_data;
7218 unsigned long size = 0, expanded_size = 0;
7220 mutex_lock(&trace_types_lock);
7221 for_each_tracing_cpu(cpu) {
7222 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7223 if (!ring_buffer_expanded)
7224 expanded_size += trace_buf_size >> 10;
7226 if (ring_buffer_expanded)
7227 r = sprintf(buf, "%lu\n", size);
7229 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7230 mutex_unlock(&trace_types_lock);
7232 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7236 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7237 size_t cnt, loff_t *ppos)
7240 * There is no need to read what the user has written; this function
7241 * exists only to ensure that no error is returned when "echo" is used
7250 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7252 struct trace_array *tr = inode->i_private;
7254 /* disable tracing? */
7255 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7256 tracer_tracing_off(tr);
7257 /* resize the ring buffer to 0 */
7258 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7260 trace_array_put(tr);
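/*
 * Example (illustrative sketch, mount point assumed): the data written to
 * free_buffer is ignored; the work happens on release, where the buffer is
 * resized to zero and, if the "stop on free" flag is set, tracing is
 * turned off first.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void drop_trace_buffers(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/free_buffer", O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, "1", 1);	// content is ignored
 *			close(fd);		// buffer shrunk to 0 here
 *		}
 *	}
 */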
7266 tracing_mark_write(struct file *filp, const char __user *ubuf,
7267 size_t cnt, loff_t *fpos)
7269 struct trace_array *tr = filp->private_data;
7270 struct ring_buffer_event *event;
7271 enum event_trigger_type tt = ETT_NONE;
7272 struct trace_buffer *buffer;
7273 struct print_entry *entry;
7278 /* Used in tracing_mark_raw_write() as well */
7279 #define FAULTED_STR "<faulted>"
7280 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7282 if (tracing_disabled)
7285 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7288 if (cnt > TRACE_BUF_SIZE)
7289 cnt = TRACE_BUF_SIZE;
7291 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7293 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7295 /* If less than "<faulted>", then make sure we can still add that */
7296 if (cnt < FAULTED_SIZE)
7297 size += FAULTED_SIZE - cnt;
7299 buffer = tr->array_buffer.buffer;
7300 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7302 if (unlikely(!event))
7303 /* Ring buffer disabled, return as if not open for write */
7306 entry = ring_buffer_event_data(event);
7307 entry->ip = _THIS_IP_;
7309 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7311 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7317 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7318 /* do not add \n before testing triggers, but add \0 */
7319 entry->buf[cnt] = '\0';
7320 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7323 if (entry->buf[cnt - 1] != '\n') {
7324 entry->buf[cnt] = '\n';
7325 entry->buf[cnt + 1] = '\0';
7327 entry->buf[cnt] = '\0';
7329 if (static_branch_unlikely(&trace_marker_exports_enabled))
7330 ftrace_exports(event, TRACE_EXPORT_MARKER);
7331 __buffer_unlock_commit(buffer, event);
7334 event_triggers_post_call(tr->trace_marker_file, tt);
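/*
 * Example (illustrative sketch, mount point assumed): each write() to
 * trace_marker becomes one TRACE_PRINT event, capped at TRACE_BUF_SIZE,
 * with a newline appended by the kernel if the payload lacks one.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void emit_marker(const char *msg)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, msg, strlen(msg));
 *			close(fd);
 *		}
 *	}
 */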
7339 /* Limit it for now to 3K (including tag) */
7340 #define RAW_DATA_MAX_SIZE (1024*3)
7343 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7344 size_t cnt, loff_t *fpos)
7346 struct trace_array *tr = filp->private_data;
7347 struct ring_buffer_event *event;
7348 struct trace_buffer *buffer;
7349 struct raw_data_entry *entry;
7354 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7356 if (tracing_disabled)
7359 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7362 /* The marker must at least have a tag id */
7363 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7366 if (cnt > TRACE_BUF_SIZE)
7367 cnt = TRACE_BUF_SIZE;
7369 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7371 size = sizeof(*entry) + cnt;
7372 if (cnt < FAULT_SIZE_ID)
7373 size += FAULT_SIZE_ID - cnt;
7375 buffer = tr->array_buffer.buffer;
7376 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7379 /* Ring buffer disabled, return as if not open for write */
7382 entry = ring_buffer_event_data(event);
7384 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7387 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7392 __buffer_unlock_commit(buffer, event);
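/*
 * Example (illustrative sketch): a raw marker payload must begin with an
 * unsigned int tag id and stay under RAW_DATA_MAX_SIZE. The tag value and
 * record layout below are made-up assumptions; mount point assumed as well.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	struct my_raw_marker {
 *		unsigned int	id;	// consumed as the tag id
 *		unsigned int	value;	// opaque payload
 *	};
 *
 *	static void emit_raw_marker(unsigned int value)
 *	{
 *		struct my_raw_marker m = { .id = 0x1234, .value = value };
 *		int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, &m, sizeof(m));
 *			close(fd);
 *		}
 *	}
 */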
7397 static int tracing_clock_show(struct seq_file *m, void *v)
7399 struct trace_array *tr = m->private;
7402 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7404 "%s%s%s%s", i ? " " : "",
7405 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7406 i == tr->clock_id ? "]" : "");
7412 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7416 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7417 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7420 if (i == ARRAY_SIZE(trace_clocks))
7423 mutex_lock(&trace_types_lock);
7427 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7430 * New clock may not be consistent with the previous clock.
7431 * Reset the buffer so that it doesn't have incomparable timestamps.
7433 tracing_reset_online_cpus(&tr->array_buffer);
7435 #ifdef CONFIG_TRACER_MAX_TRACE
7436 if (tr->max_buffer.buffer)
7437 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7438 tracing_reset_online_cpus(&tr->max_buffer);
7441 mutex_unlock(&trace_types_lock);
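/*
 * Example (illustrative sketch): user space selects a clock by writing one
 * of the names shown when reading trace_clock (the "global" name below is
 * an assumption about the configured clocks); the reset above means old
 * and new timestamps are never mixed in one buffer. Mount point assumed.
 *
 *	int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "global", 6);
 *		close(fd);
 *	}
 */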
7446 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7447 size_t cnt, loff_t *fpos)
7449 struct seq_file *m = filp->private_data;
7450 struct trace_array *tr = m->private;
7452 const char *clockstr;
7455 if (cnt >= sizeof(buf))
7458 if (copy_from_user(buf, ubuf, cnt))
7463 clockstr = strstrip(buf);
7465 ret = tracing_set_clock(tr, clockstr);
7474 static int tracing_clock_open(struct inode *inode, struct file *file)
7476 struct trace_array *tr = inode->i_private;
7479 ret = tracing_check_open_get_tr(tr);
7483 ret = single_open(file, tracing_clock_show, inode->i_private);
7485 trace_array_put(tr);
7490 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7492 struct trace_array *tr = m->private;
7494 mutex_lock(&trace_types_lock);
7496 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7497 seq_puts(m, "delta [absolute]\n");
7499 seq_puts(m, "[delta] absolute\n");
7501 mutex_unlock(&trace_types_lock);
7506 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7508 struct trace_array *tr = inode->i_private;
7511 ret = tracing_check_open_get_tr(tr);
7515 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7517 trace_array_put(tr);
7522 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7524 if (rbe == this_cpu_read(trace_buffered_event))
7525 return ring_buffer_time_stamp(buffer);
7527 return ring_buffer_event_time_stamp(buffer, rbe);
7531 * Set or disable using the per CPU trace_buffered_event when possible.
7533 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7537 mutex_lock(&trace_types_lock);
7539 if (set && tr->no_filter_buffering_ref++)
7543 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7548 --tr->no_filter_buffering_ref;
7551 mutex_unlock(&trace_types_lock);
7556 struct ftrace_buffer_info {
7557 struct trace_iterator iter;
7559 unsigned int spare_cpu;
7563 #ifdef CONFIG_TRACER_SNAPSHOT
7564 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7566 struct trace_array *tr = inode->i_private;
7567 struct trace_iterator *iter;
7571 ret = tracing_check_open_get_tr(tr);
7575 if (file->f_mode & FMODE_READ) {
7576 iter = __tracing_open(inode, file, true);
7578 ret = PTR_ERR(iter);
7580 /* Writes still need the seq_file to hold the private data */
7582 m = kzalloc(sizeof(*m), GFP_KERNEL);
7585 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7593 iter->array_buffer = &tr->max_buffer;
7594 iter->cpu_file = tracing_get_cpu(inode);
7596 file->private_data = m;
7600 trace_array_put(tr);
7605 static void tracing_swap_cpu_buffer(void *tr)
7607 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7611 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7614 struct seq_file *m = filp->private_data;
7615 struct trace_iterator *iter = m->private;
7616 struct trace_array *tr = iter->tr;
7620 ret = tracing_update_buffers();
7624 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7628 mutex_lock(&trace_types_lock);
7630 if (tr->current_trace->use_max_tr) {
7635 local_irq_disable();
7636 arch_spin_lock(&tr->max_lock);
7637 if (tr->cond_snapshot)
7639 arch_spin_unlock(&tr->max_lock);
7646 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7650 if (tr->allocated_snapshot)
7654 /* Only allow per-cpu swap if the ring buffer supports it */
7655 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7656 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7661 if (tr->allocated_snapshot)
7662 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7663 &tr->array_buffer, iter->cpu_file);
7665 ret = tracing_alloc_snapshot_instance(tr);
7668 /* Now, we're going to swap */
7669 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7670 local_irq_disable();
7671 update_max_tr(tr, current, smp_processor_id(), NULL);
7674 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7679 if (tr->allocated_snapshot) {
7680 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7681 tracing_reset_online_cpus(&tr->max_buffer);
7683 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7693 mutex_unlock(&trace_types_lock);
7697 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7699 struct seq_file *m = file->private_data;
7702 ret = tracing_release(inode, file);
7704 if (file->f_mode & FMODE_READ)
7707 /* If write only, the seq_file is just a stub */
7715 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7716 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7717 size_t count, loff_t *ppos);
7718 static int tracing_buffers_release(struct inode *inode, struct file *file);
7719 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7720 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7722 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7724 struct ftrace_buffer_info *info;
7727 /* The following checks for tracefs lockdown */
7728 ret = tracing_buffers_open(inode, filp);
7732 info = filp->private_data;
7734 if (info->iter.trace->use_max_tr) {
7735 tracing_buffers_release(inode, filp);
7739 info->iter.snapshot = true;
7740 info->iter.array_buffer = &info->iter.tr->max_buffer;
7745 #endif /* CONFIG_TRACER_SNAPSHOT */
7748 static const struct file_operations tracing_thresh_fops = {
7749 .open = tracing_open_generic,
7750 .read = tracing_thresh_read,
7751 .write = tracing_thresh_write,
7752 .llseek = generic_file_llseek,
7755 #ifdef CONFIG_TRACER_MAX_TRACE
7756 static const struct file_operations tracing_max_lat_fops = {
7757 .open = tracing_open_generic_tr,
7758 .read = tracing_max_lat_read,
7759 .write = tracing_max_lat_write,
7760 .llseek = generic_file_llseek,
7761 .release = tracing_release_generic_tr,
7765 static const struct file_operations set_tracer_fops = {
7766 .open = tracing_open_generic_tr,
7767 .read = tracing_set_trace_read,
7768 .write = tracing_set_trace_write,
7769 .llseek = generic_file_llseek,
7770 .release = tracing_release_generic_tr,
7773 static const struct file_operations tracing_pipe_fops = {
7774 .open = tracing_open_pipe,
7775 .poll = tracing_poll_pipe,
7776 .read = tracing_read_pipe,
7777 .splice_read = tracing_splice_read_pipe,
7778 .release = tracing_release_pipe,
7779 .llseek = no_llseek,
7782 static const struct file_operations tracing_entries_fops = {
7783 .open = tracing_open_generic_tr,
7784 .read = tracing_entries_read,
7785 .write = tracing_entries_write,
7786 .llseek = generic_file_llseek,
7787 .release = tracing_release_generic_tr,
7790 static const struct file_operations tracing_total_entries_fops = {
7791 .open = tracing_open_generic_tr,
7792 .read = tracing_total_entries_read,
7793 .llseek = generic_file_llseek,
7794 .release = tracing_release_generic_tr,
7797 static const struct file_operations tracing_free_buffer_fops = {
7798 .open = tracing_open_generic_tr,
7799 .write = tracing_free_buffer_write,
7800 .release = tracing_free_buffer_release,
7803 static const struct file_operations tracing_mark_fops = {
7804 .open = tracing_mark_open,
7805 .write = tracing_mark_write,
7806 .release = tracing_release_generic_tr,
7809 static const struct file_operations tracing_mark_raw_fops = {
7810 .open = tracing_mark_open,
7811 .write = tracing_mark_raw_write,
7812 .release = tracing_release_generic_tr,
7815 static const struct file_operations trace_clock_fops = {
7816 .open = tracing_clock_open,
7818 .llseek = seq_lseek,
7819 .release = tracing_single_release_tr,
7820 .write = tracing_clock_write,
7823 static const struct file_operations trace_time_stamp_mode_fops = {
7824 .open = tracing_time_stamp_mode_open,
7826 .llseek = seq_lseek,
7827 .release = tracing_single_release_tr,
7830 #ifdef CONFIG_TRACER_SNAPSHOT
7831 static const struct file_operations snapshot_fops = {
7832 .open = tracing_snapshot_open,
7834 .write = tracing_snapshot_write,
7835 .llseek = tracing_lseek,
7836 .release = tracing_snapshot_release,
7839 static const struct file_operations snapshot_raw_fops = {
7840 .open = snapshot_raw_open,
7841 .read = tracing_buffers_read,
7842 .release = tracing_buffers_release,
7843 .splice_read = tracing_buffers_splice_read,
7844 .llseek = no_llseek,
7847 #endif /* CONFIG_TRACER_SNAPSHOT */
7850 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7851 * @filp: The active open file structure
7852 * @ubuf: The userspace provided buffer to read the value from
7853 * @cnt: The maximum number of bytes to read
7854 * @ppos: The current "file" position
7856 * This function implements the write interface for a struct trace_min_max_param.
7857 * The filp->private_data must point to a trace_min_max_param structure that
7858 * defines where to write the value, the min and the max acceptable values,
7859 * and a lock to protect the write.
7862 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7864 struct trace_min_max_param *param = filp->private_data;
7871 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7876 mutex_lock(param->lock);
7878 if (param->min && val < *param->min)
7881 if (param->max && val > *param->max)
7888 mutex_unlock(param->lock);
7897 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7898 * @filp: The active open file structure
7899 * @ubuf: The userspace provided buffer to read value into
7900 * @cnt: The maximum number of bytes to read
7901 * @ppos: The current "file" position
7903 * This function implements the read interface for a struct trace_min_max_param.
7904 * The filp->private_data must point to a trace_min_max_param struct with valid
7908 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7910 struct trace_min_max_param *param = filp->private_data;
7911 char buf[U64_STR_SIZE];
7920 if (cnt > sizeof(buf))
7923 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7925 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7928 const struct file_operations trace_min_max_fops = {
7929 .open = tracing_open_generic,
7930 .read = trace_min_max_read,
7931 .write = trace_min_max_write,
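/*
 * Example (illustrative sketch): how a caller might wire a u64 knob to
 * trace_min_max_fops. The field names follow the accesses in
 * trace_min_max_write()/read() above, but the exact layout of
 * struct trace_min_max_param is an assumption here.
 *
 *	static u64 my_threshold = 50;
 *	static u64 my_threshold_min = 1;
 *	static u64 my_threshold_max = 100;
 *	static DEFINE_MUTEX(my_threshold_lock);
 *
 *	static struct trace_min_max_param my_threshold_param = {
 *		.lock	= &my_threshold_lock,
 *		.val	= &my_threshold,
 *		.min	= &my_threshold_min,
 *		.max	= &my_threshold_max,
 *	};
 *
 *	// trace_create_file("my_threshold", TRACE_MODE_WRITE, parent,
 *	//		     &my_threshold_param, &trace_min_max_fops);
 */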
7934 #define TRACING_LOG_ERRS_MAX 8
7935 #define TRACING_LOG_LOC_MAX 128
7937 #define CMD_PREFIX " Command: "
7940 const char **errs; /* ptr to loc-specific array of err strings */
7941 u8 type; /* index into errs -> specific err string */
7942 u16 pos; /* caret position */
7946 struct tracing_log_err {
7947 struct list_head list;
7948 struct err_info info;
7949 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7950 char *cmd; /* what caused err */
7953 static DEFINE_MUTEX(tracing_err_log_lock);
7955 static struct tracing_log_err *alloc_tracing_log_err(int len)
7957 struct tracing_log_err *err;
7959 err = kzalloc(sizeof(*err), GFP_KERNEL);
7961 return ERR_PTR(-ENOMEM);
7963 err->cmd = kzalloc(len, GFP_KERNEL);
7966 return ERR_PTR(-ENOMEM);
7972 static void free_tracing_log_err(struct tracing_log_err *err)
7978 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7981 struct tracing_log_err *err;
7984 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7985 err = alloc_tracing_log_err(len);
7986 if (PTR_ERR(err) != -ENOMEM)
7987 tr->n_err_log_entries++;
7991 cmd = kzalloc(len, GFP_KERNEL);
7993 return ERR_PTR(-ENOMEM);
7994 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7997 list_del(&err->list);
8003 * err_pos - find the position of a string within a command for error careting
8004 * @cmd: The tracing command that caused the error
8005 * @str: The string to position the caret at within @cmd
8007 * Finds the position of the first occurrence of @str within @cmd. The
8008 * return value can be passed to tracing_log_err() for caret placement
8011 * Returns the index within @cmd of the first occurrence of @str or 0
8012 * if @str was not found.
8014 unsigned int err_pos(char *cmd, const char *str)
8018 if (WARN_ON(!strlen(cmd)))
8021 found = strstr(cmd, str);
8029 * tracing_log_err - write an error to the tracing error log
8030 * @tr: The associated trace array for the error (NULL for top level array)
8031 * @loc: A string describing where the error occurred
8032 * @cmd: The tracing command that caused the error
8033 * @errs: The array of loc-specific static error strings
8034 * @type: The index into errs[], which produces the specific static err string
8035 * @pos: The position the caret should be placed in the cmd
8037 * Writes an error into tracing/error_log of the form:
8039 * <loc>: error: <text>
8043 * tracing/error_log is a small log file containing the last
8044 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
8045 * unless there has been a tracing error, and the error log can be
8046 * cleared and have its memory freed by writing the empty string in
8047 * truncation mode to it, i.e. echo > tracing/error_log.
8049 * NOTE: the @errs array along with the @type param are used to
8050 * produce a static error string - this string is not copied and saved
8051 * when the error is logged - only a pointer to it is saved. See
8052 * existing callers for examples of how static strings are typically
8053 * defined for use with tracing_log_err().
8055 void tracing_log_err(struct trace_array *tr,
8056 const char *loc, const char *cmd,
8057 const char **errs, u8 type, u16 pos)
8059 struct tracing_log_err *err;
8065 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8067 mutex_lock(&tracing_err_log_lock);
8068 err = get_tracing_log_err(tr, len);
8069 if (PTR_ERR(err) == -ENOMEM) {
8070 mutex_unlock(&tracing_err_log_lock);
8074 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8075 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8077 err->info.errs = errs;
8078 err->info.type = type;
8079 err->info.pos = pos;
8080 err->info.ts = local_clock();
8082 list_add_tail(&err->list, &tr->err_log);
8083 mutex_unlock(&tracing_err_log_lock);
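/*
 * Example (illustrative sketch of a caller, names made up): the errs array
 * is static and only indexed, never copied, as the NOTE above stresses.
 *
 *	static const char *my_cmd_errs[] = {
 *		"Unknown keyword",	// type 0
 *		"Missing argument",	// type 1
 *	};
 *
 *	static void my_cmd_error(struct trace_array *tr, char *cmd, char *tok)
 *	{
 *		// caret lands under the first occurrence of tok in cmd
 *		tracing_log_err(tr, "my_cmd", cmd, my_cmd_errs, 0,
 *				err_pos(cmd, tok));
 *	}
 */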
8086 static void clear_tracing_err_log(struct trace_array *tr)
8088 struct tracing_log_err *err, *next;
8090 mutex_lock(&tracing_err_log_lock);
8091 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8092 list_del(&err->list);
8093 free_tracing_log_err(err);
8096 tr->n_err_log_entries = 0;
8097 mutex_unlock(&tracing_err_log_lock);
8100 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8102 struct trace_array *tr = m->private;
8104 mutex_lock(&tracing_err_log_lock);
8106 return seq_list_start(&tr->err_log, *pos);
8109 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8111 struct trace_array *tr = m->private;
8113 return seq_list_next(v, &tr->err_log, pos);
8116 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8118 mutex_unlock(&tracing_err_log_lock);
8121 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8125 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8127 for (i = 0; i < pos; i++)
8132 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8134 struct tracing_log_err *err = v;
8137 const char *err_text = err->info.errs[err->info.type];
8138 u64 sec = err->info.ts;
8141 nsec = do_div(sec, NSEC_PER_SEC);
8142 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8143 err->loc, err_text);
8144 seq_printf(m, "%s", err->cmd);
8145 tracing_err_log_show_pos(m, err->info.pos);
8151 static const struct seq_operations tracing_err_log_seq_ops = {
8152 .start = tracing_err_log_seq_start,
8153 .next = tracing_err_log_seq_next,
8154 .stop = tracing_err_log_seq_stop,
8155 .show = tracing_err_log_seq_show
8158 static int tracing_err_log_open(struct inode *inode, struct file *file)
8160 struct trace_array *tr = inode->i_private;
8163 ret = tracing_check_open_get_tr(tr);
8167 /* If this file was opened for write, then erase contents */
8168 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8169 clear_tracing_err_log(tr);
8171 if (file->f_mode & FMODE_READ) {
8172 ret = seq_open(file, &tracing_err_log_seq_ops);
8174 struct seq_file *m = file->private_data;
8177 trace_array_put(tr);
8183 static ssize_t tracing_err_log_write(struct file *file,
8184 const char __user *buffer,
8185 size_t count, loff_t *ppos)
8190 static int tracing_err_log_release(struct inode *inode, struct file *file)
8192 struct trace_array *tr = inode->i_private;
8194 trace_array_put(tr);
8196 if (file->f_mode & FMODE_READ)
8197 seq_release(inode, file);
8202 static const struct file_operations tracing_err_log_fops = {
8203 .open = tracing_err_log_open,
8204 .write = tracing_err_log_write,
8206 .llseek = tracing_lseek,
8207 .release = tracing_err_log_release,
8210 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8212 struct trace_array *tr = inode->i_private;
8213 struct ftrace_buffer_info *info;
8216 ret = tracing_check_open_get_tr(tr);
8220 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8222 trace_array_put(tr);
8226 mutex_lock(&trace_types_lock);
8229 info->iter.cpu_file = tracing_get_cpu(inode);
8230 info->iter.trace = tr->current_trace;
8231 info->iter.array_buffer = &tr->array_buffer;
8233 /* Force reading ring buffer for first read */
8234 info->read = (unsigned int)-1;
8236 filp->private_data = info;
8240 mutex_unlock(&trace_types_lock);
8242 ret = nonseekable_open(inode, filp);
8244 trace_array_put(tr);
8250 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8252 struct ftrace_buffer_info *info = filp->private_data;
8253 struct trace_iterator *iter = &info->iter;
8255 return trace_poll(iter, filp, poll_table);
8259 tracing_buffers_read(struct file *filp, char __user *ubuf,
8260 size_t count, loff_t *ppos)
8262 struct ftrace_buffer_info *info = filp->private_data;
8263 struct trace_iterator *iter = &info->iter;
8270 #ifdef CONFIG_TRACER_MAX_TRACE
8271 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8276 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8278 if (IS_ERR(info->spare)) {
8279 ret = PTR_ERR(info->spare);
8282 info->spare_cpu = iter->cpu_file;
8288 /* Do we have previous read data to read? */
8289 if (info->read < PAGE_SIZE)
8293 trace_access_lock(iter->cpu_file);
8294 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8298 trace_access_unlock(iter->cpu_file);
8301 if (trace_empty(iter)) {
8302 if ((filp->f_flags & O_NONBLOCK))
8305 ret = wait_on_pipe(iter, 0);
8316 size = PAGE_SIZE - info->read;
8320 ret = copy_to_user(ubuf, info->spare + info->read, size);
8332 static int tracing_buffers_release(struct inode *inode, struct file *file)
8334 struct ftrace_buffer_info *info = file->private_data;
8335 struct trace_iterator *iter = &info->iter;
8337 mutex_lock(&trace_types_lock);
8339 iter->tr->trace_ref--;
8341 __trace_array_put(iter->tr);
8344 /* Make sure the waiters see the new wait_index */
8347 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8350 ring_buffer_free_read_page(iter->array_buffer->buffer,
8351 info->spare_cpu, info->spare);
8354 mutex_unlock(&trace_types_lock);
8360 struct trace_buffer *buffer;
8363 refcount_t refcount;
8366 static void buffer_ref_release(struct buffer_ref *ref)
8368 if (!refcount_dec_and_test(&ref->refcount))
8370 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8374 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8375 struct pipe_buffer *buf)
8377 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8379 buffer_ref_release(ref);
8383 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8384 struct pipe_buffer *buf)
8386 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8388 if (refcount_read(&ref->refcount) > INT_MAX/2)
8391 refcount_inc(&ref->refcount);
8395 /* Pipe buffer operations for a buffer. */
8396 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8397 .release = buffer_pipe_buf_release,
8398 .get = buffer_pipe_buf_get,
8402 * Callback from splice_to_pipe(); used to release some pages
8403 * at the end of the spd in case we errored out while filling the pipe.
8405 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8407 struct buffer_ref *ref =
8408 (struct buffer_ref *)spd->partial[i].private;
8410 buffer_ref_release(ref);
8411 spd->partial[i].private = 0;
8415 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8416 struct pipe_inode_info *pipe, size_t len,
8419 struct ftrace_buffer_info *info = file->private_data;
8420 struct trace_iterator *iter = &info->iter;
8421 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8422 struct page *pages_def[PIPE_DEF_BUFFERS];
8423 struct splice_pipe_desc spd = {
8425 .partial = partial_def,
8426 .nr_pages_max = PIPE_DEF_BUFFERS,
8427 .ops = &buffer_pipe_buf_ops,
8428 .spd_release = buffer_spd_release,
8430 struct buffer_ref *ref;
8434 #ifdef CONFIG_TRACER_MAX_TRACE
8435 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8439 if (*ppos & (PAGE_SIZE - 1))
8442 if (len & (PAGE_SIZE - 1)) {
8443 if (len < PAGE_SIZE)
8448 if (splice_grow_spd(pipe, &spd))
8452 trace_access_lock(iter->cpu_file);
8453 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8455 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8459 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8465 refcount_set(&ref->refcount, 1);
8466 ref->buffer = iter->array_buffer->buffer;
8467 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8468 if (IS_ERR(ref->page)) {
8469 ret = PTR_ERR(ref->page);
8474 ref->cpu = iter->cpu_file;
8476 r = ring_buffer_read_page(ref->buffer, &ref->page,
8477 len, iter->cpu_file, 1);
8479 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8485 page = virt_to_page(ref->page);
8487 spd.pages[i] = page;
8488 spd.partial[i].len = PAGE_SIZE;
8489 spd.partial[i].offset = 0;
8490 spd.partial[i].private = (unsigned long)ref;
8494 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8497 trace_access_unlock(iter->cpu_file);
8500 /* did we read anything? */
8501 if (!spd.nr_pages) {
8508 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8511 wait_index = READ_ONCE(iter->wait_index);
8513 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8517 /* No need to wait after waking up when tracing is off */
8518 if (!tracer_tracing_is_on(iter->tr))
8521 /* Make sure we see the new wait_index */
8523 if (wait_index != iter->wait_index)
8529 ret = splice_to_pipe(pipe, &spd);
8531 splice_shrink_spd(&spd);
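/*
 * Example (illustrative sketch): user space consumes binary sub-buffer
 * pages with splice(2); per the checks above, the offset must be page
 * aligned and the length at least one page. The 4096 below assumes
 * PAGE_SIZE, and raw_fd is expected to be a per_cpu/cpuN/trace_pipe_raw
 * file.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	static ssize_t pull_raw_page(int raw_fd, int pipe_wr)
 *	{
 *		return splice(raw_fd, NULL, pipe_wr, NULL, 4096,
 *			      SPLICE_F_NONBLOCK);
 *	}
 */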
8536 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8537 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8539 struct ftrace_buffer_info *info = file->private_data;
8540 struct trace_iterator *iter = &info->iter;
8543 return -ENOIOCTLCMD;
8545 mutex_lock(&trace_types_lock);
8548 /* Make sure the waiters see the new wait_index */
8551 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8553 mutex_unlock(&trace_types_lock);
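/*
 * Example (illustrative sketch): another thread can kick a reader that is
 * blocked in read() or splice() on trace_pipe_raw; cmd 0 is the only
 * command handled here, anything else returns -ENOIOCTLCMD.
 *
 *	#include <sys/ioctl.h>
 *
 *	static void wake_raw_readers(int raw_fd)
 *	{
 *		ioctl(raw_fd, 0);	// wake all waiters on this buffer
 *	}
 */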
8557 static const struct file_operations tracing_buffers_fops = {
8558 .open = tracing_buffers_open,
8559 .read = tracing_buffers_read,
8560 .poll = tracing_buffers_poll,
8561 .release = tracing_buffers_release,
8562 .splice_read = tracing_buffers_splice_read,
8563 .unlocked_ioctl = tracing_buffers_ioctl,
8564 .llseek = no_llseek,
8568 tracing_stats_read(struct file *filp, char __user *ubuf,
8569 size_t count, loff_t *ppos)
8571 struct inode *inode = file_inode(filp);
8572 struct trace_array *tr = inode->i_private;
8573 struct array_buffer *trace_buf = &tr->array_buffer;
8574 int cpu = tracing_get_cpu(inode);
8575 struct trace_seq *s;
8577 unsigned long long t;
8578 unsigned long usec_rem;
8580 s = kmalloc(sizeof(*s), GFP_KERNEL);
8586 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8587 trace_seq_printf(s, "entries: %ld\n", cnt);
8589 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8590 trace_seq_printf(s, "overrun: %ld\n", cnt);
8592 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8593 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8595 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8596 trace_seq_printf(s, "bytes: %ld\n", cnt);
8598 if (trace_clocks[tr->clock_id].in_ns) {
8599 /* local or global for trace_clock */
8600 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8601 usec_rem = do_div(t, USEC_PER_SEC);
8602 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8605 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8606 usec_rem = do_div(t, USEC_PER_SEC);
8607 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8609 /* counter or tsc mode for trace_clock */
8610 trace_seq_printf(s, "oldest event ts: %llu\n",
8611 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8613 trace_seq_printf(s, "now ts: %llu\n",
8614 ring_buffer_time_stamp(trace_buf->buffer));
8617 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8618 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8620 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8621 trace_seq_printf(s, "read events: %ld\n", cnt);
8623 count = simple_read_from_buffer(ubuf, count, ppos,
8624 s->buffer, trace_seq_used(s));
8631 static const struct file_operations tracing_stats_fops = {
8632 .open = tracing_open_generic_tr,
8633 .read = tracing_stats_read,
8634 .llseek = generic_file_llseek,
8635 .release = tracing_release_generic_tr,
8638 #ifdef CONFIG_DYNAMIC_FTRACE
8641 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8642 size_t cnt, loff_t *ppos)
8648 /* 256 should be plenty to hold the amount needed */
8649 buf = kmalloc(256, GFP_KERNEL);
8653 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8654 ftrace_update_tot_cnt,
8655 ftrace_number_of_pages,
8656 ftrace_number_of_groups);
8658 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8663 static const struct file_operations tracing_dyn_info_fops = {
8664 .open = tracing_open_generic,
8665 .read = tracing_read_dyn_info,
8666 .llseek = generic_file_llseek,
8668 #endif /* CONFIG_DYNAMIC_FTRACE */
8670 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8672 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8673 struct trace_array *tr, struct ftrace_probe_ops *ops,
8676 tracing_snapshot_instance(tr);
8680 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8681 struct trace_array *tr, struct ftrace_probe_ops *ops,
8684 struct ftrace_func_mapper *mapper = data;
8688 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8698 tracing_snapshot_instance(tr);
8702 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8703 struct ftrace_probe_ops *ops, void *data)
8705 struct ftrace_func_mapper *mapper = data;
8708 seq_printf(m, "%ps:", (void *)ip);
8710 seq_puts(m, "snapshot");
8713 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8716 seq_printf(m, ":count=%ld\n", *count);
8718 seq_puts(m, ":unlimited\n");
8724 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8725 unsigned long ip, void *init_data, void **data)
8727 struct ftrace_func_mapper *mapper = *data;
8730 mapper = allocate_ftrace_func_mapper();
8736 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8740 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8741 unsigned long ip, void *data)
8743 struct ftrace_func_mapper *mapper = data;
8748 free_ftrace_func_mapper(mapper, NULL);
8752 ftrace_func_mapper_remove_ip(mapper, ip);
8755 static struct ftrace_probe_ops snapshot_probe_ops = {
8756 .func = ftrace_snapshot,
8757 .print = ftrace_snapshot_print,
8760 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8761 .func = ftrace_count_snapshot,
8762 .print = ftrace_snapshot_print,
8763 .init = ftrace_snapshot_init,
8764 .free = ftrace_snapshot_free,
8768 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8769 char *glob, char *cmd, char *param, int enable)
8771 struct ftrace_probe_ops *ops;
8772 void *count = (void *)-1;
8779 /* hash funcs only work with set_ftrace_filter */
8783 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8786 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8791 number = strsep(&param, ":");
8793 if (!strlen(number))
8797 * We use the callback data field (which is a pointer)
8800 ret = kstrtoul(number, 0, (unsigned long *)&count);
8805 ret = tracing_alloc_snapshot_instance(tr);
8809 ret = register_ftrace_function_probe(glob, tr, ops, count);
8812 return ret < 0 ? ret : 0;
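/*
 * Example (illustrative sketch): based on the parsing above, the command
 * takes the form "<function>:snapshot[:<count>]" written to
 * set_ftrace_filter, and a leading '!' removes the probe. The function
 * name and mount point below are assumptions.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void snapshot_on_schedule(void)
 *	{
 *		static const char cmd[] = "schedule:snapshot:1";
 *		int fd = open("/sys/kernel/tracing/set_ftrace_filter",
 *			      O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, cmd, strlen(cmd));
 *			close(fd);
 *		}
 *	}
 */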
8815 static struct ftrace_func_command ftrace_snapshot_cmd = {
8817 .func = ftrace_trace_snapshot_callback,
8820 static __init int register_snapshot_cmd(void)
8822 return register_ftrace_command(&ftrace_snapshot_cmd);
8825 static inline __init int register_snapshot_cmd(void) { return 0; }
8826 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8828 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8830 if (WARN_ON(!tr->dir))
8831 return ERR_PTR(-ENODEV);
8833 /* Top directory uses NULL as the parent */
8834 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8837 /* All sub buffers have a descriptor */
8841 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8843 struct dentry *d_tracer;
8846 return tr->percpu_dir;
8848 d_tracer = tracing_get_dentry(tr);
8849 if (IS_ERR(d_tracer))
8852 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8854 MEM_FAIL(!tr->percpu_dir,
8855 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8857 return tr->percpu_dir;
8860 static struct dentry *
8861 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8862 void *data, long cpu, const struct file_operations *fops)
8864 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8866 if (ret) /* See tracing_get_cpu() */
8867 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8872 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8874 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8875 struct dentry *d_cpu;
8876 char cpu_dir[30]; /* 30 characters should be more than enough */
8881 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8882 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8884 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8888 /* per cpu trace_pipe */
8889 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8890 tr, cpu, &tracing_pipe_fops);
8893 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8894 tr, cpu, &tracing_fops);
8896 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8897 tr, cpu, &tracing_buffers_fops);
8899 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8900 tr, cpu, &tracing_stats_fops);
8902 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8903 tr, cpu, &tracing_entries_fops);
8905 #ifdef CONFIG_TRACER_SNAPSHOT
8906 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8907 tr, cpu, &snapshot_fops);
8909 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8910 tr, cpu, &snapshot_raw_fops);
8914 #ifdef CONFIG_FTRACE_SELFTEST
8915 /* Let selftest have access to static functions in this file */
8916 #include "trace_selftest.c"
8920 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8923 struct trace_option_dentry *topt = filp->private_data;
8926 if (topt->flags->val & topt->opt->bit)
8931 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8935 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8938 struct trace_option_dentry *topt = filp->private_data;
8942 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8946 if (val != 0 && val != 1)
8949 if (!!(topt->flags->val & topt->opt->bit) != val) {
8950 mutex_lock(&trace_types_lock);
8951 ret = __set_tracer_option(topt->tr, topt->flags,
8953 mutex_unlock(&trace_types_lock);
8963 static int tracing_open_options(struct inode *inode, struct file *filp)
8965 struct trace_option_dentry *topt = inode->i_private;
8968 ret = tracing_check_open_get_tr(topt->tr);
8972 filp->private_data = inode->i_private;
8976 static int tracing_release_options(struct inode *inode, struct file *file)
8978 struct trace_option_dentry *topt = file->private_data;
8980 trace_array_put(topt->tr);
8984 static const struct file_operations trace_options_fops = {
8985 .open = tracing_open_options,
8986 .read = trace_options_read,
8987 .write = trace_options_write,
8988 .llseek = generic_file_llseek,
8989 .release = tracing_release_options,
8993 * In order to pass in both the trace_array descriptor as well as the index
8994 * to the flag that the trace option file represents, the trace_array
8995 * has a character array of trace_flags_index[], which holds the index
8996 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8997 * The address of this character array is passed to the flag option file
8998 * read/write callbacks.
9000 * In order to extract both the index and the trace_array descriptor,
9001 * get_tr_index() uses the following algorithm.
9005 * As the pointer itself contains the address of the index (remember
9008 * Then to get the trace_array descriptor, by subtracting that index
9009 * from the ptr, we get to the start of the index itself.
9011 * ptr - idx == &index[0]
9013 * Then a simple container_of() from that pointer gets us to the
9014 * trace_array descriptor.
9016 static void get_tr_index(void *data, struct trace_array **ptr,
9017 unsigned int *pindex)
9019 *pindex = *(unsigned char *)data;
9021 *ptr = container_of(data - *pindex, struct trace_array,
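/*
 * Worked example (illustrative sketch): if the option file for bit 3 was
 * created with &tr->trace_flags_index[3] as its data pointer, then:
 *
 *	unsigned char *data = &tr->trace_flags_index[3];
 *	unsigned int index = *data;		// == 3, since index[i] == i
 *	unsigned char *base = data - index;	// == &tr->trace_flags_index[0]
 *	struct trace_array *owner =
 *		container_of(base, struct trace_array, trace_flags_index);
 *	// owner == tr
 */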
9026 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
9029 void *tr_index = filp->private_data;
9030 struct trace_array *tr;
9034 get_tr_index(tr_index, &tr, &index);
9036 if (tr->trace_flags & (1 << index))
9041 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
9045 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
9048 void *tr_index = filp->private_data;
9049 struct trace_array *tr;
9054 get_tr_index(tr_index, &tr, &index);
9056 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9060 if (val != 0 && val != 1)
9063 mutex_lock(&event_mutex);
9064 mutex_lock(&trace_types_lock);
9065 ret = set_tracer_flag(tr, 1 << index, val);
9066 mutex_unlock(&trace_types_lock);
9067 mutex_unlock(&event_mutex);
9077 static const struct file_operations trace_options_core_fops = {
9078 .open = tracing_open_generic,
9079 .read = trace_options_core_read,
9080 .write = trace_options_core_write,
9081 .llseek = generic_file_llseek,
9084 struct dentry *trace_create_file(const char *name,
9086 struct dentry *parent,
9088 const struct file_operations *fops)
9092 ret = tracefs_create_file(name, mode, parent, data, fops);
9094 pr_warn("Could not create tracefs '%s' entry\n", name);
9100 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9102 struct dentry *d_tracer;
9107 d_tracer = tracing_get_dentry(tr);
9108 if (IS_ERR(d_tracer))
9111 tr->options = tracefs_create_dir("options", d_tracer);
9113 pr_warn("Could not create tracefs directory 'options'\n");
9121 create_trace_option_file(struct trace_array *tr,
9122 struct trace_option_dentry *topt,
9123 struct tracer_flags *flags,
9124 struct tracer_opt *opt)
9126 struct dentry *t_options;
9128 t_options = trace_options_init_dentry(tr);
9132 topt->flags = flags;
9136 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9137 t_options, topt, &trace_options_fops);
9142 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9144 struct trace_option_dentry *topts;
9145 struct trace_options *tr_topts;
9146 struct tracer_flags *flags;
9147 struct tracer_opt *opts;
9154 flags = tracer->flags;
9156 if (!flags || !flags->opts)
9160 * If this is an instance, only create flags for tracers
9161 * the instance may have.
9163 if (!trace_ok_for_array(tracer, tr))
9166 for (i = 0; i < tr->nr_topts; i++) {
9167 /* Make sure there are no duplicate flags. */
9168 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9174 for (cnt = 0; opts[cnt].name; cnt++)
9177 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9181 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9188 tr->topts = tr_topts;
9189 tr->topts[tr->nr_topts].tracer = tracer;
9190 tr->topts[tr->nr_topts].topts = topts;
9193 for (cnt = 0; opts[cnt].name; cnt++) {
9194 create_trace_option_file(tr, &topts[cnt], flags,
9196 MEM_FAIL(topts[cnt].entry == NULL,
9197 "Failed to create trace option: %s",
9202 static struct dentry *
9203 create_trace_option_core_file(struct trace_array *tr,
9204 const char *option, long index)
9206 struct dentry *t_options;
9208 t_options = trace_options_init_dentry(tr);
9212 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9213 (void *)&tr->trace_flags_index[index],
9214 &trace_options_core_fops);
9217 static void create_trace_options_dir(struct trace_array *tr)
9219 struct dentry *t_options;
9220 bool top_level = tr == &global_trace;
9223 t_options = trace_options_init_dentry(tr);
9227 for (i = 0; trace_options[i]; i++) {
9229 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9230 create_trace_option_core_file(tr, trace_options[i], i);
9235 rb_simple_read(struct file *filp, char __user *ubuf,
9236 size_t cnt, loff_t *ppos)
9238 struct trace_array *tr = filp->private_data;
9242 r = tracer_tracing_is_on(tr);
9243 r = sprintf(buf, "%d\n", r);
9245 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9249 rb_simple_write(struct file *filp, const char __user *ubuf,
9250 size_t cnt, loff_t *ppos)
9252 struct trace_array *tr = filp->private_data;
9253 struct trace_buffer *buffer = tr->array_buffer.buffer;
9257 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9262 mutex_lock(&trace_types_lock);
9263 if (!!val == tracer_tracing_is_on(tr)) {
9264 val = 0; /* do nothing */
9266 tracer_tracing_on(tr);
9267 if (tr->current_trace->start)
9268 tr->current_trace->start(tr);
9270 tracer_tracing_off(tr);
9271 if (tr->current_trace->stop)
9272 tr->current_trace->stop(tr);
9273 /* Wake up any waiters */
9274 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9276 mutex_unlock(&trace_types_lock);
9284 static const struct file_operations rb_simple_fops = {
9285 .open = tracing_open_generic_tr,
9286 .read = rb_simple_read,
9287 .write = rb_simple_write,
9288 .release = tracing_release_generic_tr,
9289 .llseek = default_llseek,
9293 buffer_percent_read(struct file *filp, char __user *ubuf,
9294 size_t cnt, loff_t *ppos)
9296 struct trace_array *tr = filp->private_data;
9300 r = tr->buffer_percent;
9301 r = sprintf(buf, "%d\n", r);
9303 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9307 buffer_percent_write(struct file *filp, const char __user *ubuf,
9308 size_t cnt, loff_t *ppos)
9310 struct trace_array *tr = filp->private_data;
9314 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9321 tr->buffer_percent = val;
9328 static const struct file_operations buffer_percent_fops = {
9329 .open = tracing_open_generic_tr,
9330 .read = buffer_percent_read,
9331 .write = buffer_percent_write,
9332 .release = tracing_release_generic_tr,
9333 .llseek = default_llseek,
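/*
 * Example (illustrative sketch): buffer_percent is the watermark passed to
 * wait_on_pipe() in the splice path above; roughly, 0 wakes waiting
 * readers as soon as any data is present and 100 only once the buffer is
 * full (the default is set to 50 later in init_tracer_tracefs()). Mount
 * point assumed.
 *
 *	int fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "0", 1);	// stream data with minimal latency
 *		close(fd);
 *	}
 */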
9336 static struct dentry *trace_instance_dir;
9339 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9342 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9344 enum ring_buffer_flags rb_flags;
9346 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9350 buf->buffer = ring_buffer_alloc(size, rb_flags);
9354 buf->data = alloc_percpu(struct trace_array_cpu);
9356 ring_buffer_free(buf->buffer);
9361 /* Allocate the first page for all buffers */
9362 set_buffer_entries(&tr->array_buffer,
9363 ring_buffer_size(tr->array_buffer.buffer, 0));
9368 static void free_trace_buffer(struct array_buffer *buf)
9371 ring_buffer_free(buf->buffer);
9373 free_percpu(buf->data);
9378 static int allocate_trace_buffers(struct trace_array *tr, int size)
9382 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9386 #ifdef CONFIG_TRACER_MAX_TRACE
9387 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9388 allocate_snapshot ? size : 1);
9389 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9390 free_trace_buffer(&tr->array_buffer);
9393 tr->allocated_snapshot = allocate_snapshot;
9395 allocate_snapshot = false;
9401 static void free_trace_buffers(struct trace_array *tr)
9406 free_trace_buffer(&tr->array_buffer);
9408 #ifdef CONFIG_TRACER_MAX_TRACE
9409 free_trace_buffer(&tr->max_buffer);
9413 static void init_trace_flags_index(struct trace_array *tr)
9417 /* Used by the trace options files */
9418 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9419 tr->trace_flags_index[i] = i;
9422 static void __update_tracer_options(struct trace_array *tr)
9426 for (t = trace_types; t; t = t->next)
9427 add_tracer_options(tr, t);
9430 static void update_tracer_options(struct trace_array *tr)
9432 mutex_lock(&trace_types_lock);
9433 tracer_options_updated = true;
9434 __update_tracer_options(tr);
9435 mutex_unlock(&trace_types_lock);
9438 /* Must have trace_types_lock held */
9439 struct trace_array *trace_array_find(const char *instance)
9441 struct trace_array *tr, *found = NULL;
9443 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9444 if (tr->name && strcmp(tr->name, instance) == 0) {
9453 struct trace_array *trace_array_find_get(const char *instance)
9455 struct trace_array *tr;
9457 mutex_lock(&trace_types_lock);
9458 tr = trace_array_find(instance);
9461 mutex_unlock(&trace_types_lock);
9466 static int trace_array_create_dir(struct trace_array *tr)
9470 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9474 ret = event_trace_add_tracer(tr->dir, tr);
9476 tracefs_remove(tr->dir);
9480 init_tracer_tracefs(tr, tr->dir);
9481 __update_tracer_options(tr);
9486 static struct trace_array *trace_array_create(const char *name)
9488 struct trace_array *tr;
9492 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9494 return ERR_PTR(ret);
9496 tr->name = kstrdup(name, GFP_KERNEL);
9500 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9503 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9506 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9508 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9510 raw_spin_lock_init(&tr->start_lock);
9512 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9514 tr->current_trace = &nop_trace;
9516 INIT_LIST_HEAD(&tr->systems);
9517 INIT_LIST_HEAD(&tr->events);
9518 INIT_LIST_HEAD(&tr->hist_vars);
9519 INIT_LIST_HEAD(&tr->err_log);
9521 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9524 if (ftrace_allocate_ftrace_ops(tr) < 0)
9527 ftrace_init_trace_array(tr);
9529 init_trace_flags_index(tr);
9531 if (trace_instance_dir) {
9532 ret = trace_array_create_dir(tr);
9536 __trace_early_add_events(tr);
9538 list_add(&tr->list, &ftrace_trace_arrays);
9545 ftrace_free_ftrace_ops(tr);
9546 free_trace_buffers(tr);
9547 free_cpumask_var(tr->pipe_cpumask);
9548 free_cpumask_var(tr->tracing_cpumask);
9552 return ERR_PTR(ret);
9555 static int instance_mkdir(const char *name)
9557 struct trace_array *tr;
9560 mutex_lock(&event_mutex);
9561 mutex_lock(&trace_types_lock);
9564 if (trace_array_find(name))
9567 tr = trace_array_create(name);
9569 ret = PTR_ERR_OR_ZERO(tr);
9572 mutex_unlock(&trace_types_lock);
9573 mutex_unlock(&event_mutex);
9578 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9579 * @name: The name of the trace array to be looked up/created.
9581 * Returns a pointer to the trace array with the given name, or
9582 * NULL if it cannot be created.
9584 * NOTE: This function increments the reference counter associated with the
9585 * trace array returned. This makes sure it cannot be freed while in use.
9586 * Use trace_array_put() once the trace array is no longer needed.
9587 * If the trace_array is to be freed, trace_array_destroy() needs to
9588 * be called after the trace_array_put(), or simply let user space delete
9589 * it from the tracefs instances directory. But until the
9590 * trace_array_put() is called, user space can not delete it.
9593 struct trace_array *trace_array_get_by_name(const char *name)
9595 struct trace_array *tr;
9597 mutex_lock(&event_mutex);
9598 mutex_lock(&trace_types_lock);
9600 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9601 if (tr->name && strcmp(tr->name, name) == 0)
9605 tr = trace_array_create(name);
9613 mutex_unlock(&trace_types_lock);
9614 mutex_unlock(&event_mutex);
9617 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
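/*
 * Example (illustrative sketch of module usage, instance name made up):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *
 *	// use the instance, e.g. trace_array_printk(tr, _THIS_IP_, "hi\n");
 *
 *	trace_array_put(tr);		// drop the reference taken above
 *	trace_array_destroy(tr);	// optional: remove the instance too
 */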
9619 static int __remove_instance(struct trace_array *tr)
9623 /* Reference counter for a newly created trace array = 1. */
9624 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9627 list_del(&tr->list);
9629 /* Disable all the flags that were enabled coming in */
9630 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9631 if ((1 << i) & ZEROED_TRACE_FLAGS)
9632 set_tracer_flag(tr, 1 << i, 0);
9635 tracing_set_nop(tr);
9636 clear_ftrace_function_probes(tr);
9637 event_trace_del_tracer(tr);
9638 ftrace_clear_pids(tr);
9639 ftrace_destroy_function_files(tr);
9640 tracefs_remove(tr->dir);
9641 free_percpu(tr->last_func_repeats);
9642 free_trace_buffers(tr);
9643 clear_tracing_err_log(tr);
9645 for (i = 0; i < tr->nr_topts; i++) {
9646 kfree(tr->topts[i].topts);
9650 free_cpumask_var(tr->pipe_cpumask);
9651 free_cpumask_var(tr->tracing_cpumask);
9658 int trace_array_destroy(struct trace_array *this_tr)
9660 struct trace_array *tr;
9666 mutex_lock(&event_mutex);
9667 mutex_lock(&trace_types_lock);
9671 /* Make sure the trace array exists before destroying it. */
9672 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9673 if (tr == this_tr) {
9674 ret = __remove_instance(tr);
9679 mutex_unlock(&trace_types_lock);
9680 mutex_unlock(&event_mutex);
9684 EXPORT_SYMBOL_GPL(trace_array_destroy);
9686 static int instance_rmdir(const char *name)
9688 struct trace_array *tr;
9691 mutex_lock(&event_mutex);
9692 mutex_lock(&trace_types_lock);
9695 tr = trace_array_find(name);
9697 ret = __remove_instance(tr);
9699 mutex_unlock(&trace_types_lock);
9700 mutex_unlock(&event_mutex);
9705 static __init void create_trace_instances(struct dentry *d_tracer)
9707 struct trace_array *tr;
9709 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9712 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9715 mutex_lock(&event_mutex);
9716 mutex_lock(&trace_types_lock);
9718 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9721 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9722 "Failed to create instance directory\n"))
9726 mutex_unlock(&trace_types_lock);
9727 mutex_unlock(&event_mutex);
9731 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9733 struct trace_event_file *file;
9736 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9737 tr, &show_traces_fops);
9739 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9740 tr, &set_tracer_fops);
9742 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9743 tr, &tracing_cpumask_fops);
9745 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9746 tr, &tracing_iter_fops);
9748 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9751 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9752 tr, &tracing_pipe_fops);
9754 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9755 tr, &tracing_entries_fops);
9757 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9758 tr, &tracing_total_entries_fops);
9760 trace_create_file("free_buffer", 0200, d_tracer,
9761 tr, &tracing_free_buffer_fops);
9763 trace_create_file("trace_marker", 0220, d_tracer,
9764 tr, &tracing_mark_fops);
9766 file = __find_event_file(tr, "ftrace", "print");
9767 if (file && file->ef)
9768 eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
9769 file, &event_trigger_fops);
9770 tr->trace_marker_file = file;
9772 trace_create_file("trace_marker_raw", 0220, d_tracer,
9773 tr, &tracing_mark_raw_fops);
9775 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9778 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9779 tr, &rb_simple_fops);
9781 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9782 &trace_time_stamp_mode_fops);
9784 tr->buffer_percent = 50;
9786 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9787 tr, &buffer_percent_fops);
9789 create_trace_options_dir(tr);
9791 #ifdef CONFIG_TRACER_MAX_TRACE
9792 trace_create_maxlat_file(tr, d_tracer);
9795 if (ftrace_create_function_files(tr, d_tracer))
9796 MEM_FAIL(1, "Could not allocate function filter files");
9798 #ifdef CONFIG_TRACER_SNAPSHOT
9799 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9800 tr, &snapshot_fops);
9803 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9804 tr, &tracing_err_log_fops);
9806 for_each_tracing_cpu(cpu)
9807 tracing_init_tracefs_percpu(tr, cpu);
9809 ftrace_init_tracefs(tr, d_tracer);
9812 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9814 struct vfsmount *mnt;
9815 struct file_system_type *type;
9818 * To maintain backward compatibility for tools that mount
9819 * debugfs to get to the tracing facility, tracefs is automatically
9820 * mounted to the debugfs/tracing directory.
9822 type = get_fs_type("tracefs");
9825 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9826 put_filesystem(type);
9835 * tracing_init_dentry - initialize top level trace array
9837 * This is called when creating files or directories in the tracing
9838 * directory. It is called via fs_initcall() by any of the boot up code
9839 * and ensures the dentry of the top level tracing directory is set up, returning 0 on success.
9841 int tracing_init_dentry(void)
9843 struct trace_array *tr = &global_trace;
9845 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9846 pr_warn("Tracing disabled due to lockdown\n");
9850 /* The top level trace array uses NULL as parent */
9854 if (WARN_ON(!tracefs_initialized()))
9858 * As there may still be users that expect the tracing
9859 * files to exist in debugfs/tracing, we must automount
9860 * the tracefs file system there, so older tools still
9861 * work with the newer kernel.
9863 tr->dir = debugfs_create_automount("tracing", NULL,
9864 trace_automount, NULL);
9869 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9870 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9872 static struct workqueue_struct *eval_map_wq __initdata;
9873 static struct work_struct eval_map_work __initdata;
9874 static struct work_struct tracerfs_init_work __initdata;
9876 static void __init eval_map_work_func(struct work_struct *work)
9880 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9881 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9884 static int __init trace_eval_init(void)
9886 INIT_WORK(&eval_map_work, eval_map_work_func);
9888 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9890 pr_err("Unable to allocate eval_map_wq\n");
9892 eval_map_work_func(&eval_map_work);
9896 queue_work(eval_map_wq, &eval_map_work);
9900 subsys_initcall(trace_eval_init);
9902 static int __init trace_eval_sync(void)
9904 /* Make sure the eval map updates are finished */
9906 destroy_workqueue(eval_map_wq);
9910 late_initcall_sync(trace_eval_sync);
9913 #ifdef CONFIG_MODULES
9914 static void trace_module_add_evals(struct module *mod)
9916 if (!mod->num_trace_evals)
9920 * Modules with bad taint do not have events created, so do
9921 * not bother with their enums either.
9923 if (trace_module_has_bad_taint(mod))
9926 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9929 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9930 static void trace_module_remove_evals(struct module *mod)
9932 union trace_eval_map_item *map;
9933 union trace_eval_map_item **last = &trace_eval_maps;
9935 if (!mod->num_trace_evals)
9938 mutex_lock(&trace_eval_mutex);
9940 map = trace_eval_maps;
9943 if (map->head.mod == mod)
9945 map = trace_eval_jmp_to_tail(map);
9946 last = &map->tail.next;
9947 map = map->tail.next;
9952 *last = trace_eval_jmp_to_tail(map)->tail.next;
9955 mutex_unlock(&trace_eval_mutex);
9958 static inline void trace_module_remove_evals(struct module *mod) { }
9959 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9961 static int trace_module_notify(struct notifier_block *self,
9962 unsigned long val, void *data)
9964 struct module *mod = data;
9967 case MODULE_STATE_COMING:
9968 trace_module_add_evals(mod);
9970 case MODULE_STATE_GOING:
9971 trace_module_remove_evals(mod);
9978 static struct notifier_block trace_module_nb = {
9979 .notifier_call = trace_module_notify,
9982 #endif /* CONFIG_MODULES */
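
/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * module-notifier pattern used above.  example_module_notify and
 * example_module_nb are hypothetical names; the callback is invoked for
 * every module state change and dispatches on MODULE_STATE_COMING/GOING
 * exactly as trace_module_notify() does for eval maps.
 */
#if 0	/* illustrative only, not compiled */
static int example_module_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		/* module is loading: register per-module data here */
		break;
	case MODULE_STATE_GOING:
		/* module is unloading: tear the data back down */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_module_nb = {
	.notifier_call = example_module_notify,
};

static int __init example_module_watch_init(void)
{
	return register_module_notifier(&example_module_nb);
}
#endif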
static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}
static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);
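
/*
 * Editor's note: a minimal sketch (not part of the original file) of how a
 * new top-level tracefs file could be wired up with trace_create_file(),
 * mirroring the calls in tracer_init_tracefs_work_func() above.  The file
 * name, fops and read handler are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static ssize_t example_read(struct file *filp, char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	static const char msg[] = "example\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations example_fops = {
	.open	= tracing_open_generic,
	.read	= example_read,
	.llseek	= generic_file_llseek,
};

static __init void example_create_file(void)
{
	/* NULL parent means the top level tracing directory */
	trace_create_file("example", TRACE_MODE_READ, NULL,
			  NULL, &example_fops);
}
#endif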
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops)
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_DONE;
}
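
/*
 * Editor's illustration (not part of the original file): the handler above
 * only fires when ftrace_dump_on_oops is set.  That is normally done with
 * the "ftrace_dump_on_oops" kernel command line option or, at run time,
 * through the corresponding sysctl, e.g.:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/ftrace_dump_on_oops", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1\n", f);	// 1 = dump all CPUs, 2 = dump only the oopsing CPU
 *		fclose(f);
 *		return 0;
 *	}
 */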
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Cannot use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill in all we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
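
/*
 * Editor's note: a minimal sketch (not part of the original file) of how an
 * external user of the exported ftrace_dump() might invoke it from an error
 * path.  example_fatal_error() is a hypothetical function; DUMP_ALL and
 * DUMP_ORIG come from enum ftrace_dump_mode.
 */
#if 0	/* illustrative only, not compiled */
static void example_fatal_error(void)
{
	pr_err("example: unrecoverable state, dumping trace buffer\n");

	/* Dump every CPU's buffer; DUMP_ORIG would dump only this CPU. */
	ftrace_dump(DUMP_ALL);
}
#endif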
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
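
/*
 * Editor's note: a minimal sketch (not part of the original file) of how a
 * tracefs write handler can use trace_parse_run_command(): the helper splits
 * the user buffer into '\n'-terminated lines, strips '#' comments and calls
 * the createfn callback once per line.  example_create_cmd/example_write are
 * hypothetical names.
 */
#if 0	/* illustrative only, not compiled */
static int example_create_cmd(const char *cmd)
{
	/* parse one command line, e.g. with strsep()/kstrtox() helpers */
	pr_info("example cmd: %s\n", cmd);
	return 0;
}

static ssize_t example_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_create_cmd);
}
#endif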
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) == NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif
__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
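
/*
 * Editor's note (not part of the original file): boot_instance_info is a
 * tab-separated list of instance descriptions, each of which is a
 * comma-separated "name[,event...]" token list (assuming the trace_instance=
 * boot parameter is what fills this buffer, one entry per use of the
 * parameter).  A minimal sketch of the same strsep() tokenizing, with a
 * hypothetical buffer and function name:
 */
#if 0	/* illustrative only, not compiled */
static void __init example_parse_instances(char *info)
{
	char *curr_str, *tok;

	while ((curr_str = strsep(&info, "\t"))) {
		/* first token is the instance name ... */
		tok = strsep(&curr_str, ",");
		pr_info("instance: %s\n", tok);
		/* ... remaining tokens are events to enable in it */
		while ((tok = strsep(&curr_str, ",")))
			pr_info("  event: %s\n", tok);
	}
}
#endif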
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
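
/*
 * Editor's illustration (not part of the original file): the snapshot taken
 * here ends up in the per-instance "snapshot" file.  Assuming tracefs is
 * mounted at /sys/kernel/tracing, a user-space reader for the top-level
 * instance would look like:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[4096];
 *		FILE *f = fopen("/sys/kernel/tracing/snapshot", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */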
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Cannot set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
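
/*
 * Editor's illustration (not part of the original file): the same clock
 * switch can be done by hand through the "trace_clock" tracefs file
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/tracing/trace_clock", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("global\n", f);	// or "local" to go back to the default
 *		fclose(f);
 *		return 0;
 *	}
 */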
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);