1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 #include "trace_output.h"
58 * On boot up, the ring buffer is set to the minimum size, so that
59 * we do not waste memory on systems that are not using tracing.
61 bool ring_buffer_expanded;
63 #ifdef CONFIG_FTRACE_STARTUP_TEST
65 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
71 static bool __read_mostly tracing_selftest_running;
 * If boot-time tracing (tracers/events set up via the kernel command line)
 * is running, we do not want to run the selftests.
77 bool __read_mostly tracing_selftest_disabled;
79 void __init disable_tracing_selftest(const char *reason)
81 if (!tracing_selftest_disabled) {
82 tracing_selftest_disabled = true;
83 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 #define tracing_selftest_running 0
88 #define tracing_selftest_disabled 0
91 /* Pipe tracepoints to printk */
92 static struct trace_iterator *tracepoint_print_iter;
93 int tracepoint_printk;
94 static bool tracepoint_printk_stop_on_boot __initdata;
95 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
97 /* For tracers that don't implement custom flags */
98 static struct tracer_opt dummy_tracer_opt[] = {
103 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event occurred.
113 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
116 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
121 static int tracing_disabled = 1;
123 cpumask_var_t __read_mostly tracing_buffer_mask;
126 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
128 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
129 * is set, then ftrace_dump is called. This will output the contents
130 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set 1 if you want to dump the buffers of all CPUs.
 * Set 2 if you want to dump only the buffer of the CPU that triggered the oops.
141 enum ftrace_dump_mode ftrace_dump_on_oops;
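/*
 * For example (illustrative, matching the parsing in set_ftrace_dump_on_oops()
 * below), on the kernel command line:
 *
 *	ftrace_dump_on_oops		- dump all CPU buffers (DUMP_ALL)
 *	ftrace_dump_on_oops=orig_cpu	- dump only the oopsing CPU (DUMP_ORIG)
 *
 * or at run time:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */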
143 /* When set, tracing will stop when a WARN*() is hit */
144 int __disable_trace_on_warning;
146 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
147 /* Map of enums to their values, for "eval_map" file */
148 struct trace_eval_map_head {
150 unsigned long length;
153 union trace_eval_map_item;
155 struct trace_eval_map_tail {
157 * "end" is first and points to NULL as it must be different
158 * than "mod" or "eval_string"
160 union trace_eval_map_item *next;
161 const char *end; /* points to NULL */
164 static DEFINE_MUTEX(trace_eval_mutex);
167 * The trace_eval_maps are saved in an array with two extra elements,
168 * one at the beginning, and one at the end. The beginning item contains
169 * the count of the saved maps (head.length), and the module they
170 * belong to if not built in (head.mod). The ending item contains a
171 * pointer to the next array of saved eval_map items.
173 union trace_eval_map_item {
174 struct trace_eval_map map;
175 struct trace_eval_map_head head;
176 struct trace_eval_map_tail tail;
179 static union trace_eval_map_item *trace_eval_maps;
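/*
 * A sketch of the resulting layout (illustrative only):
 *
 *	trace_eval_maps --> [ head | map 0 | map 1 | ... | map N-1 | tail ]
 *
 * where head.length == N (and head.mod is set for module maps), and
 * tail.next points to the next such array, or NULL for the last one.
 */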
180 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
182 int tracing_set_tracer(struct trace_array *tr, const char *buf);
183 static void ftrace_trace_userstack(struct trace_array *tr,
184 struct trace_buffer *buffer,
185 unsigned int trace_ctx);
187 #define MAX_TRACER_SIZE 100
188 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
189 static char *default_bootup_tracer;
191 static bool allocate_snapshot;
192 static bool snapshot_at_boot;
194 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_instance_index;
197 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
198 static int boot_snapshot_index;
200 static int __init set_cmdline_ftrace(char *str)
202 strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
203 default_bootup_tracer = bootup_tracer_buf;
204 /* We are using ftrace early, expand it */
205 ring_buffer_expanded = true;
208 __setup("ftrace=", set_cmdline_ftrace);
210 static int __init set_ftrace_dump_on_oops(char *str)
212 if (*str++ != '=' || !*str || !strcmp("1", str)) {
213 ftrace_dump_on_oops = DUMP_ALL;
217 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
218 ftrace_dump_on_oops = DUMP_ORIG;
224 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
226 static int __init stop_trace_on_warning(char *str)
228 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
229 __disable_trace_on_warning = 1;
232 __setup("traceoff_on_warning", stop_trace_on_warning);
234 static int __init boot_alloc_snapshot(char *str)
236 char *slot = boot_snapshot_info + boot_snapshot_index;
237 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
242 if (strlen(str) >= left)
245 ret = snprintf(slot, left, "%s\t", str);
246 boot_snapshot_index += ret;
248 allocate_snapshot = true;
249 /* We also need the main ring buffer expanded */
250 ring_buffer_expanded = true;
254 __setup("alloc_snapshot", boot_alloc_snapshot);
257 static int __init boot_snapshot(char *str)
259 snapshot_at_boot = true;
260 boot_alloc_snapshot(str);
263 __setup("ftrace_boot_snapshot", boot_snapshot);
266 static int __init boot_instance(char *str)
268 char *slot = boot_instance_info + boot_instance_index;
269 int left = sizeof(boot_instance_info) - boot_instance_index;
272 if (strlen(str) >= left)
275 ret = snprintf(slot, left, "%s\t", str);
276 boot_instance_index += ret;
280 __setup("trace_instance=", boot_instance);
283 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
285 static int __init set_trace_boot_options(char *str)
287 strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
290 __setup("trace_options=", set_trace_boot_options);
292 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
293 static char *trace_boot_clock __initdata;
295 static int __init set_trace_boot_clock(char *str)
297 strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
298 trace_boot_clock = trace_boot_clock_buf;
301 __setup("trace_clock=", set_trace_boot_clock);
303 static int __init set_tracepoint_printk(char *str)
305 /* Ignore the "tp_printk_stop_on_boot" param */
309 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
310 tracepoint_printk = 1;
313 __setup("tp_printk", set_tracepoint_printk);
315 static int __init set_tracepoint_printk_stop(char *str)
317 tracepoint_printk_stop_on_boot = true;
320 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
322 unsigned long long ns2usecs(u64 nsec)
330 trace_process_export(struct trace_export *export,
331 struct ring_buffer_event *event, int flag)
333 struct trace_entry *entry;
334 unsigned int size = 0;
336 if (export->flags & flag) {
337 entry = ring_buffer_event_data(event);
338 size = ring_buffer_event_length(event);
339 export->write(export, entry, size);
343 static DEFINE_MUTEX(ftrace_export_lock);
345 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
347 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
348 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
349 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
351 static inline void ftrace_exports_enable(struct trace_export *export)
353 if (export->flags & TRACE_EXPORT_FUNCTION)
354 static_branch_inc(&trace_function_exports_enabled);
356 if (export->flags & TRACE_EXPORT_EVENT)
357 static_branch_inc(&trace_event_exports_enabled);
359 if (export->flags & TRACE_EXPORT_MARKER)
360 static_branch_inc(&trace_marker_exports_enabled);
363 static inline void ftrace_exports_disable(struct trace_export *export)
365 if (export->flags & TRACE_EXPORT_FUNCTION)
366 static_branch_dec(&trace_function_exports_enabled);
368 if (export->flags & TRACE_EXPORT_EVENT)
369 static_branch_dec(&trace_event_exports_enabled);
371 if (export->flags & TRACE_EXPORT_MARKER)
372 static_branch_dec(&trace_marker_exports_enabled);
375 static void ftrace_exports(struct ring_buffer_event *event, int flag)
377 struct trace_export *export;
379 preempt_disable_notrace();
381 export = rcu_dereference_raw_check(ftrace_exports_list);
383 trace_process_export(export, event, flag);
384 export = rcu_dereference_raw_check(export->next);
387 preempt_enable_notrace();
391 add_trace_export(struct trace_export **list, struct trace_export *export)
393 rcu_assign_pointer(export->next, *list);
 * We are inserting export into the list, but another
 * CPU might be walking that list. We need to make sure
 * the export->next pointer is valid before another CPU sees
 * the export pointer included in the list.
400 rcu_assign_pointer(*list, export);
404 rm_trace_export(struct trace_export **list, struct trace_export *export)
406 struct trace_export **p;
408 for (p = list; *p != NULL; p = &(*p)->next)
415 rcu_assign_pointer(*p, (*p)->next);
421 add_ftrace_export(struct trace_export **list, struct trace_export *export)
423 ftrace_exports_enable(export);
425 add_trace_export(list, export);
429 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
433 ret = rm_trace_export(list, export);
434 ftrace_exports_disable(export);
439 int register_ftrace_export(struct trace_export *export)
441 if (WARN_ON_ONCE(!export->write))
444 mutex_lock(&ftrace_export_lock);
446 add_ftrace_export(&ftrace_exports_list, export);
448 mutex_unlock(&ftrace_export_lock);
452 EXPORT_SYMBOL_GPL(register_ftrace_export);
454 int unregister_ftrace_export(struct trace_export *export)
458 mutex_lock(&ftrace_export_lock);
460 ret = rm_ftrace_export(&ftrace_exports_list, export);
462 mutex_unlock(&ftrace_export_lock);
466 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
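/*
 * A minimal usage sketch (illustrative, not part of this file): a module
 * that wants a copy of function trace data could register an export.
 * The names below are hypothetical.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		pr_debug("exporting %u bytes of trace data\n", size);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */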
468 /* trace_flags holds trace_options default values */
469 #define TRACE_DEFAULT_FLAGS \
470 (FUNCTION_DEFAULT_FLAGS | \
471 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
472 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
473 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
474 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
477 /* trace_options that are only supported by global_trace */
478 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
479 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
481 /* trace_flags that are default zero for instances */
482 #define ZEROED_TRACE_FLAGS \
483 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
486 * The global_trace is the descriptor that holds the top-level tracing
487 * buffers for the live tracing.
489 static struct trace_array global_trace = {
490 .trace_flags = TRACE_DEFAULT_FLAGS,
493 LIST_HEAD(ftrace_trace_arrays);
495 int trace_array_get(struct trace_array *this_tr)
497 struct trace_array *tr;
500 mutex_lock(&trace_types_lock);
501 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
508 mutex_unlock(&trace_types_lock);
513 static void __trace_array_put(struct trace_array *this_tr)
515 WARN_ON(!this_tr->ref);
520 * trace_array_put - Decrement the reference counter for this trace array.
521 * @this_tr : pointer to the trace array
523 * NOTE: Use this when we no longer need the trace array returned by
524 * trace_array_get_by_name(). This ensures the trace array can be later
528 void trace_array_put(struct trace_array *this_tr)
533 mutex_lock(&trace_types_lock);
534 __trace_array_put(this_tr);
535 mutex_unlock(&trace_types_lock);
537 EXPORT_SYMBOL_GPL(trace_array_put);
539 int tracing_check_open_get_tr(struct trace_array *tr)
543 ret = security_locked_down(LOCKDOWN_TRACEFS);
547 if (tracing_disabled)
550 if (tr && trace_array_get(tr) < 0)
556 int call_filter_check_discard(struct trace_event_call *call, void *rec,
557 struct trace_buffer *buffer,
558 struct ring_buffer_event *event)
560 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
561 !filter_match_preds(call->filter, rec)) {
562 __trace_event_discard_commit(buffer, event);
570 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
571 * @filtered_pids: The list of pids to check
572 * @search_pid: The PID to find in @filtered_pids
574 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
577 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
579 return trace_pid_list_is_set(filtered_pids, search_pid);
583 * trace_ignore_this_task - should a task be ignored for tracing
584 * @filtered_pids: The list of pids to check
585 * @filtered_no_pids: The list of pids not to be traced
586 * @task: The task that should be ignored if not filtered
588 * Checks if @task should be traced or not from @filtered_pids.
589 * Returns true if @task should *NOT* be traced.
590 * Returns false if @task should be traced.
593 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
594 struct trace_pid_list *filtered_no_pids,
595 struct task_struct *task)
598 * If filtered_no_pids is not empty, and the task's pid is listed
599 * in filtered_no_pids, then return true.
600 * Otherwise, if filtered_pids is empty, that means we can
601 * trace all tasks. If it has content, then only trace pids
602 * within filtered_pids.
605 return (filtered_pids &&
606 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
608 trace_find_filtered_pid(filtered_no_pids, task->pid));
612 * trace_filter_add_remove_task - Add or remove a task from a pid_list
613 * @pid_list: The list to modify
614 * @self: The current task for fork or NULL for exit
615 * @task: The task to add or remove
617 * If adding a task, if @self is defined, the task is only added if @self
618 * is also included in @pid_list. This happens on fork and tasks should
619 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
623 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
624 struct task_struct *self,
625 struct task_struct *task)
630 /* For forks, we only add if the forking task is listed */
632 if (!trace_find_filtered_pid(pid_list, self->pid))
636 /* "self" is set for forks, and NULL for exits */
638 trace_pid_list_set(pid_list, task->pid);
640 trace_pid_list_clear(pid_list, task->pid);
644 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
645 * @pid_list: The pid list to show
646 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
647 * @pos: The position of the file
649 * This is used by the seq_file "next" operation to iterate the pids
650 * listed in a trace_pid_list structure.
652 * Returns the pid+1 as we want to display pid of zero, but NULL would
653 * stop the iteration.
655 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
657 long pid = (unsigned long)v;
662 /* pid already is +1 of the actual previous bit */
663 if (trace_pid_list_next(pid_list, pid, &next) < 0)
668 /* Return pid + 1 to allow zero to be represented */
669 return (void *)(pid + 1);
673 * trace_pid_start - Used for seq_file to start reading pid lists
674 * @pid_list: The pid list to show
675 * @pos: The position of the file
677 * This is used by seq_file "start" operation to start the iteration
680 * Returns the pid+1 as we want to display pid of zero, but NULL would
681 * stop the iteration.
683 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
689 if (trace_pid_list_first(pid_list, &first) < 0)
694 /* Return pid + 1 so that zero can be the exit value */
695 for (pid++; pid && l < *pos;
696 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
702 * trace_pid_show - show the current pid in seq_file processing
703 * @m: The seq_file structure to write into
704 * @v: A void pointer of the pid (+1) value to display
 * Can be directly used by seq_file operations to display the current
 * pid value.
709 int trace_pid_show(struct seq_file *m, void *v)
711 unsigned long pid = (unsigned long)v - 1;
713 seq_printf(m, "%lu\n", pid);
717 /* 128 should be much more than enough */
718 #define PID_BUF_SIZE 127
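/*
 * Illustrative sketch (simplified, hypothetical wrapper names): trace_pid_start(),
 * trace_pid_next() and trace_pid_show() above are meant to be wired into a
 * seq_file interface roughly like this; real users add locking around
 * start/stop:
 *
 *	static void *pid_seq_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static void pid_seq_stop(struct seq_file *m, void *v) { }
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= pid_seq_start,
 *		.next	= pid_seq_next,
 *		.stop	= pid_seq_stop,
 *		.show	= trace_pid_show,
 *	};
 */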
720 int trace_pid_write(struct trace_pid_list *filtered_pids,
721 struct trace_pid_list **new_pid_list,
722 const char __user *ubuf, size_t cnt)
724 struct trace_pid_list *pid_list;
725 struct trace_parser parser;
733 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
 * Always recreate a new array. The write is an all-or-nothing
 * operation: always create a new array when the user adds new pids.
 * If the operation fails, then the current list is not modified.
742 pid_list = trace_pid_list_alloc();
744 trace_parser_put(&parser);
749 /* copy the current bits to the new max */
750 ret = trace_pid_list_first(filtered_pids, &pid);
752 trace_pid_list_set(pid_list, pid);
753 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
763 ret = trace_get_user(&parser, ubuf, cnt, &pos);
771 if (!trace_parser_loaded(&parser))
775 if (kstrtoul(parser.buffer, 0, &val))
780 if (trace_pid_list_set(pid_list, pid) < 0) {
786 trace_parser_clear(&parser);
789 trace_parser_put(&parser);
792 trace_pid_list_free(pid_list);
797 /* Cleared the list of pids */
798 trace_pid_list_free(pid_list);
802 *new_pid_list = pid_list;
807 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
811 /* Early boot up does not have a buffer yet */
813 return trace_clock_local();
815 ts = ring_buffer_time_stamp(buf->buffer);
816 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
821 u64 ftrace_now(int cpu)
823 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
827 * tracing_is_enabled - Show if global_trace has been enabled
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" so it can be used in fast paths, such as
 * by the irqsoff tracer, but it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on(), which is a little
 * slower but accurate.
835 int tracing_is_enabled(void)
838 * For quick access (irqsoff uses this in fast path), just
839 * return the mirror variable of the state of the ring buffer.
840 * It's a little racy, but we don't really care.
843 return !global_trace.buffer_disabled;
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to a multiple of the page size.
 *
 * This number is purposely set to a low number of 16384.
 * If a dump on oops happens, it is much appreciated
 * not to have to wait for all that output. In any case, this is
 * configurable at both boot time and run time.
856 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
858 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
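/*
 * For example (illustrative): the default can be overridden at boot with
 * "trace_buf_size=" (see set_buf_size() below), e.g. trace_buf_size=8M,
 * or at run time by writing a size in kilobytes to
 * /sys/kernel/tracing/buffer_size_kb.
 */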
860 /* trace_types holds a link list of available tracers. */
861 static struct tracer *trace_types __read_mostly;
864 * trace_types_lock is used to protect the trace_types list.
866 DEFINE_MUTEX(trace_types_lock);
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level protection.
 * The validity of the events (returned by ring_buffer_peek(), etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 * A) the page of the consumed events may become a normal page
 *    (not a reader page) in the ring buffer, and this page will be rewritten
 *    by the events producer.
 * B) the page of the consumed events may become a page for splice_read,
 *    and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different CPU ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
891 static DECLARE_RWSEM(all_cpu_access_lock);
892 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
894 static inline void trace_access_lock(int cpu)
896 if (cpu == RING_BUFFER_ALL_CPUS) {
897 /* gain it for accessing the whole ring buffer. */
898 down_write(&all_cpu_access_lock);
900 /* gain it for accessing a cpu ring buffer. */
902 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
903 down_read(&all_cpu_access_lock);
905 /* Secondly block other access to this @cpu ring buffer. */
906 mutex_lock(&per_cpu(cpu_access_lock, cpu));
910 static inline void trace_access_unlock(int cpu)
912 if (cpu == RING_BUFFER_ALL_CPUS) {
913 up_write(&all_cpu_access_lock);
915 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
916 up_read(&all_cpu_access_lock);
920 static inline void trace_access_lock_init(void)
924 for_each_possible_cpu(cpu)
925 mutex_init(&per_cpu(cpu_access_lock, cpu));
930 static DEFINE_MUTEX(access_lock);
932 static inline void trace_access_lock(int cpu)
935 mutex_lock(&access_lock);
938 static inline void trace_access_unlock(int cpu)
941 mutex_unlock(&access_lock);
944 static inline void trace_access_lock_init(void)
950 #ifdef CONFIG_STACKTRACE
951 static void __ftrace_trace_stack(struct trace_buffer *buffer,
952 unsigned int trace_ctx,
953 int skip, struct pt_regs *regs);
954 static inline void ftrace_trace_stack(struct trace_array *tr,
955 struct trace_buffer *buffer,
956 unsigned int trace_ctx,
957 int skip, struct pt_regs *regs);
960 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
961 unsigned int trace_ctx,
962 int skip, struct pt_regs *regs)
965 static inline void ftrace_trace_stack(struct trace_array *tr,
966 struct trace_buffer *buffer,
967 unsigned long trace_ctx,
968 int skip, struct pt_regs *regs)
974 static __always_inline void
975 trace_event_setup(struct ring_buffer_event *event,
976 int type, unsigned int trace_ctx)
978 struct trace_entry *ent = ring_buffer_event_data(event);
980 tracing_generic_entry_update(ent, type, trace_ctx);
983 static __always_inline struct ring_buffer_event *
984 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
987 unsigned int trace_ctx)
989 struct ring_buffer_event *event;
991 event = ring_buffer_lock_reserve(buffer, len);
993 trace_event_setup(event, type, trace_ctx);
998 void tracer_tracing_on(struct trace_array *tr)
1000 if (tr->array_buffer.buffer)
1001 ring_buffer_record_on(tr->array_buffer.buffer);
 * This flag is looked at when buffers haven't been allocated
 * yet, or by some tracers (like irqsoff) that just want to
 * know if the ring buffer has been disabled, but can handle
 * races where it gets disabled while we still do a record.
 * As the check is in the fast path of the tracers, it is more
 * important to be fast than accurate.
1010 tr->buffer_disabled = 0;
1011 /* Make the flag seen by readers */
1016 * tracing_on - enable tracing buffers
1018 * This function enables tracing buffers that may have been
1019 * disabled with tracing_off.
1021 void tracing_on(void)
1023 tracer_tracing_on(&global_trace);
1025 EXPORT_SYMBOL_GPL(tracing_on);
1028 static __always_inline void
1029 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1031 __this_cpu_write(trace_taskinfo_save, true);
1033 /* If this is the temp buffer, we need to commit fully */
1034 if (this_cpu_read(trace_buffered_event) == event) {
1035 /* Length is in event->array[0] */
1036 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1037 /* Release the temp buffer */
1038 this_cpu_dec(trace_buffered_event_cnt);
1039 /* ring_buffer_unlock_commit() enables preemption */
1040 preempt_enable_notrace();
1042 ring_buffer_unlock_commit(buffer);
1045 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1046 const char *str, int size)
1048 struct ring_buffer_event *event;
1049 struct trace_buffer *buffer;
1050 struct print_entry *entry;
1051 unsigned int trace_ctx;
1054 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1057 if (unlikely(tracing_selftest_running && tr == &global_trace))
1060 if (unlikely(tracing_disabled))
1063 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1065 trace_ctx = tracing_gen_ctx();
1066 buffer = tr->array_buffer.buffer;
1067 ring_buffer_nest_start(buffer);
1068 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1075 entry = ring_buffer_event_data(event);
1078 memcpy(&entry->buf, str, size);
1080 /* Add a newline if necessary */
1081 if (entry->buf[size - 1] != '\n') {
1082 entry->buf[size] = '\n';
1083 entry->buf[size + 1] = '\0';
1085 entry->buf[size] = '\0';
1087 __buffer_unlock_commit(buffer, event);
1088 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1090 ring_buffer_nest_end(buffer);
1093 EXPORT_SYMBOL_GPL(__trace_array_puts);
1096 * __trace_puts - write a constant string into the trace buffer.
1097 * @ip: The address of the caller
1098 * @str: The constant string to write
1099 * @size: The size of the string.
1101 int __trace_puts(unsigned long ip, const char *str, int size)
1103 return __trace_array_puts(&global_trace, ip, str, size);
1105 EXPORT_SYMBOL_GPL(__trace_puts);
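/*
 * Callers normally go through the trace_puts() macro (illustrative):
 *
 *	trace_puts("reached the slow path\n");
 *
 * which uses __trace_bputs() for compile-time constant strings and falls
 * back to __trace_puts() otherwise.
 */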
1108 * __trace_bputs - write the pointer to a constant string into trace buffer
1109 * @ip: The address of the caller
1110 * @str: The constant string to write to the buffer to
1112 int __trace_bputs(unsigned long ip, const char *str)
1114 struct ring_buffer_event *event;
1115 struct trace_buffer *buffer;
1116 struct bputs_entry *entry;
1117 unsigned int trace_ctx;
1118 int size = sizeof(struct bputs_entry);
1121 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1124 if (unlikely(tracing_selftest_running || tracing_disabled))
1127 trace_ctx = tracing_gen_ctx();
1128 buffer = global_trace.array_buffer.buffer;
1130 ring_buffer_nest_start(buffer);
1131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1136 entry = ring_buffer_event_data(event);
1140 __buffer_unlock_commit(buffer, event);
1141 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1145 ring_buffer_nest_end(buffer);
1148 EXPORT_SYMBOL_GPL(__trace_bputs);
1150 #ifdef CONFIG_TRACER_SNAPSHOT
1151 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1154 struct tracer *tracer = tr->current_trace;
1155 unsigned long flags;
1158 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1159 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1163 if (!tr->allocated_snapshot) {
1164 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1165 trace_array_puts(tr, "*** stopping trace here! ***\n");
1166 tracer_tracing_off(tr);
/* Note, snapshot cannot be used when the tracer uses it */
1171 if (tracer->use_max_tr) {
1172 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1173 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1177 local_irq_save(flags);
1178 update_max_tr(tr, current, smp_processor_id(), cond_data);
1179 local_irq_restore(flags);
1182 void tracing_snapshot_instance(struct trace_array *tr)
1184 tracing_snapshot_instance_cond(tr, NULL);
1188 * tracing_snapshot - take a snapshot of the current buffer.
1190 * This causes a swap between the snapshot buffer and the current live
1191 * tracing buffer. You can use this to take snapshots of the live
1192 * trace when some condition is triggered, but continue to trace.
1194 * Note, make sure to allocate the snapshot with either
1195 * a tracing_snapshot_alloc(), or by doing it manually
1196 * with: echo 1 > /sys/kernel/tracing/snapshot
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
1201 void tracing_snapshot(void)
1203 struct trace_array *tr = &global_trace;
1205 tracing_snapshot_instance(tr);
1207 EXPORT_SYMBOL_GPL(tracing_snapshot);
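/*
 * A minimal in-kernel usage sketch (illustrative; the condition is a
 * placeholder):
 *
 *	tracing_snapshot_alloc();
 *	...
 *	if (something_went_wrong)
 *		tracing_snapshot();
 *
 * The live buffer keeps tracing after the swap, so the snapshot preserves
 * the trace leading up to the event of interest.
 */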
1210 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1211 * @tr: The tracing instance to snapshot
1212 * @cond_data: The data to be tested conditionally, and possibly saved
1214 * This is the same as tracing_snapshot() except that the snapshot is
1215 * conditional - the snapshot will only happen if the
1216 * cond_snapshot.update() implementation receiving the cond_data
1217 * returns true, which means that the trace array's cond_snapshot
1218 * update() operation used the cond_data to determine whether the
1219 * snapshot should be taken, and if it was, presumably saved it along
1220 * with the snapshot.
1222 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1224 tracing_snapshot_instance_cond(tr, cond_data);
1226 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1229 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1230 * @tr: The tracing instance
1232 * When the user enables a conditional snapshot using
1233 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1234 * with the snapshot. This accessor is used to retrieve it.
1236 * Should not be called from cond_snapshot.update(), since it takes
1237 * the tr->max_lock lock, which the code calling
1238 * cond_snapshot.update() has already done.
1240 * Returns the cond_data associated with the trace array's snapshot.
1242 void *tracing_cond_snapshot_data(struct trace_array *tr)
1244 void *cond_data = NULL;
1246 local_irq_disable();
1247 arch_spin_lock(&tr->max_lock);
1249 if (tr->cond_snapshot)
1250 cond_data = tr->cond_snapshot->cond_data;
1252 arch_spin_unlock(&tr->max_lock);
1257 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1259 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1260 struct array_buffer *size_buf, int cpu_id);
1261 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1263 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1267 if (!tr->allocated_snapshot) {
1269 /* allocate spare buffer */
1270 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1271 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1275 tr->allocated_snapshot = true;
1281 static void free_snapshot(struct trace_array *tr)
 * We don't free the ring buffer; instead, we resize it because
 * the max_tr ring buffer has some state (e.g. ring->clock) and
 * we want to preserve it.
1288 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1289 set_buffer_entries(&tr->max_buffer, 1);
1290 tracing_reset_online_cpus(&tr->max_buffer);
1291 tr->allocated_snapshot = false;
1295 * tracing_alloc_snapshot - allocate snapshot buffer.
1297 * This only allocates the snapshot buffer if it isn't already
1298 * allocated - it doesn't also take a snapshot.
1300 * This is meant to be used in cases where the snapshot buffer needs
1301 * to be set up for events that can't sleep but need to be able to
1302 * trigger a snapshot.
1304 int tracing_alloc_snapshot(void)
1306 struct trace_array *tr = &global_trace;
1309 ret = tracing_alloc_snapshot_instance(tr);
1314 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1317 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1319 * This is similar to tracing_snapshot(), but it will allocate the
1320 * snapshot buffer if it isn't already allocated. Use this only
1321 * where it is safe to sleep, as the allocation may sleep.
1323 * This causes a swap between the snapshot buffer and the current live
1324 * tracing buffer. You can use this to take snapshots of the live
1325 * trace when some condition is triggered, but continue to trace.
1327 void tracing_snapshot_alloc(void)
1331 ret = tracing_alloc_snapshot();
1337 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1340 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1341 * @tr: The tracing instance
1342 * @cond_data: User data to associate with the snapshot
1343 * @update: Implementation of the cond_snapshot update function
1345 * Check whether the conditional snapshot for the given instance has
1346 * already been enabled, or if the current tracer is already using a
1347 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1348 * save the cond_data and update function inside.
1350 * Returns 0 if successful, error otherwise.
1352 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1353 cond_update_fn_t update)
1355 struct cond_snapshot *cond_snapshot;
1358 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1362 cond_snapshot->cond_data = cond_data;
1363 cond_snapshot->update = update;
1365 mutex_lock(&trace_types_lock);
1367 ret = tracing_alloc_snapshot_instance(tr);
1371 if (tr->current_trace->use_max_tr) {
1377 * The cond_snapshot can only change to NULL without the
1378 * trace_types_lock. We don't care if we race with it going
1379 * to NULL, but we want to make sure that it's not set to
1380 * something other than NULL when we get here, which we can
1381 * do safely with only holding the trace_types_lock and not
1382 * having to take the max_lock.
1384 if (tr->cond_snapshot) {
1389 local_irq_disable();
1390 arch_spin_lock(&tr->max_lock);
1391 tr->cond_snapshot = cond_snapshot;
1392 arch_spin_unlock(&tr->max_lock);
1395 mutex_unlock(&trace_types_lock);
1400 mutex_unlock(&trace_types_lock);
1401 kfree(cond_snapshot);
1404 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
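/*
 * Illustrative sketch (hypothetical names): the caller supplies an update()
 * callback that decides, from the cond_data passed to tracing_snapshot_cond(),
 * whether the swap should actually happen:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *state = cond_data;
 *
 *		return state->hit_threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_state);
 */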
1407 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1408 * @tr: The tracing instance
1410 * Check whether the conditional snapshot for the given instance is
1411 * enabled; if so, free the cond_snapshot associated with it,
1412 * otherwise return -EINVAL.
1414 * Returns 0 if successful, error otherwise.
1416 int tracing_snapshot_cond_disable(struct trace_array *tr)
1420 local_irq_disable();
1421 arch_spin_lock(&tr->max_lock);
1423 if (!tr->cond_snapshot)
1426 kfree(tr->cond_snapshot);
1427 tr->cond_snapshot = NULL;
1430 arch_spin_unlock(&tr->max_lock);
1435 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1437 void tracing_snapshot(void)
1439 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1441 EXPORT_SYMBOL_GPL(tracing_snapshot);
1442 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1444 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1446 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1447 int tracing_alloc_snapshot(void)
1449 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1452 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1453 void tracing_snapshot_alloc(void)
1458 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1459 void *tracing_cond_snapshot_data(struct trace_array *tr)
1463 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1464 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1468 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1469 int tracing_snapshot_cond_disable(struct trace_array *tr)
1473 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1474 #define free_snapshot(tr) do { } while (0)
1475 #endif /* CONFIG_TRACER_SNAPSHOT */
1477 void tracer_tracing_off(struct trace_array *tr)
1479 if (tr->array_buffer.buffer)
1480 ring_buffer_record_off(tr->array_buffer.buffer);
 * This flag is looked at when buffers haven't been allocated
 * yet, or by some tracers (like irqsoff) that just want to
 * know if the ring buffer has been disabled, but can handle
 * races where it gets disabled while we still do a record.
 * As the check is in the fast path of the tracers, it is more
 * important to be fast than accurate.
1489 tr->buffer_disabled = 1;
1490 /* Make the flag seen by readers */
1495 * tracing_off - turn off tracing buffers
1497 * This function stops the tracing buffers from recording data.
1498 * It does not disable any overhead the tracers themselves may
1499 * be causing. This function simply causes all recording to
1500 * the ring buffers to fail.
1502 void tracing_off(void)
1504 tracer_tracing_off(&global_trace);
1506 EXPORT_SYMBOL_GPL(tracing_off);
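/*
 * From user space, the same switch is exposed via tracefs (illustrative):
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on	- stop recording
 *	echo 1 > /sys/kernel/tracing/tracing_on	- resume recording
 */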
1508 void disable_trace_on_warning(void)
1510 if (__disable_trace_on_warning) {
1511 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1512 "Disabling tracing due to warning\n");
1518 * tracer_tracing_is_on - show real state of ring buffer enabled
1519 * @tr : the trace array to know if ring buffer is enabled
1521 * Shows real state of the ring buffer if it is enabled or not.
1523 bool tracer_tracing_is_on(struct trace_array *tr)
1525 if (tr->array_buffer.buffer)
1526 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1527 return !tr->buffer_disabled;
1531 * tracing_is_on - show state of ring buffers enabled
1533 int tracing_is_on(void)
1535 return tracer_tracing_is_on(&global_trace);
1537 EXPORT_SYMBOL_GPL(tracing_is_on);
1539 static int __init set_buf_size(char *str)
1541 unsigned long buf_size;
1545 buf_size = memparse(str, &str);
 * nr_entries cannot be zero and the startup
 * tests require some buffer space. Therefore,
 * ensure we have at least 4096 bytes of buffer.
1551 trace_buf_size = max(4096UL, buf_size);
1554 __setup("trace_buf_size=", set_buf_size);
1556 static int __init set_tracing_thresh(char *str)
1558 unsigned long threshold;
1563 ret = kstrtoul(str, 0, &threshold);
1566 tracing_thresh = threshold * 1000;
1569 __setup("tracing_thresh=", set_tracing_thresh);
1571 unsigned long nsecs_to_usecs(unsigned long nsecs)
1573 return nsecs / 1000;
1577 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1578 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1579 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1580 * of strings in the order that the evals (enum) were defined.
1585 /* These must match the bit positions in trace_iterator_flags */
1586 static const char *trace_options[] = {
1594 int in_ns; /* is this clock in nanoseconds? */
1595 } trace_clocks[] = {
1596 { trace_clock_local, "local", 1 },
1597 { trace_clock_global, "global", 1 },
1598 { trace_clock_counter, "counter", 0 },
1599 { trace_clock_jiffies, "uptime", 0 },
1600 { trace_clock, "perf", 1 },
1601 { ktime_get_mono_fast_ns, "mono", 1 },
1602 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1603 { ktime_get_boot_fast_ns, "boot", 1 },
1604 { ktime_get_tai_fast_ns, "tai", 1 },
1608 bool trace_clock_in_ns(struct trace_array *tr)
1610 if (trace_clocks[tr->clock_id].in_ns)
1617 * trace_parser_get_init - gets the buffer for trace parser
1619 int trace_parser_get_init(struct trace_parser *parser, int size)
1621 memset(parser, 0, sizeof(*parser));
1623 parser->buffer = kmalloc(size, GFP_KERNEL);
1624 if (!parser->buffer)
1627 parser->size = size;
1632 * trace_parser_put - frees the buffer for trace parser
1634 void trace_parser_put(struct trace_parser *parser)
1636 kfree(parser->buffer);
1637 parser->buffer = NULL;
1641 * trace_get_user - reads the user input string separated by space
1642 * (matched by isspace(ch))
 * For each string found, the 'struct trace_parser' is updated,
1645 * and the function returns.
1647 * Returns number of bytes read.
1649 * See kernel/trace/trace.h for 'struct trace_parser' details.
1651 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1652 size_t cnt, loff_t *ppos)
1659 trace_parser_clear(parser);
1661 ret = get_user(ch, ubuf++);
 * If the parser has not finished with the last write,
 * continue reading the user input without skipping spaces.
1672 if (!parser->cont) {
1673 /* skip white space */
1674 while (cnt && isspace(ch)) {
1675 ret = get_user(ch, ubuf++);
1684 /* only spaces were written */
1685 if (isspace(ch) || !ch) {
1692 /* read the non-space input */
1693 while (cnt && !isspace(ch) && ch) {
1694 if (parser->idx < parser->size - 1)
1695 parser->buffer[parser->idx++] = ch;
1700 ret = get_user(ch, ubuf++);
1707 /* We either got finished input or we have to wait for another call. */
1708 if (isspace(ch) || !ch) {
1709 parser->buffer[parser->idx] = 0;
1710 parser->cont = false;
1711 } else if (parser->idx < parser->size - 1) {
1712 parser->cont = true;
1713 parser->buffer[parser->idx++] = ch;
1714 /* Make sure the parsed string always terminates with '\0'. */
1715 parser->buffer[parser->idx] = 0;
1728 /* TODO add a seq_buf_to_buffer() */
1729 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1733 if (trace_seq_used(s) <= s->seq.readpos)
1736 len = trace_seq_used(s) - s->seq.readpos;
1739 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1741 s->seq.readpos += cnt;
1745 unsigned long __read_mostly tracing_thresh;
1747 #ifdef CONFIG_TRACER_MAX_TRACE
1748 static const struct file_operations tracing_max_lat_fops;
1750 #ifdef LATENCY_FS_NOTIFY
1752 static struct workqueue_struct *fsnotify_wq;
1754 static void latency_fsnotify_workfn(struct work_struct *work)
1756 struct trace_array *tr = container_of(work, struct trace_array,
1758 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1761 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1763 struct trace_array *tr = container_of(iwork, struct trace_array,
1765 queue_work(fsnotify_wq, &tr->fsnotify_work);
1768 static void trace_create_maxlat_file(struct trace_array *tr,
1769 struct dentry *d_tracer)
1771 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1772 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1773 tr->d_max_latency = trace_create_file("tracing_max_latency",
1776 &tracing_max_lat_fops);
1779 __init static int latency_fsnotify_init(void)
1781 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1782 WQ_UNBOUND | WQ_HIGHPRI, 0);
1784 pr_err("Unable to allocate tr_max_lat_wq\n");
1790 late_initcall_sync(latency_fsnotify_init);
1792 void latency_fsnotify(struct trace_array *tr)
1797 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1798 * possible that we are called from __schedule() or do_idle(), which
1799 * could cause a deadlock.
1801 irq_work_queue(&tr->fsnotify_irqwork);
1804 #else /* !LATENCY_FS_NOTIFY */
1806 #define trace_create_maxlat_file(tr, d_tracer) \
1807 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1808 d_tracer, tr, &tracing_max_lat_fops)
1813 * Copy the new maximum trace into the separate maximum-trace
1814 * structure. (this way the maximum trace is permanently saved,
1815 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1818 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1820 struct array_buffer *trace_buf = &tr->array_buffer;
1821 struct array_buffer *max_buf = &tr->max_buffer;
1822 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1823 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1826 max_buf->time_start = data->preempt_timestamp;
1828 max_data->saved_latency = tr->max_latency;
1829 max_data->critical_start = data->critical_start;
1830 max_data->critical_end = data->critical_end;
1832 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1833 max_data->pid = tsk->pid;
1835 * If tsk == current, then use current_uid(), as that does not use
1836 * RCU. The irq tracer can be called out of RCU scope.
1839 max_data->uid = current_uid();
1841 max_data->uid = task_uid(tsk);
1843 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1844 max_data->policy = tsk->policy;
1845 max_data->rt_priority = tsk->rt_priority;
/* record this task's comm */
1848 tracing_record_cmdline(tsk);
1849 latency_fsnotify(tr);
1853 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1855 * @tsk: the task with the latency
1856 * @cpu: The cpu that initiated the trace.
1857 * @cond_data: User data associated with a conditional snapshot
1859 * Flip the buffers between the @tr and the max_tr and record information
1860 * about which task was the cause of this latency.
1863 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1869 WARN_ON_ONCE(!irqs_disabled());
1871 if (!tr->allocated_snapshot) {
1872 /* Only the nop tracer should hit this when disabling */
1873 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1877 arch_spin_lock(&tr->max_lock);
1879 /* Inherit the recordable setting from array_buffer */
1880 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1881 ring_buffer_record_on(tr->max_buffer.buffer);
1883 ring_buffer_record_off(tr->max_buffer.buffer);
1885 #ifdef CONFIG_TRACER_SNAPSHOT
1886 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1887 arch_spin_unlock(&tr->max_lock);
1891 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1893 __update_max_tr(tr, tsk, cpu);
1895 arch_spin_unlock(&tr->max_lock);
1899 * update_max_tr_single - only copy one trace over, and reset the rest
1901 * @tsk: task with the latency
1902 * @cpu: the cpu of the buffer to copy.
1904 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1907 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1914 WARN_ON_ONCE(!irqs_disabled());
1915 if (!tr->allocated_snapshot) {
1916 /* Only the nop tracer should hit this when disabling */
1917 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1921 arch_spin_lock(&tr->max_lock);
1923 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1925 if (ret == -EBUSY) {
1927 * We failed to swap the buffer due to a commit taking
1928 * place on this CPU. We fail to record, but we reset
1929 * the max trace buffer (no one writes directly to it)
1930 * and flag that it failed.
 * Another reason is that a resize is in progress.
1933 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1934 "Failed to swap buffers due to commit or resize in progress\n");
1937 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1939 __update_max_tr(tr, tsk, cpu);
1940 arch_spin_unlock(&tr->max_lock);
1943 #endif /* CONFIG_TRACER_MAX_TRACE */
1945 static int wait_on_pipe(struct trace_iterator *iter, int full)
1947 /* Iterators are static, they should be filled or empty */
1948 if (trace_buffer_iter(iter, iter->cpu_file))
1951 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1955 #ifdef CONFIG_FTRACE_STARTUP_TEST
1956 static bool selftests_can_run;
1958 struct trace_selftests {
1959 struct list_head list;
1960 struct tracer *type;
1963 static LIST_HEAD(postponed_selftests);
1965 static int save_selftest(struct tracer *type)
1967 struct trace_selftests *selftest;
1969 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1973 selftest->type = type;
1974 list_add(&selftest->list, &postponed_selftests);
1978 static int run_tracer_selftest(struct tracer *type)
1980 struct trace_array *tr = &global_trace;
1981 struct tracer *saved_tracer = tr->current_trace;
1984 if (!type->selftest || tracing_selftest_disabled)
1988 * If a tracer registers early in boot up (before scheduling is
1989 * initialized and such), then do not run its selftests yet.
1990 * Instead, run it a little later in the boot process.
1992 if (!selftests_can_run)
1993 return save_selftest(type);
1995 if (!tracing_is_on()) {
1996 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2002 * Run a selftest on this tracer.
2003 * Here we reset the trace buffer, and set the current
2004 * tracer to be this tracer. The tracer can then run some
2005 * internal tracing to verify that everything is in order.
2006 * If we fail, we do not register this tracer.
2008 tracing_reset_online_cpus(&tr->array_buffer);
2010 tr->current_trace = type;
2012 #ifdef CONFIG_TRACER_MAX_TRACE
2013 if (type->use_max_tr) {
2014 /* If we expanded the buffers, make sure the max is expanded too */
2015 if (ring_buffer_expanded)
2016 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2017 RING_BUFFER_ALL_CPUS);
2018 tr->allocated_snapshot = true;
2022 /* the test is responsible for initializing and enabling */
2023 pr_info("Testing tracer %s: ", type->name);
2024 ret = type->selftest(type, tr);
2025 /* the test is responsible for resetting too */
2026 tr->current_trace = saved_tracer;
2028 printk(KERN_CONT "FAILED!\n");
2029 /* Add the warning after printing 'FAILED' */
2033 /* Only reset on passing, to avoid touching corrupted buffers */
2034 tracing_reset_online_cpus(&tr->array_buffer);
2036 #ifdef CONFIG_TRACER_MAX_TRACE
2037 if (type->use_max_tr) {
2038 tr->allocated_snapshot = false;
2040 /* Shrink the max buffer again */
2041 if (ring_buffer_expanded)
2042 ring_buffer_resize(tr->max_buffer.buffer, 1,
2043 RING_BUFFER_ALL_CPUS);
2047 printk(KERN_CONT "PASSED\n");
2051 static int do_run_tracer_selftest(struct tracer *type)
2056 * Tests can take a long time, especially if they are run one after the
2057 * other, as does happen during bootup when all the tracers are
2058 * registered. This could cause the soft lockup watchdog to trigger.
2062 tracing_selftest_running = true;
2063 ret = run_tracer_selftest(type);
2064 tracing_selftest_running = false;
2069 static __init int init_trace_selftests(void)
2071 struct trace_selftests *p, *n;
2072 struct tracer *t, **last;
2075 selftests_can_run = true;
2077 mutex_lock(&trace_types_lock);
2079 if (list_empty(&postponed_selftests))
2082 pr_info("Running postponed tracer tests:\n");
2084 tracing_selftest_running = true;
2085 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
/* This loop can take minutes when sanitizers are enabled, so
 * let's make sure we allow RCU processing.
2090 ret = run_tracer_selftest(p->type);
2091 /* If the test fails, then warn and remove from available_tracers */
2093 WARN(1, "tracer: %s failed selftest, disabling\n",
2095 last = &trace_types;
2096 for (t = trace_types; t; t = t->next) {
2107 tracing_selftest_running = false;
2110 mutex_unlock(&trace_types_lock);
2114 core_initcall(init_trace_selftests);
2116 static inline int run_tracer_selftest(struct tracer *type)
2120 static inline int do_run_tracer_selftest(struct tracer *type)
2124 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2126 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2128 static void __init apply_trace_boot_options(void);
2131 * register_tracer - register a tracer with the ftrace system.
2132 * @type: the plugin for the tracer
2134 * Register a new plugin tracer.
2136 int __init register_tracer(struct tracer *type)
2142 pr_info("Tracer must have a name\n");
2146 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2147 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2151 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2152 pr_warn("Can not register tracer %s due to lockdown\n",
2157 mutex_lock(&trace_types_lock);
2159 for (t = trace_types; t; t = t->next) {
2160 if (strcmp(type->name, t->name) == 0) {
2162 pr_info("Tracer %s already registered\n",
2169 if (!type->set_flag)
2170 type->set_flag = &dummy_set_flag;
/* Allocate a dummy tracer_flags */
2173 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2178 type->flags->val = 0;
2179 type->flags->opts = dummy_tracer_opt;
2181 if (!type->flags->opts)
2182 type->flags->opts = dummy_tracer_opt;
2184 /* store the tracer for __set_tracer_option */
2185 type->flags->trace = type;
2187 ret = do_run_tracer_selftest(type);
2191 type->next = trace_types;
2193 add_tracer_options(&global_trace, type);
2196 mutex_unlock(&trace_types_lock);
2198 if (ret || !default_bootup_tracer)
2201 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2204 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2205 /* Do we want this tracer to start on bootup? */
2206 tracing_set_tracer(&global_trace, type->name);
2207 default_bootup_tracer = NULL;
2209 apply_trace_boot_options();
2211 /* disable other selftests, since this will break it. */
2212 disable_tracing_selftest("running a tracer");
2218 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2220 struct trace_buffer *buffer = buf->buffer;
2225 ring_buffer_record_disable(buffer);
2227 /* Make sure all commits have finished */
2229 ring_buffer_reset_cpu(buffer, cpu);
2231 ring_buffer_record_enable(buffer);
2234 void tracing_reset_online_cpus(struct array_buffer *buf)
2236 struct trace_buffer *buffer = buf->buffer;
2241 ring_buffer_record_disable(buffer);
2243 /* Make sure all commits have finished */
2246 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2248 ring_buffer_reset_online_cpus(buffer);
2250 ring_buffer_record_enable(buffer);
2253 /* Must have trace_types_lock held */
2254 void tracing_reset_all_online_cpus_unlocked(void)
2256 struct trace_array *tr;
2258 lockdep_assert_held(&trace_types_lock);
2260 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2261 if (!tr->clear_trace)
2263 tr->clear_trace = false;
2264 tracing_reset_online_cpus(&tr->array_buffer);
2265 #ifdef CONFIG_TRACER_MAX_TRACE
2266 tracing_reset_online_cpus(&tr->max_buffer);
2271 void tracing_reset_all_online_cpus(void)
2273 mutex_lock(&trace_types_lock);
2274 tracing_reset_all_online_cpus_unlocked();
2275 mutex_unlock(&trace_types_lock);
2279 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2280 * is the tgid last observed corresponding to pid=i.
2282 static int *tgid_map;
2284 /* The maximum valid index into tgid_map. */
2285 static size_t tgid_map_max;
2287 #define SAVED_CMDLINES_DEFAULT 128
2288 #define NO_CMDLINE_MAP UINT_MAX
 * Preemption must be disabled before acquiring trace_cmdline_lock.
 * The various trace_arrays' max_lock must be acquired in a context
 * where interrupts are disabled.
2294 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2295 struct saved_cmdlines_buffer {
2296 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2297 unsigned *map_cmdline_to_pid;
2298 unsigned cmdline_num;
2300 char *saved_cmdlines;
2302 static struct saved_cmdlines_buffer *savedcmd;
2304 static inline char *get_saved_cmdlines(int idx)
2306 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2309 static inline void set_cmdline(int idx, const char *cmdline)
2311 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2314 static int allocate_cmdlines_buffer(unsigned int val,
2315 struct saved_cmdlines_buffer *s)
2317 s->map_cmdline_to_pid = kmalloc_array(val,
2318 sizeof(*s->map_cmdline_to_pid),
2320 if (!s->map_cmdline_to_pid)
2323 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2324 if (!s->saved_cmdlines) {
2325 kfree(s->map_cmdline_to_pid);
2330 s->cmdline_num = val;
2331 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2332 sizeof(s->map_pid_to_cmdline));
2333 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2334 val * sizeof(*s->map_cmdline_to_pid));
2339 static int trace_create_savedcmd(void)
2343 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2347 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2357 int is_tracing_stopped(void)
2359 return global_trace.stop_count;
2362 static void tracing_start_tr(struct trace_array *tr)
2364 struct trace_buffer *buffer;
2365 unsigned long flags;
2367 if (tracing_disabled)
2370 raw_spin_lock_irqsave(&tr->start_lock, flags);
2371 if (--tr->stop_count) {
2372 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2373 /* Someone screwed up their debugging */
2379 /* Prevent the buffers from switching */
2380 arch_spin_lock(&tr->max_lock);
2382 buffer = tr->array_buffer.buffer;
2384 ring_buffer_record_enable(buffer);
2386 #ifdef CONFIG_TRACER_MAX_TRACE
2387 buffer = tr->max_buffer.buffer;
2389 ring_buffer_record_enable(buffer);
2392 arch_spin_unlock(&tr->max_lock);
2395 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2399 * tracing_start - quick start of the tracer
2401 * If tracing is enabled but was stopped by tracing_stop,
2402 * this will start the tracer back up.
2404 void tracing_start(void)
2407 return tracing_start_tr(&global_trace);
2410 static void tracing_stop_tr(struct trace_array *tr)
2412 struct trace_buffer *buffer;
2413 unsigned long flags;
2415 raw_spin_lock_irqsave(&tr->start_lock, flags);
2416 if (tr->stop_count++)
2419 /* Prevent the buffers from switching */
2420 arch_spin_lock(&tr->max_lock);
2422 buffer = tr->array_buffer.buffer;
2424 ring_buffer_record_disable(buffer);
2426 #ifdef CONFIG_TRACER_MAX_TRACE
2427 buffer = tr->max_buffer.buffer;
2429 ring_buffer_record_disable(buffer);
2432 arch_spin_unlock(&tr->max_lock);
2435 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2439 * tracing_stop - quick stop of the tracer
2441 * Light weight way to stop tracing. Use in conjunction with
2444 void tracing_stop(void)
2446 return tracing_stop_tr(&global_trace);
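/*
 * Illustrative note (a summary, not upstream text): tracing_stop() and
 * tracing_start() nest through tr->stop_count. Only the first stop actually
 * disables the ring buffers, and only the start that brings the count back
 * to zero re-enables recording, e.g.:
 *
 *	tracing_stop();
 *	tracing_stop();
 *	tracing_start();	// still stopped
 *	tracing_start();	// recording resumes here
 */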
2449 static int trace_save_cmdline(struct task_struct *tsk)
2453 /* treat recording of idle task as a success */
2457 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2460 * It's not the end of the world if we don't get
2461 * the lock, but we also don't want to spin
2462 * nor do we want to disable interrupts,
2463 * so if we miss here, then better luck next time.
2465 * This is called from within the scheduler and the wakeup path, so interrupts
2466 * had better be disabled and the run queue lock held.
2468 lockdep_assert_preemption_disabled();
2469 if (!arch_spin_trylock(&trace_cmdline_lock))
2472 idx = savedcmd->map_pid_to_cmdline[tpid];
2473 if (idx == NO_CMDLINE_MAP) {
2474 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2476 savedcmd->map_pid_to_cmdline[tpid] = idx;
2477 savedcmd->cmdline_idx = idx;
2480 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2481 set_cmdline(idx, tsk->comm);
2483 arch_spin_unlock(&trace_cmdline_lock);
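/*
 * Illustrative note (a summary, not upstream text): because only a trylock
 * is used above, a contended trace_cmdline_lock simply means this comm is
 * not cached right now; the event itself is still recorded, and a later
 * lookup for the pid falls back to printing "<...>"
 * (see __trace_find_cmdline()).
 */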
2488 static void __trace_find_cmdline(int pid, char comm[])
2494 strcpy(comm, "<idle>");
2498 if (WARN_ON_ONCE(pid < 0)) {
2499 strcpy(comm, "<XXX>");
2503 tpid = pid & (PID_MAX_DEFAULT - 1);
2504 map = savedcmd->map_pid_to_cmdline[tpid];
2505 if (map != NO_CMDLINE_MAP) {
2506 tpid = savedcmd->map_cmdline_to_pid[map];
2508 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2512 strcpy(comm, "<...>");
2515 void trace_find_cmdline(int pid, char comm[])
2518 arch_spin_lock(&trace_cmdline_lock);
2520 __trace_find_cmdline(pid, comm);
2522 arch_spin_unlock(&trace_cmdline_lock);
2526 static int *trace_find_tgid_ptr(int pid)
2529 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2530 * if we observe a non-NULL tgid_map then we also observe the correct tgid_map_max.
2533 int *map = smp_load_acquire(&tgid_map);
2535 if (unlikely(!map || pid > tgid_map_max))
2541 int trace_find_tgid(int pid)
2543 int *ptr = trace_find_tgid_ptr(pid);
2545 return ptr ? *ptr : 0;
2548 static int trace_save_tgid(struct task_struct *tsk)
2552 /* treat recording of idle task as a success */
2556 ptr = trace_find_tgid_ptr(tsk->pid);
2564 static bool tracing_record_taskinfo_skip(int flags)
2566 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2568 if (!__this_cpu_read(trace_taskinfo_save))
2574 * tracing_record_taskinfo - record the task info of a task
2576 * @task: task to record
2577 * @flags: TRACE_RECORD_CMDLINE for recording comm
2578 * TRACE_RECORD_TGID for recording tgid
2580 void tracing_record_taskinfo(struct task_struct *task, int flags)
2584 if (tracing_record_taskinfo_skip(flags))
2588 * Record as much task information as possible. If some fail, continue
2589 * to try to record the others.
2591 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2592 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2594 /* If recording any information failed, retry again soon. */
2598 __this_cpu_write(trace_taskinfo_save, false);
2602 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2604 * @prev: previous task during sched_switch
2605 * @next: next task during sched_switch
2606 * @flags: TRACE_RECORD_CMDLINE for recording comm
2607 * TRACE_RECORD_TGID for recording tgid
2609 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2610 struct task_struct *next, int flags)
2614 if (tracing_record_taskinfo_skip(flags))
2618 * Record as much task information as possible. If some fail, continue
2619 * to try to record the others.
2621 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2622 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2623 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2624 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2626 /* If recording any information failed, retry again soon. */
2630 __this_cpu_write(trace_taskinfo_save, false);
2633 /* Helpers to record a specific task information */
2634 void tracing_record_cmdline(struct task_struct *task)
2636 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2639 void tracing_record_tgid(struct task_struct *task)
2641 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
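/*
 * Illustrative usage (a sketch, not taken from a caller in this file): the
 * scheduler tracepoint probes record both tasks before their event is
 * written, roughly:
 *
 *	tracing_record_taskinfo_sched_switch(prev, next,
 *			TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID);
 *
 * so that trace output can later resolve pid -> comm/tgid from the caches
 * above instead of dereferencing task_structs at read time.
 */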
2645 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2646 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2647 * simplifies those functions and keeps them in sync.
2649 enum print_line_t trace_handle_return(struct trace_seq *s)
2651 return trace_seq_has_overflowed(s) ?
2652 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2654 EXPORT_SYMBOL_GPL(trace_handle_return);
2656 static unsigned short migration_disable_value(void)
2658 #if defined(CONFIG_SMP)
2659 return current->migration_disabled;
2660 #else
2661 return 0;
2662 #endif
2665 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2667 unsigned int trace_flags = irqs_status;
2670 pc = preempt_count();
2672 if (pc & NMI_MASK)
2673 trace_flags |= TRACE_FLAG_NMI;
2674 if (pc & HARDIRQ_MASK)
2675 trace_flags |= TRACE_FLAG_HARDIRQ;
2676 if (in_serving_softirq())
2677 trace_flags |= TRACE_FLAG_SOFTIRQ;
2678 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2679 trace_flags |= TRACE_FLAG_BH_OFF;
2681 if (tif_need_resched())
2682 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2683 if (test_preempt_need_resched())
2684 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2685 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2686 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
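/*
 * Illustrative note (layout summary, not upstream text): the returned
 * trace_ctx word packs bits 0-3 with the preemption depth (capped at 15),
 * bits 4-7 with the migrate-disable depth (also capped at 15), and bits 16
 * and up with the TRACE_FLAG_* bits, including the irq state passed in;
 * bits 8-15 are unused here.
 */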
2689 struct ring_buffer_event *
2690 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2693 unsigned int trace_ctx)
2695 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2698 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2699 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2700 static int trace_buffered_event_ref;
2703 * trace_buffered_event_enable - enable buffering events
2705 * When events are being filtered, it is quicker to use a temporary
2706 * buffer to write the event data into if there's a likely chance
2707 * that it will not be committed. The discard of the ring buffer
2708 * is not as fast as committing, and is much slower than copying to the temp buffer.
2711 * When an event is to be filtered, allocate per cpu buffers to
2712 * write the event data into, and if the event is filtered and discarded
2713 * it is simply dropped; otherwise, the entire data is committed to the buffer.
2716 void trace_buffered_event_enable(void)
2718 struct ring_buffer_event *event;
2722 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2724 if (trace_buffered_event_ref++)
2727 for_each_tracing_cpu(cpu) {
2728 page = alloc_pages_node(cpu_to_node(cpu),
2729 GFP_KERNEL | __GFP_NORETRY, 0);
2730 /* This is just an optimization and can handle failures */
2731 if (!page) {
2732 pr_err("Failed to allocate event buffer\n");
2733 break;
2734 }
2736 event = page_address(page);
2737 memset(event, 0, sizeof(*event));
2739 per_cpu(trace_buffered_event, cpu) = event;
2742 if (cpu == smp_processor_id() &&
2743 __this_cpu_read(trace_buffered_event) !=
2744 per_cpu(trace_buffered_event, cpu))
2745 WARN_ON_ONCE(1);
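/*
 * Illustrative note (a summary, not upstream text): after this, every
 * tracing CPU owns one zeroed page that can stage a single event while
 * filters are active; trace_event_buffer_lock_reserve() below only uses it
 * from the outermost context (trace_buffered_event_cnt == 1), so nesting
 * from an interrupt or NMI falls back to reserving directly in the ring
 * buffer.
 */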
2750 static void enable_trace_buffered_event(void *data)
2752 /* Probably not needed, but do it anyway */
2753 smp_rmb();
2754 this_cpu_dec(trace_buffered_event_cnt);
2757 static void disable_trace_buffered_event(void *data)
2759 this_cpu_inc(trace_buffered_event_cnt);
2763 * trace_buffered_event_disable - disable buffering events
2765 * When a filter is removed, it is faster to not use the buffered
2766 * events, and to commit directly into the ring buffer. Free up
2767 * the temp buffers when there are no more users. This requires
2768 * special synchronization with current events.
2770 void trace_buffered_event_disable(void)
2774 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2776 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2779 if (--trace_buffered_event_ref)
2782 /* For each CPU, set the buffer as used. */
2783 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2786 /* Wait for all current users to finish */
2789 for_each_tracing_cpu(cpu) {
2790 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2791 per_cpu(trace_buffered_event, cpu) = NULL;
2795 * Wait for all CPUs that may have started checking whether they can use
2796 * their event buffer only after the previous synchronize_rcu() call and
2797 * that still read a valid pointer from trace_buffered_event. It must be
2798 * ensured that they do not see the cleared trace_buffered_event_cnt, else
2799 * they could wrongly decide to use the pointed-to buffer which is now freed.
2803 /* For each CPU, relinquish the buffer */
2804 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2808 static struct trace_buffer *temp_buffer;
2810 struct ring_buffer_event *
2811 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2812 struct trace_event_file *trace_file,
2813 int type, unsigned long len,
2814 unsigned int trace_ctx)
2816 struct ring_buffer_event *entry;
2817 struct trace_array *tr = trace_file->tr;
2820 *current_rb = tr->array_buffer.buffer;
2822 if (!tr->no_filter_buffering_ref &&
2823 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2824 preempt_disable_notrace();
2826 * Filtering is on, so try to use the per cpu buffer first.
2827 * This buffer will simulate a ring_buffer_event,
2828 * where the type_len is zero and the array[0] will
2829 * hold the full length.
2830 * (see include/linux/ring_buffer.h for details on
2831 * how the ring_buffer_event is structured).
2833 * Using a temp buffer during filtering and copying it
2834 * on a matched filter is quicker than writing directly
2835 * into the ring buffer and then discarding it when
2836 * it doesn't match. That is because the discard
2837 * requires several atomic operations to get right.
2838 * Copying on match and doing nothing on a failed match
2839 * is still quicker than no copy on match, but having
2840 * to discard out of the ring buffer on a failed match.
2842 if ((entry = __this_cpu_read(trace_buffered_event))) {
2843 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2845 val = this_cpu_inc_return(trace_buffered_event_cnt);
2848 * Preemption is disabled, but interrupts and NMIs
2849 * can still come in now. If that happens after
2850 * the above increment, then it will have to go
2851 * back to the old method of allocating the event
2852 * on the ring buffer, and if the filter fails, it
2853 * will have to call ring_buffer_discard_commit()
2856 * Need to also check the unlikely case that the
2857 * length is bigger than the temp buffer size.
2858 * If that happens, then the reserve is pretty much
2859 * guaranteed to fail, as the ring buffer currently
2860 * only allows events less than a page. But that may
2861 * change in the future, so let the ring buffer reserve
2862 * handle the failure in that case.
2864 if (val == 1 && likely(len <= max_len)) {
2865 trace_event_setup(entry, type, trace_ctx);
2866 entry->array[0] = len;
2867 /* Return with preemption disabled */
2870 this_cpu_dec(trace_buffered_event_cnt);
2872 /* __trace_buffer_lock_reserve() disables preemption */
2873 preempt_enable_notrace();
2876 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2879 * If tracing is off, but we have triggers enabled
2880 * we still need to look at the event data. Use the temp_buffer
2881 * to store the trace event for the trigger to use. It's recursion
2882 * safe and will not be recorded anywhere.
2884 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2885 *current_rb = temp_buffer;
2886 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2891 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
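/*
 * Illustrative note (a summary based on the comments above, not upstream
 * text): whenever a non-NULL event is returned, preemption is left disabled,
 * either explicitly for the per-CPU staged event or by the ring buffer
 * reservation itself; the matching enable happens when the event is
 * committed or discarded.
 */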
2893 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2894 static DEFINE_MUTEX(tracepoint_printk_mutex);
2896 static void output_printk(struct trace_event_buffer *fbuffer)
2898 struct trace_event_call *event_call;
2899 struct trace_event_file *file;
2900 struct trace_event *event;
2901 unsigned long flags;
2902 struct trace_iterator *iter = tracepoint_print_iter;
2904 /* We should never get here if iter is NULL */
2905 if (WARN_ON_ONCE(!iter))
2908 event_call = fbuffer->trace_file->event_call;
2909 if (!event_call || !event_call->event.funcs ||
2910 !event_call->event.funcs->trace)
2913 file = fbuffer->trace_file;
2914 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2915 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2916 !filter_match_preds(file->filter, fbuffer->entry)))
2919 event = &fbuffer->trace_file->event_call->event;
2921 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2922 trace_seq_init(&iter->seq);
2923 iter->ent = fbuffer->entry;
2924 event_call->event.funcs->trace(iter, 0, event);
2925 trace_seq_putc(&iter->seq, 0);
2926 printk("%s", iter->seq.buffer);
2928 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2931 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2932 void *buffer, size_t *lenp,
2935 int save_tracepoint_printk;
2938 mutex_lock(&tracepoint_printk_mutex);
2939 save_tracepoint_printk = tracepoint_printk;
2941 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2944 * This will force exiting early, as tracepoint_printk
2945 * is always zero when tracepoint_print_iter is not allocated.
2947 if (!tracepoint_print_iter)
2948 tracepoint_printk = 0;
2950 if (save_tracepoint_printk == tracepoint_printk)
2953 if (tracepoint_printk)
2954 static_key_enable(&tracepoint_printk_key.key);
2956 static_key_disable(&tracepoint_printk_key.key);
2959 mutex_unlock(&tracepoint_printk_mutex);
2964 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2966 enum event_trigger_type tt = ETT_NONE;
2967 struct trace_event_file *file = fbuffer->trace_file;
2969 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2970 fbuffer->entry, &tt))
2973 if (static_key_false(&tracepoint_printk_key.key))
2974 output_printk(fbuffer);
2976 if (static_branch_unlikely(&trace_event_exports_enabled))
2977 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2979 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2980 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2984 event_triggers_post_call(file, tt);
2987 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2992 * trace_buffer_unlock_commit_regs()
2993 * trace_event_buffer_commit()
2994 * trace_event_raw_event_xxx()
2996 # define STACK_SKIP 3
2998 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2999 struct trace_buffer *buffer,
3000 struct ring_buffer_event *event,
3001 unsigned int trace_ctx,
3002 struct pt_regs *regs)
3004 __buffer_unlock_commit(buffer, event);
3007 * If regs is not set, then skip the necessary functions.
3008 * Note, we can still get here via blktrace, wakeup tracer
3009 * and mmiotrace, but that's ok if they lose a function or
3010 * two. They are not that meaningful.
3012 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3013 ftrace_trace_userstack(tr, buffer, trace_ctx);
3017 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3020 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3021 struct ring_buffer_event *event)
3023 __buffer_unlock_commit(buffer, event);
3027 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3028 parent_ip, unsigned int trace_ctx)
3030 struct trace_event_call *call = &event_function;
3031 struct trace_buffer *buffer = tr->array_buffer.buffer;
3032 struct ring_buffer_event *event;
3033 struct ftrace_entry *entry;
3035 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3039 entry = ring_buffer_event_data(event);
3040 entry->ip = ip;
3041 entry->parent_ip = parent_ip;
3043 if (!call_filter_check_discard(call, entry, buffer, event)) {
3044 if (static_branch_unlikely(&trace_function_exports_enabled))
3045 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3046 __buffer_unlock_commit(buffer, event);
3050 #ifdef CONFIG_STACKTRACE
3052 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3053 #define FTRACE_KSTACK_NESTING 4
3055 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3057 struct ftrace_stack {
3058 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3062 struct ftrace_stacks {
3063 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3066 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3067 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3069 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3070 unsigned int trace_ctx,
3071 int skip, struct pt_regs *regs)
3073 struct trace_event_call *call = &event_kernel_stack;
3074 struct ring_buffer_event *event;
3075 unsigned int size, nr_entries;
3076 struct ftrace_stack *fstack;
3077 struct stack_entry *entry;
3081 * Add one, for this function and the call to save_stack_trace()
3082 * If regs is set, then these functions will not be in the way.
3084 #ifndef CONFIG_UNWINDER_ORC
3085 if (!regs)
3086 skip++;
3087 #endif
3089 preempt_disable_notrace();
3091 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3093 /* This should never happen. If it does, yell once and skip */
3094 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3098 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3099 * interrupt will either see the value pre increment or post
3100 * increment. If the interrupt happens pre increment it will have
3101 * restored the counter when it returns. We just need a barrier to
3102 * keep gcc from moving things around.
3106 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3107 size = ARRAY_SIZE(fstack->calls);
3110 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3113 nr_entries = stack_trace_save(fstack->calls, size, skip);
3116 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3117 struct_size(entry, caller, nr_entries),
3121 entry = ring_buffer_event_data(event);
3123 entry->size = nr_entries;
3124 memcpy(&entry->caller, fstack->calls,
3125 flex_array_size(entry, caller, nr_entries));
3127 if (!call_filter_check_discard(call, entry, buffer, event))
3128 __buffer_unlock_commit(buffer, event);
3131 /* Again, don't let gcc optimize things here */
3133 __this_cpu_dec(ftrace_stack_reserve);
3134 preempt_enable_notrace();
3138 static inline void ftrace_trace_stack(struct trace_array *tr,
3139 struct trace_buffer *buffer,
3140 unsigned int trace_ctx,
3141 int skip, struct pt_regs *regs)
3143 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3146 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3149 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3152 struct trace_buffer *buffer = tr->array_buffer.buffer;
3154 if (rcu_is_watching()) {
3155 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3159 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3163 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3164 * but if the above rcu_is_watching() failed, then the NMI
3165 * triggered someplace critical, and ct_irq_enter() should
3166 * not be called from NMI.
3168 if (unlikely(in_nmi()))
3171 ct_irq_enter_irqson();
3172 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3173 ct_irq_exit_irqson();
3177 * trace_dump_stack - record a stack back trace in the trace buffer
3178 * @skip: Number of functions to skip (helper handlers)
3180 void trace_dump_stack(int skip)
3182 if (tracing_disabled || tracing_selftest_running)
3185 #ifndef CONFIG_UNWINDER_ORC
3186 /* Skip 1 to skip this function. */
3187 skip++;
3188 #endif
3189 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3190 tracing_gen_ctx(), skip, NULL);
3192 EXPORT_SYMBOL_GPL(trace_dump_stack);
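/*
 * Illustrative usage (a sketch, not taken from a caller in this file): a
 * driver that wants the current kernel backtrace recorded in the trace
 * buffer, rather than printed to the console, can simply call:
 *
 *	trace_dump_stack(0);
 *
 * A non-zero @skip trims that many callers off the top of the recorded
 * stack.
 */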
3194 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3195 static DEFINE_PER_CPU(int, user_stack_count);
3198 ftrace_trace_userstack(struct trace_array *tr,
3199 struct trace_buffer *buffer, unsigned int trace_ctx)
3201 struct trace_event_call *call = &event_user_stack;
3202 struct ring_buffer_event *event;
3203 struct userstack_entry *entry;
3205 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3209 * NMIs cannot handle page faults, even with fixups.
3210 * Saving the user stack can (and often does) fault.
3212 if (unlikely(in_nmi()))
3216 * prevent recursion, since the user stack tracing may
3217 * trigger other kernel events.
3220 if (__this_cpu_read(user_stack_count))
3223 __this_cpu_inc(user_stack_count);
3225 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3226 sizeof(*entry), trace_ctx);
3228 goto out_drop_count;
3229 entry = ring_buffer_event_data(event);
3231 entry->tgid = current->tgid;
3232 memset(&entry->caller, 0, sizeof(entry->caller));
3234 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3235 if (!call_filter_check_discard(call, entry, buffer, event))
3236 __buffer_unlock_commit(buffer, event);
3239 __this_cpu_dec(user_stack_count);
3243 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3244 static void ftrace_trace_userstack(struct trace_array *tr,
3245 struct trace_buffer *buffer,
3246 unsigned int trace_ctx)
3249 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3251 #endif /* CONFIG_STACKTRACE */
3254 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3255 unsigned long long delta)
3257 entry->bottom_delta_ts = delta & U32_MAX;
3258 entry->top_delta_ts = (delta >> 32);
3261 void trace_last_func_repeats(struct trace_array *tr,
3262 struct trace_func_repeats *last_info,
3263 unsigned int trace_ctx)
3265 struct trace_buffer *buffer = tr->array_buffer.buffer;
3266 struct func_repeats_entry *entry;
3267 struct ring_buffer_event *event;
3270 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3271 sizeof(*entry), trace_ctx);
3275 delta = ring_buffer_event_time_stamp(buffer, event) -
3276 last_info->ts_last_call;
3278 entry = ring_buffer_event_data(event);
3279 entry->ip = last_info->ip;
3280 entry->parent_ip = last_info->parent_ip;
3281 entry->count = last_info->count;
3282 func_repeats_set_delta_ts(entry, delta);
3284 __buffer_unlock_commit(buffer, event);
3287 /* created for use with alloc_percpu */
3288 struct trace_buffer_struct {
3290 char buffer[4][TRACE_BUF_SIZE];
3293 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3296 * This allows for lockless recording. If we're nested too deeply, then
3297 * this returns NULL.
3299 static char *get_trace_buf(void)
3301 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3303 if (!trace_percpu_buffer || buffer->nesting >= 4)
3308 /* Interrupts must see nesting incremented before we use the buffer */
3310 return &buffer->buffer[buffer->nesting - 1][0];
3313 static void put_trace_buf(void)
3315 /* Don't let the decrement of nesting leak before this */
3317 this_cpu_dec(trace_percpu_buffer->nesting);
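/*
 * Illustrative note (a summary, not upstream text): the four per-CPU
 * buffers indexed by 'nesting' mirror the contexts that can interrupt each
 * other (task, softirq, hardirq, NMI), so a trace_printk() arriving from an
 * interrupt uses the next slot instead of scribbling over a buffer that a
 * lower context is still formatting into.
 */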
3320 static int alloc_percpu_trace_buffer(void)
3322 struct trace_buffer_struct __percpu *buffers;
3324 if (trace_percpu_buffer)
3327 buffers = alloc_percpu(struct trace_buffer_struct);
3328 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3331 trace_percpu_buffer = buffers;
3335 static int buffers_allocated;
3337 void trace_printk_init_buffers(void)
3339 if (buffers_allocated)
3342 if (alloc_percpu_trace_buffer())
3345 /* trace_printk() is for debug use only. Don't use it in production. */
3348 pr_warn("**********************************************************\n");
3349 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3351 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3353 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3354 pr_warn("** unsafe for production use. **\n");
3356 pr_warn("** If you see this message and you are not debugging **\n");
3357 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3359 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3360 pr_warn("**********************************************************\n");
3362 /* Expand the buffers to set size */
3363 tracing_update_buffers();
3365 buffers_allocated = 1;
3368 * trace_printk_init_buffers() can be called by modules.
3369 * If that happens, then we need to start cmdline recording
3370 * directly here. If the global_trace.buffer is already
3371 * allocated here, then this was called by module code.
3373 if (global_trace.array_buffer.buffer)
3374 tracing_start_cmdline_record();
3376 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3378 void trace_printk_start_comm(void)
3380 /* Start tracing comms if trace printk is set */
3381 if (!buffers_allocated)
3383 tracing_start_cmdline_record();
3386 static void trace_printk_start_stop_comm(int enabled)
3388 if (!buffers_allocated)
3392 tracing_start_cmdline_record();
3394 tracing_stop_cmdline_record();
3398 * trace_vbprintk - write binary msg to tracing buffer
3399 * @ip: The address of the caller
3400 * @fmt: The string format to write to the buffer
3401 * @args: Arguments for @fmt
3403 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3405 struct trace_event_call *call = &event_bprint;
3406 struct ring_buffer_event *event;
3407 struct trace_buffer *buffer;
3408 struct trace_array *tr = &global_trace;
3409 struct bprint_entry *entry;
3410 unsigned int trace_ctx;
3414 if (unlikely(tracing_selftest_running || tracing_disabled))
3417 /* Don't pollute graph traces with trace_vprintk internals */
3418 pause_graph_tracing();
3420 trace_ctx = tracing_gen_ctx();
3421 preempt_disable_notrace();
3423 tbuffer = get_trace_buf();
3429 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3431 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3434 size = sizeof(*entry) + sizeof(u32) * len;
3435 buffer = tr->array_buffer.buffer;
3436 ring_buffer_nest_start(buffer);
3437 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3441 entry = ring_buffer_event_data(event);
3442 entry->ip = ip;
3443 entry->fmt = fmt;
3445 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3446 if (!call_filter_check_discard(call, entry, buffer, event)) {
3447 __buffer_unlock_commit(buffer, event);
3448 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3452 ring_buffer_nest_end(buffer);
3457 preempt_enable_notrace();
3458 unpause_graph_tracing();
3462 EXPORT_SYMBOL_GPL(trace_vbprintk);
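/*
 * Illustrative note (a summary, not upstream text): trace_vbprintk() is the
 * "binary printk" path. vbin_printf() packs only the raw argument values
 * plus the format pointer into the bprint entry, and the string is not
 * rendered until the trace is read. __trace_array_vprintk() below is the
 * plain-text variant, which formats with vscnprintf() at write time instead.
 */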
3466 __trace_array_vprintk(struct trace_buffer *buffer,
3467 unsigned long ip, const char *fmt, va_list args)
3469 struct trace_event_call *call = &event_print;
3470 struct ring_buffer_event *event;
3472 struct print_entry *entry;
3473 unsigned int trace_ctx;
3476 if (tracing_disabled)
3479 /* Don't pollute graph traces with trace_vprintk internals */
3480 pause_graph_tracing();
3482 trace_ctx = tracing_gen_ctx();
3483 preempt_disable_notrace();
3486 tbuffer = get_trace_buf();
3492 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3494 size = sizeof(*entry) + len + 1;
3495 ring_buffer_nest_start(buffer);
3496 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3500 entry = ring_buffer_event_data(event);
3503 memcpy(&entry->buf, tbuffer, len + 1);
3504 if (!call_filter_check_discard(call, entry, buffer, event)) {
3505 __buffer_unlock_commit(buffer, event);
3506 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3510 ring_buffer_nest_end(buffer);
3514 preempt_enable_notrace();
3515 unpause_graph_tracing();
3521 int trace_array_vprintk(struct trace_array *tr,
3522 unsigned long ip, const char *fmt, va_list args)
3524 if (tracing_selftest_running && tr == &global_trace)
3527 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3531 * trace_array_printk - Print a message to a specific instance
3532 * @tr: The instance trace_array descriptor
3533 * @ip: The instruction pointer that this is called from.
3534 * @fmt: The format to print (printf format)
3536 * If a subsystem sets up its own instance, it has the right to
3537 * printk strings into their tracing instance buffer using this
3538 * function. Note, this function will not write into the top level
3539 * buffer (use trace_printk() for that), as writing into the top level
3540 * buffer should only have events that can be individually disabled.
3541 * trace_printk() is only used for debugging a kernel, and should not
3542 * ever be incorporated into normal use.
3544 * trace_array_printk() can be used, as it will not add noise to the
3545 * top level tracing buffer.
3547 * Note, trace_array_init_printk() must be called on @tr before this
3551 int trace_array_printk(struct trace_array *tr,
3552 unsigned long ip, const char *fmt, ...)
3560 /* This is only allowed for created instances */
3561 if (tr == &global_trace)
3564 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3568 ret = trace_array_vprintk(tr, ip, fmt, ap);
3572 EXPORT_SYMBOL_GPL(trace_array_printk);
3575 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3576 * @tr: The trace array to initialize the buffers for
3578 * As trace_array_printk() only writes into instances, they are OK to
3579 * have in the kernel (unlike trace_printk()). This needs to be called
3580 * before trace_array_printk() can be used on a trace_array.
3582 int trace_array_init_printk(struct trace_array *tr)
3587 /* This is only allowed for created instances */
3588 if (tr == &global_trace)
3591 return alloc_percpu_trace_buffer();
3593 EXPORT_SYMBOL_GPL(trace_array_init_printk);
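/*
 * Illustrative usage (a sketch; the instance name "my_subsys" is made up
 * for the example and assumes the instance is obtained via
 * trace_array_get_by_name()):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello from my_subsys\n");
 *
 * This writes only into the "my_subsys" instance buffer, never into the
 * top level trace buffer.
 */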
3596 int trace_array_printk_buf(struct trace_buffer *buffer,
3597 unsigned long ip, const char *fmt, ...)
3602 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3606 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3612 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3614 return trace_array_vprintk(&global_trace, ip, fmt, args);
3616 EXPORT_SYMBOL_GPL(trace_vprintk);
3618 static void trace_iterator_increment(struct trace_iterator *iter)
3620 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3624 ring_buffer_iter_advance(buf_iter);
3627 static struct trace_entry *
3628 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3629 unsigned long *lost_events)
3631 struct ring_buffer_event *event;
3632 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3635 event = ring_buffer_iter_peek(buf_iter, ts);
3637 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3638 (unsigned long)-1 : 0;
3640 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3645 iter->ent_size = ring_buffer_event_length(event);
3646 return ring_buffer_event_data(event);
3652 static struct trace_entry *
3653 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3654 unsigned long *missing_events, u64 *ent_ts)
3656 struct trace_buffer *buffer = iter->array_buffer->buffer;
3657 struct trace_entry *ent, *next = NULL;
3658 unsigned long lost_events = 0, next_lost = 0;
3659 int cpu_file = iter->cpu_file;
3660 u64 next_ts = 0, ts;
3666 * If we are in a per_cpu trace file, don't bother iterating over
3667 * all CPUs; peek directly at that one.
3669 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3670 if (ring_buffer_empty_cpu(buffer, cpu_file))
3672 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3674 *ent_cpu = cpu_file;
3679 for_each_tracing_cpu(cpu) {
3681 if (ring_buffer_empty_cpu(buffer, cpu))
3684 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3687 * Pick the entry with the smallest timestamp:
3689 if (ent && (!next || ts < next_ts)) {
3693 next_lost = lost_events;
3694 next_size = iter->ent_size;
3698 iter->ent_size = next_size;
3701 *ent_cpu = next_cpu;
3707 *missing_events = next_lost;
3709 return next;
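/*
 * Illustrative note (a summary, not upstream text): this is an N-way merge
 * across the per-CPU buffers. Each call peeks at the head event of every
 * non-empty CPU and returns the one with the smallest timestamp, which is
 * how a single chronological stream is produced from independently written
 * per-CPU ring buffers.
 */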
3712 #define STATIC_FMT_BUF_SIZE 128
3713 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3715 char *trace_iter_expand_format(struct trace_iterator *iter)
3720 * iter->tr is NULL when used with tp_printk, which makes
3721 * this get called where it is not safe to call krealloc().
3723 if (!iter->tr || iter->fmt == static_fmt_buf)
3726 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3729 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3736 /* Returns true if the string is safe to dereference from an event */
3737 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3740 unsigned long addr = (unsigned long)str;
3741 struct trace_event *trace_event;
3742 struct trace_event_call *event;
3744 /* Ignore strings with no length */
3748 /* OK if part of the event data */
3749 if ((addr >= (unsigned long)iter->ent) &&
3750 (addr < (unsigned long)iter->ent + iter->ent_size))
3753 /* OK if part of the temp seq buffer */
3754 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3755 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3758 /* Core rodata can not be freed */
3759 if (is_kernel_rodata(addr))
3762 if (trace_is_tracepoint_string(str))
3766 * Now this could be a module event, referencing core module
3767 * data, which is OK.
3772 trace_event = ftrace_find_event(iter->ent->type);
3776 event = container_of(trace_event, struct trace_event_call, event);
3777 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3780 /* Would rather have rodata, but this will suffice */
3781 if (within_module_core(addr, event->module))
3787 static const char *show_buffer(struct trace_seq *s)
3789 struct seq_buf *seq = &s->seq;
3791 seq_buf_terminate(seq);
3796 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3798 static int test_can_verify_check(const char *fmt, ...)
3805 * The verifier depends on vsnprintf() modifying the va_list that is
3806 * passed to it, where it is sent as a reference. Some architectures
3807 * (like x86_32) pass it by value, which means that vsnprintf()
3808 * does not modify the va_list passed to it, and the verifier
3809 * would then need to be able to understand all the values that
3810 * vsnprintf can use. If it is passed by value, the verifier is disabled.
3814 vsnprintf(buf, 16, "%d", ap);
3815 ret = va_arg(ap, int);
3821 static void test_can_verify(void)
3823 if (!test_can_verify_check("%d %d", 0, 1)) {
3824 pr_info("trace event string verifier disabled\n");
3825 static_branch_inc(&trace_no_verify);
3830 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3831 * @iter: The iterator that holds the seq buffer and the event being printed
3832 * @fmt: The format used to print the event
3833 * @ap: The va_list holding the data to print from @fmt.
3835 * This writes the data into the @iter->seq buffer using the data from
3836 * @fmt and @ap. If the format has a %s, then the source of the string
3837 * is examined to make sure it is safe to print, otherwise it will
3838 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3841 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3844 const char *p = fmt;
3848 if (WARN_ON_ONCE(!fmt))
3851 if (static_branch_unlikely(&trace_no_verify))
3854 /* Don't bother checking when doing a ftrace_dump() */
3855 if (iter->fmt == static_fmt_buf)
3864 /* We only care about %s and variants */
3865 for (i = 0; p[i]; i++) {
3866 if (i + 1 >= iter->fmt_size) {
3868 * If we can't expand the copy buffer, just print it.
3871 if (!trace_iter_expand_format(iter))
3875 if (p[i] == '\\' && p[i+1]) {
3880 /* Need to test cases like %08.*s */
3881 for (j = 1; p[i+j]; j++) {
3882 if (isdigit(p[i+j]) ||
3885 if (p[i+j] == '*') {
3897 /* If no %s found then just print normally */
3901 /* Copy up to the %s, and print that */
3902 strncpy(iter->fmt, p, i);
3903 iter->fmt[i] = '\0';
3904 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3907 * If iter->seq is full, the above call no longer guarantees
3908 * that ap is in sync with fmt processing, and further calls
3909 * to va_arg() can return wrong positional arguments.
3911 * Ensure that ap is no longer used in this case.
3913 if (iter->seq.full) {
3919 len = va_arg(ap, int);
3921 /* The ap now points to the string data of the %s */
3922 str = va_arg(ap, const char *);
3925 * If you hit this warning, it is likely that the
3926 * trace event in question used %s on a string that
3927 * was saved at the time of the event, but may not be
3928 * around when the trace is read. Use __string(),
3929 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3930 * instead. See samples/trace_events/trace-events-sample.h
3933 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3934 "fmt: '%s' current_buffer: '%s'",
3935 fmt, show_buffer(&iter->seq))) {
3938 /* Try to safely read the string */
3940 if (len + 1 > iter->fmt_size)
3941 len = iter->fmt_size - 1;
3944 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3948 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3952 trace_seq_printf(&iter->seq, "(0x%px)", str);
3954 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3956 str = "[UNSAFE-MEMORY]";
3957 strcpy(iter->fmt, "%s");
3959 strncpy(iter->fmt, p + i, j + 1);
3960 iter->fmt[j+1] = '\0';
3963 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3965 trace_seq_printf(&iter->seq, iter->fmt, str);
3971 trace_seq_vprintf(&iter->seq, p, ap);
3974 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3976 const char *p, *new_fmt;
3979 if (WARN_ON_ONCE(!fmt))
3982 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3986 new_fmt = q = iter->fmt;
3988 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3989 if (!trace_iter_expand_format(iter))
3992 q += iter->fmt - new_fmt;
3993 new_fmt = iter->fmt;
3998 /* Replace %p with %px */
4002 } else if (p[0] == 'p' && !isalnum(p[1])) {
4013 #define STATIC_TEMP_BUF_SIZE 128
4014 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4016 /* Find the next real entry, without updating the iterator itself */
4017 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4018 int *ent_cpu, u64 *ent_ts)
4020 /* __find_next_entry will reset ent_size */
4021 int ent_size = iter->ent_size;
4022 struct trace_entry *entry;
4025 * If called from ftrace_dump(), then the iter->temp buffer
4026 * will be the static_temp_buf and not created from kmalloc.
4027 * If the entry size is greater than the buffer, we can
4028 * not save it. Just return NULL in that case. This is only
4029 * used to add markers when two consecutive events' time
4030 * stamps have a large delta. See trace_print_lat_context()
4032 if (iter->temp == static_temp_buf &&
4033 STATIC_TEMP_BUF_SIZE < ent_size)
4037 * The __find_next_entry() may call peek_next_entry(), which may
4038 * call ring_buffer_peek() that may make the contents of iter->ent
4039 * undefined. Need to copy iter->ent now.
4041 if (iter->ent && iter->ent != iter->temp) {
4042 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4043 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4045 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4050 iter->temp_size = iter->ent_size;
4052 memcpy(iter->temp, iter->ent, iter->ent_size);
4053 iter->ent = iter->temp;
4055 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4056 /* Put back the original ent_size */
4057 iter->ent_size = ent_size;
4062 /* Find the next real entry, and increment the iterator to the next entry */
4063 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4065 iter->ent = __find_next_entry(iter, &iter->cpu,
4066 &iter->lost_events, &iter->ts);
4069 trace_iterator_increment(iter);
4071 return iter->ent ? iter : NULL;
4074 static void trace_consume(struct trace_iterator *iter)
4076 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4077 &iter->lost_events);
4080 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4082 struct trace_iterator *iter = m->private;
4086 WARN_ON_ONCE(iter->leftover);
4090 /* can't go backwards */
4095 ent = trace_find_next_entry_inc(iter);
4099 while (ent && iter->idx < i)
4100 ent = trace_find_next_entry_inc(iter);
4107 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4109 struct ring_buffer_iter *buf_iter;
4110 unsigned long entries = 0;
4113 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4115 buf_iter = trace_buffer_iter(iter, cpu);
4119 ring_buffer_iter_reset(buf_iter);
4122 * We could have the case with the max latency tracers
4123 * that a reset never took place on a cpu. This is evident
4124 * by the timestamp being before the start of the buffer.
4126 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4127 if (ts >= iter->array_buffer->time_start)
4130 ring_buffer_iter_advance(buf_iter);
4133 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4137 * The current tracer is copied to avoid global locking all around.
4140 static void *s_start(struct seq_file *m, loff_t *pos)
4142 struct trace_iterator *iter = m->private;
4143 struct trace_array *tr = iter->tr;
4144 int cpu_file = iter->cpu_file;
4149 mutex_lock(&trace_types_lock);
4150 if (unlikely(tr->current_trace != iter->trace)) {
4151 /* Close iter->trace before switching to the new current tracer */
4152 if (iter->trace->close)
4153 iter->trace->close(iter);
4154 iter->trace = tr->current_trace;
4155 /* Reopen the new current tracer */
4156 if (iter->trace->open)
4157 iter->trace->open(iter);
4159 mutex_unlock(&trace_types_lock);
4161 #ifdef CONFIG_TRACER_MAX_TRACE
4162 if (iter->snapshot && iter->trace->use_max_tr)
4163 return ERR_PTR(-EBUSY);
4166 if (*pos != iter->pos) {
4171 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4172 for_each_tracing_cpu(cpu)
4173 tracing_iter_reset(iter, cpu);
4175 tracing_iter_reset(iter, cpu_file);
4178 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4183 * If we overflowed the seq_file before, then we want
4184 * to just reuse the trace_seq buffer again.
4190 p = s_next(m, p, &l);
4194 trace_event_read_lock();
4195 trace_access_lock(cpu_file);
4199 static void s_stop(struct seq_file *m, void *p)
4201 struct trace_iterator *iter = m->private;
4203 #ifdef CONFIG_TRACER_MAX_TRACE
4204 if (iter->snapshot && iter->trace->use_max_tr)
4208 trace_access_unlock(iter->cpu_file);
4209 trace_event_read_unlock();
4213 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4214 unsigned long *entries, int cpu)
4216 unsigned long count;
4218 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4220 * If this buffer has skipped entries, then we hold all
4221 * entries for the trace and we need to ignore the
4222 * ones before the time stamp.
4224 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4225 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4226 /* total is the same as the entries */
4230 ring_buffer_overrun_cpu(buf->buffer, cpu);
4235 get_total_entries(struct array_buffer *buf,
4236 unsigned long *total, unsigned long *entries)
4244 for_each_tracing_cpu(cpu) {
4245 get_total_entries_cpu(buf, &t, &e, cpu);
4251 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4253 unsigned long total, entries;
4258 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4263 unsigned long trace_total_entries(struct trace_array *tr)
4265 unsigned long total, entries;
4270 get_total_entries(&tr->array_buffer, &total, &entries);
4275 static void print_lat_help_header(struct seq_file *m)
4277 seq_puts(m, "# _------=> CPU# \n"
4278 "# / _-----=> irqs-off/BH-disabled\n"
4279 "# | / _----=> need-resched \n"
4280 "# || / _---=> hardirq/softirq \n"
4281 "# ||| / _--=> preempt-depth \n"
4282 "# |||| / _-=> migrate-disable \n"
4283 "# ||||| / delay \n"
4284 "# cmd pid |||||| time | caller \n"
4285 "# \\ / |||||| \\ | / \n");
4288 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4290 unsigned long total;
4291 unsigned long entries;
4293 get_total_entries(buf, &total, &entries);
4294 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4295 entries, total, num_online_cpus());
4299 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4302 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4304 print_event_info(buf, m);
4306 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4307 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4310 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4313 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4314 static const char space[] = " ";
4315 int prec = tgid ? 12 : 2;
4317 print_event_info(buf, m);
4319 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4320 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4321 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4322 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4323 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4324 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4325 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4326 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4330 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4332 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4333 struct array_buffer *buf = iter->array_buffer;
4334 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4335 struct tracer *type = iter->trace;
4336 unsigned long entries;
4337 unsigned long total;
4338 const char *name = type->name;
4340 get_total_entries(buf, &total, &entries);
4342 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4344 seq_puts(m, "# -----------------------------------"
4345 "---------------------------------\n");
4346 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4347 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4348 nsecs_to_usecs(data->saved_latency),
4352 preempt_model_none() ? "server" :
4353 preempt_model_voluntary() ? "desktop" :
4354 preempt_model_full() ? "preempt" :
4355 preempt_model_rt() ? "preempt_rt" :
4357 /* These are reserved for later use */
4360 seq_printf(m, " #P:%d)\n", num_online_cpus());
4364 seq_puts(m, "# -----------------\n");
4365 seq_printf(m, "# | task: %.16s-%d "
4366 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4367 data->comm, data->pid,
4368 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4369 data->policy, data->rt_priority);
4370 seq_puts(m, "# -----------------\n");
4372 if (data->critical_start) {
4373 seq_puts(m, "# => started at: ");
4374 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4375 trace_print_seq(m, &iter->seq);
4376 seq_puts(m, "\n# => ended at: ");
4377 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4378 trace_print_seq(m, &iter->seq);
4379 seq_puts(m, "\n#\n");
4385 static void test_cpu_buff_start(struct trace_iterator *iter)
4387 struct trace_seq *s = &iter->seq;
4388 struct trace_array *tr = iter->tr;
4390 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4393 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4396 if (cpumask_available(iter->started) &&
4397 cpumask_test_cpu(iter->cpu, iter->started))
4400 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4403 if (cpumask_available(iter->started))
4404 cpumask_set_cpu(iter->cpu, iter->started);
4406 /* Don't print started cpu buffer for the first entry of the trace */
4408 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4412 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4414 struct trace_array *tr = iter->tr;
4415 struct trace_seq *s = &iter->seq;
4416 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4417 struct trace_entry *entry;
4418 struct trace_event *event;
4422 test_cpu_buff_start(iter);
4424 event = ftrace_find_event(entry->type);
4426 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4427 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4428 trace_print_lat_context(iter);
4430 trace_print_context(iter);
4433 if (trace_seq_has_overflowed(s))
4434 return TRACE_TYPE_PARTIAL_LINE;
4437 if (tr->trace_flags & TRACE_ITER_FIELDS)
4438 return print_event_fields(iter, event);
4439 return event->funcs->trace(iter, sym_flags, event);
4442 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4444 return trace_handle_return(s);
4447 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4449 struct trace_array *tr = iter->tr;
4450 struct trace_seq *s = &iter->seq;
4451 struct trace_entry *entry;
4452 struct trace_event *event;
4456 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4457 trace_seq_printf(s, "%d %d %llu ",
4458 entry->pid, iter->cpu, iter->ts);
4460 if (trace_seq_has_overflowed(s))
4461 return TRACE_TYPE_PARTIAL_LINE;
4463 event = ftrace_find_event(entry->type);
4465 return event->funcs->raw(iter, 0, event);
4467 trace_seq_printf(s, "%d ?\n", entry->type);
4469 return trace_handle_return(s);
4472 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4474 struct trace_array *tr = iter->tr;
4475 struct trace_seq *s = &iter->seq;
4476 unsigned char newline = '\n';
4477 struct trace_entry *entry;
4478 struct trace_event *event;
4482 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4483 SEQ_PUT_HEX_FIELD(s, entry->pid);
4484 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4485 SEQ_PUT_HEX_FIELD(s, iter->ts);
4486 if (trace_seq_has_overflowed(s))
4487 return TRACE_TYPE_PARTIAL_LINE;
4490 event = ftrace_find_event(entry->type);
4492 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4493 if (ret != TRACE_TYPE_HANDLED)
4497 SEQ_PUT_FIELD(s, newline);
4499 return trace_handle_return(s);
4502 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4504 struct trace_array *tr = iter->tr;
4505 struct trace_seq *s = &iter->seq;
4506 struct trace_entry *entry;
4507 struct trace_event *event;
4511 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4512 SEQ_PUT_FIELD(s, entry->pid);
4513 SEQ_PUT_FIELD(s, iter->cpu);
4514 SEQ_PUT_FIELD(s, iter->ts);
4515 if (trace_seq_has_overflowed(s))
4516 return TRACE_TYPE_PARTIAL_LINE;
4519 event = ftrace_find_event(entry->type);
4520 return event ? event->funcs->binary(iter, 0, event) :
4524 int trace_empty(struct trace_iterator *iter)
4526 struct ring_buffer_iter *buf_iter;
4529 /* If we are looking at one CPU buffer, only check that one */
4530 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4531 cpu = iter->cpu_file;
4532 buf_iter = trace_buffer_iter(iter, cpu);
4534 if (!ring_buffer_iter_empty(buf_iter))
4537 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4543 for_each_tracing_cpu(cpu) {
4544 buf_iter = trace_buffer_iter(iter, cpu);
4546 if (!ring_buffer_iter_empty(buf_iter))
4549 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4557 /* Called with trace_event_read_lock() held. */
4558 enum print_line_t print_trace_line(struct trace_iterator *iter)
4560 struct trace_array *tr = iter->tr;
4561 unsigned long trace_flags = tr->trace_flags;
4562 enum print_line_t ret;
4564 if (iter->lost_events) {
4565 if (iter->lost_events == (unsigned long)-1)
4566 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4569 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4570 iter->cpu, iter->lost_events);
4571 if (trace_seq_has_overflowed(&iter->seq))
4572 return TRACE_TYPE_PARTIAL_LINE;
4575 if (iter->trace && iter->trace->print_line) {
4576 ret = iter->trace->print_line(iter);
4577 if (ret != TRACE_TYPE_UNHANDLED)
4581 if (iter->ent->type == TRACE_BPUTS &&
4582 trace_flags & TRACE_ITER_PRINTK &&
4583 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4584 return trace_print_bputs_msg_only(iter);
4586 if (iter->ent->type == TRACE_BPRINT &&
4587 trace_flags & TRACE_ITER_PRINTK &&
4588 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4589 return trace_print_bprintk_msg_only(iter);
4591 if (iter->ent->type == TRACE_PRINT &&
4592 trace_flags & TRACE_ITER_PRINTK &&
4593 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4594 return trace_print_printk_msg_only(iter);
4596 if (trace_flags & TRACE_ITER_BIN)
4597 return print_bin_fmt(iter);
4599 if (trace_flags & TRACE_ITER_HEX)
4600 return print_hex_fmt(iter);
4602 if (trace_flags & TRACE_ITER_RAW)
4603 return print_raw_fmt(iter);
4605 return print_trace_fmt(iter);
4608 void trace_latency_header(struct seq_file *m)
4610 struct trace_iterator *iter = m->private;
4611 struct trace_array *tr = iter->tr;
4613 /* print nothing if the buffers are empty */
4614 if (trace_empty(iter))
4617 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4618 print_trace_header(m, iter);
4620 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4621 print_lat_help_header(m);
4624 void trace_default_header(struct seq_file *m)
4626 struct trace_iterator *iter = m->private;
4627 struct trace_array *tr = iter->tr;
4628 unsigned long trace_flags = tr->trace_flags;
4630 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4633 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4634 /* print nothing if the buffers are empty */
4635 if (trace_empty(iter))
4637 print_trace_header(m, iter);
4638 if (!(trace_flags & TRACE_ITER_VERBOSE))
4639 print_lat_help_header(m);
4641 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4642 if (trace_flags & TRACE_ITER_IRQ_INFO)
4643 print_func_help_header_irq(iter->array_buffer,
4646 print_func_help_header(iter->array_buffer, m,
4652 static void test_ftrace_alive(struct seq_file *m)
4654 if (!ftrace_is_dead())
4656 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4657 "# MAY BE MISSING FUNCTION EVENTS\n");
4660 #ifdef CONFIG_TRACER_MAX_TRACE
4661 static void show_snapshot_main_help(struct seq_file *m)
4663 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4664 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4665 "# Takes a snapshot of the main buffer.\n"
4666 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4667 "# (Doesn't have to be '2' works with any number that\n"
4668 "# is not a '0' or '1')\n");
4671 static void show_snapshot_percpu_help(struct seq_file *m)
4673 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4674 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4675 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4676 "# Takes a snapshot of the main buffer for this cpu.\n");
4678 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4679 "# Must use main snapshot file to allocate.\n");
4681 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4682 "# (Doesn't have to be '2' works with any number that\n"
4683 "# is not a '0' or '1')\n");
4686 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4688 if (iter->tr->allocated_snapshot)
4689 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4691 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4693 seq_puts(m, "# Snapshot commands:\n");
4694 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4695 show_snapshot_main_help(m);
4697 show_snapshot_percpu_help(m);
4700 /* Should never be called */
4701 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4704 static int s_show(struct seq_file *m, void *v)
4706 struct trace_iterator *iter = v;
4709 if (iter->ent == NULL) {
4711 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4713 test_ftrace_alive(m);
4715 if (iter->snapshot && trace_empty(iter))
4716 print_snapshot_help(m, iter);
4717 else if (iter->trace && iter->trace->print_header)
4718 iter->trace->print_header(m);
4720 trace_default_header(m);
4722 } else if (iter->leftover) {
4724 * If we filled the seq_file buffer earlier, we
4725 * want to just show it now.
4727 ret = trace_print_seq(m, &iter->seq);
4729 /* ret should this time be zero, but you never know */
4730 iter->leftover = ret;
4733 print_trace_line(iter);
4734 ret = trace_print_seq(m, &iter->seq);
4736 * If we overflow the seq_file buffer, then it will
4737 * ask us for this data again at start up.
4739 * ret is 0 if seq_file write succeeded.
4742 iter->leftover = ret;
4749 * Should be used after trace_array_get(), trace_types_lock
4750 * ensures that i_cdev was already initialized.
4752 static inline int tracing_get_cpu(struct inode *inode)
4754 if (inode->i_cdev) /* See trace_create_cpu_file() */
4755 return (long)inode->i_cdev - 1;
4756 return RING_BUFFER_ALL_CPUS;
4759 static const struct seq_operations tracer_seq_ops = {
4767 * Note, as iter itself can be allocated and freed in different
4768 * ways, this function is only used to free its content, and not
4769 * the iterator itself. The only requirement for all the allocations
4770 * is that they must zero all fields (kzalloc), as freeing works with
4771 * either allocated content or NULL.
4773 static void free_trace_iter_content(struct trace_iterator *iter)
4775 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4776 if (iter->fmt != static_fmt_buf)
4780 kfree(iter->buffer_iter);
4781 mutex_destroy(&iter->mutex);
4782 free_cpumask_var(iter->started);
4785 static struct trace_iterator *
4786 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4788 struct trace_array *tr = inode->i_private;
4789 struct trace_iterator *iter;
4792 if (tracing_disabled)
4793 return ERR_PTR(-ENODEV);
4795 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4797 return ERR_PTR(-ENOMEM);
4799 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4801 if (!iter->buffer_iter)
4805 * trace_find_next_entry() may need to save off iter->ent.
4806 * It will place it into the iter->temp buffer. As most
4807 * events are less than 128 bytes, allocate a buffer of that size.
4808 * If one is greater, then trace_find_next_entry() will
4809 * allocate a new buffer to adjust for the bigger iter->ent.
4810 * It's not critical if it fails to get allocated here.
4812 iter->temp = kmalloc(128, GFP_KERNEL);
4814 iter->temp_size = 128;
4817 * trace_event_printf() may need to modify given format
4818 * string to replace %p with %px so that it shows real address
4819 * instead of hash value. However, that is only for the event
4820 * tracing; other tracers may not need it. Defer the allocation
4821 * until it is needed.
4826 mutex_lock(&trace_types_lock);
4827 iter->trace = tr->current_trace;
4829 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4834 #ifdef CONFIG_TRACER_MAX_TRACE
4835 /* Currently only the top directory has a snapshot */
4836 if (tr->current_trace->print_max || snapshot)
4837 iter->array_buffer = &tr->max_buffer;
4840 iter->array_buffer = &tr->array_buffer;
4841 iter->snapshot = snapshot;
4843 iter->cpu_file = tracing_get_cpu(inode);
4844 mutex_init(&iter->mutex);
4846 /* Notify the tracer early, before we stop tracing. */
4847 if (iter->trace->open)
4848 iter->trace->open(iter);
4850 /* Annotate start of buffers if we had overruns */
4851 if (ring_buffer_overruns(iter->array_buffer->buffer))
4852 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4854 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4855 if (trace_clocks[tr->clock_id].in_ns)
4856 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4859 * If pause-on-trace is enabled, then stop the trace while
4860 * dumping, unless this is the "snapshot" file
4862 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4863 tracing_stop_tr(tr);
4865 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4866 for_each_tracing_cpu(cpu) {
4867 iter->buffer_iter[cpu] =
4868 ring_buffer_read_prepare(iter->array_buffer->buffer,
4871 ring_buffer_read_prepare_sync();
4872 for_each_tracing_cpu(cpu) {
4873 ring_buffer_read_start(iter->buffer_iter[cpu]);
4874 tracing_iter_reset(iter, cpu);
4877 cpu = iter->cpu_file;
4878 iter->buffer_iter[cpu] =
4879 ring_buffer_read_prepare(iter->array_buffer->buffer,
4881 ring_buffer_read_prepare_sync();
4882 ring_buffer_read_start(iter->buffer_iter[cpu]);
4883 tracing_iter_reset(iter, cpu);
4886 mutex_unlock(&trace_types_lock);
4891 mutex_unlock(&trace_types_lock);
4892 free_trace_iter_content(iter);
4894 seq_release_private(inode, file);
4895 return ERR_PTR(-ENOMEM);
4898 int tracing_open_generic(struct inode *inode, struct file *filp)
4902 ret = tracing_check_open_get_tr(NULL);
4906 filp->private_data = inode->i_private;
4910 bool tracing_is_disabled(void)
4912 return (tracing_disabled) ? true : false;
4916 * Open and update trace_array ref count.
4917 * Must have the current trace_array passed to it.
4919 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4921 struct trace_array *tr = inode->i_private;
4924 ret = tracing_check_open_get_tr(tr);
4928 filp->private_data = inode->i_private;
4934 * The private pointer of the inode is the trace_event_file.
4935 * Update the tr ref count associated to it.
4937 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4939 struct trace_event_file *file = inode->i_private;
4942 ret = tracing_check_open_get_tr(file->tr);
4946 mutex_lock(&event_mutex);
4948 /* Fail if the file is marked for removal */
4949 if (file->flags & EVENT_FILE_FL_FREED) {
4950 trace_array_put(file->tr);
4953 event_file_get(file);
4956 mutex_unlock(&event_mutex);
4960 filp->private_data = inode->i_private;
4965 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4967 struct trace_event_file *file = inode->i_private;
4969 trace_array_put(file->tr);
4970 event_file_put(file);
4975 static int tracing_mark_open(struct inode *inode, struct file *filp)
4977 stream_open(inode, filp);
4978 return tracing_open_generic_tr(inode, filp);
4981 static int tracing_release(struct inode *inode, struct file *file)
4983 struct trace_array *tr = inode->i_private;
4984 struct seq_file *m = file->private_data;
4985 struct trace_iterator *iter;
4988 if (!(file->f_mode & FMODE_READ)) {
4989 trace_array_put(tr);
4993 /* Writes do not use seq_file */
4995 mutex_lock(&trace_types_lock);
4997 for_each_tracing_cpu(cpu) {
4998 if (iter->buffer_iter[cpu])
4999 ring_buffer_read_finish(iter->buffer_iter[cpu]);
5002 if (iter->trace && iter->trace->close)
5003 iter->trace->close(iter);
5005 if (!iter->snapshot && tr->stop_count)
5006 /* reenable tracing if it was previously enabled */
5007 tracing_start_tr(tr);
5009 __trace_array_put(tr);
5011 mutex_unlock(&trace_types_lock);
5013 free_trace_iter_content(iter);
5014 seq_release_private(inode, file);
5019 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
5021 struct trace_array *tr = inode->i_private;
5023 trace_array_put(tr);
5027 static int tracing_single_release_tr(struct inode *inode, struct file *file)
5029 struct trace_array *tr = inode->i_private;
5031 trace_array_put(tr);
5033 return single_release(inode, file);
5036 static int tracing_open(struct inode *inode, struct file *file)
5038 struct trace_array *tr = inode->i_private;
5039 struct trace_iterator *iter;
5042 ret = tracing_check_open_get_tr(tr);
5046 /* If this file was open for write, then erase contents */
5047 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5048 int cpu = tracing_get_cpu(inode);
5049 struct array_buffer *trace_buf = &tr->array_buffer;
5051 #ifdef CONFIG_TRACER_MAX_TRACE
5052 if (tr->current_trace->print_max)
5053 trace_buf = &tr->max_buffer;
5056 if (cpu == RING_BUFFER_ALL_CPUS)
5057 tracing_reset_online_cpus(trace_buf);
5059 tracing_reset_cpu(trace_buf, cpu);
5062 if (file->f_mode & FMODE_READ) {
5063 iter = __tracing_open(inode, file, false);
5065 ret = PTR_ERR(iter);
5066 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5067 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5071 trace_array_put(tr);
5077 * Some tracers are not suitable for instance buffers.
5078 * A tracer is always available for the global array (toplevel)
5079 * or if it explicitly states that it is.
5082 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5084 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5087 /* Find the next tracer that this trace array may use */
5088 static struct tracer *
5089 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5091 while (t && !trace_ok_for_array(t, tr))
5098 t_next(struct seq_file *m, void *v, loff_t *pos)
5100 struct trace_array *tr = m->private;
5101 struct tracer *t = v;
5106 t = get_tracer_for_array(tr, t->next);
5111 static void *t_start(struct seq_file *m, loff_t *pos)
5113 struct trace_array *tr = m->private;
5117 mutex_lock(&trace_types_lock);
5119 t = get_tracer_for_array(tr, trace_types);
5120 for (; t && l < *pos; t = t_next(m, t, &l))
5126 static void t_stop(struct seq_file *m, void *p)
5128 mutex_unlock(&trace_types_lock);
5131 static int t_show(struct seq_file *m, void *v)
5133 struct tracer *t = v;
5138 seq_puts(m, t->name);
5147 static const struct seq_operations show_traces_seq_ops = {
5154 static int show_traces_open(struct inode *inode, struct file *file)
5156 struct trace_array *tr = inode->i_private;
5160 ret = tracing_check_open_get_tr(tr);
5164 ret = seq_open(file, &show_traces_seq_ops);
5166 trace_array_put(tr);
5170 m = file->private_data;
5176 static int show_traces_release(struct inode *inode, struct file *file)
5178 struct trace_array *tr = inode->i_private;
5180 trace_array_put(tr);
5181 return seq_release(inode, file);
5185 tracing_write_stub(struct file *filp, const char __user *ubuf,
5186 size_t count, loff_t *ppos)
5191 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5195 if (file->f_mode & FMODE_READ)
5196 ret = seq_lseek(file, offset, whence);
5198 file->f_pos = ret = 0;
5203 static const struct file_operations tracing_fops = {
5204 .open = tracing_open,
5206 .read_iter = seq_read_iter,
5207 .splice_read = copy_splice_read,
5208 .write = tracing_write_stub,
5209 .llseek = tracing_lseek,
5210 .release = tracing_release,
5213 static const struct file_operations show_traces_fops = {
5214 .open = show_traces_open,
5216 .llseek = seq_lseek,
5217 .release = show_traces_release,
5221 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5222 size_t count, loff_t *ppos)
5224 struct trace_array *tr = file_inode(filp)->i_private;
5228 len = snprintf(NULL, 0, "%*pb\n",
5229 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5230 mask_str = kmalloc(len, GFP_KERNEL);
5234 len = snprintf(mask_str, len, "%*pb\n",
5235 cpumask_pr_args(tr->tracing_cpumask));
5240 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5248 int tracing_set_cpumask(struct trace_array *tr,
5249 cpumask_var_t tracing_cpumask_new)
5256 local_irq_disable();
5257 arch_spin_lock(&tr->max_lock);
5258 for_each_tracing_cpu(cpu) {
5260 * Increase/decrease the disabled counter if we are
5261 * about to flip a bit in the cpumask:
5263 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5264 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5265 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5266 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5267 #ifdef CONFIG_TRACER_MAX_TRACE
5268 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5271 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5272 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5273 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5274 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5275 #ifdef CONFIG_TRACER_MAX_TRACE
5276 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5280 arch_spin_unlock(&tr->max_lock);
5283 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5289 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5290 size_t count, loff_t *ppos)
5292 struct trace_array *tr = file_inode(filp)->i_private;
5293 cpumask_var_t tracing_cpumask_new;
5296 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5299 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5303 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5307 free_cpumask_var(tracing_cpumask_new);
5312 free_cpumask_var(tracing_cpumask_new);
5317 static const struct file_operations tracing_cpumask_fops = {
5318 .open = tracing_open_generic_tr,
5319 .read = tracing_cpumask_read,
5320 .write = tracing_cpumask_write,
5321 .release = tracing_release_generic_tr,
5322 .llseek = generic_file_llseek,
5325 static int tracing_trace_options_show(struct seq_file *m, void *v)
5327 struct tracer_opt *trace_opts;
5328 struct trace_array *tr = m->private;
5332 mutex_lock(&trace_types_lock);
5333 tracer_flags = tr->current_trace->flags->val;
5334 trace_opts = tr->current_trace->flags->opts;
5336 for (i = 0; trace_options[i]; i++) {
5337 if (tr->trace_flags & (1 << i))
5338 seq_printf(m, "%s\n", trace_options[i]);
5340 seq_printf(m, "no%s\n", trace_options[i]);
5343 for (i = 0; trace_opts[i].name; i++) {
5344 if (tracer_flags & trace_opts[i].bit)
5345 seq_printf(m, "%s\n", trace_opts[i].name);
5347 seq_printf(m, "no%s\n", trace_opts[i].name);
5349 mutex_unlock(&trace_types_lock);
5354 static int __set_tracer_option(struct trace_array *tr,
5355 struct tracer_flags *tracer_flags,
5356 struct tracer_opt *opts, int neg)
5358 struct tracer *trace = tracer_flags->trace;
5361 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5366 tracer_flags->val &= ~opts->bit;
5368 tracer_flags->val |= opts->bit;
5372 /* Try to assign a tracer specific option */
5373 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5375 struct tracer *trace = tr->current_trace;
5376 struct tracer_flags *tracer_flags = trace->flags;
5377 struct tracer_opt *opts = NULL;
5380 for (i = 0; tracer_flags->opts[i].name; i++) {
5381 opts = &tracer_flags->opts[i];
5383 if (strcmp(cmp, opts->name) == 0)
5384 return __set_tracer_option(tr, trace->flags, opts, neg);
5390 /* Some tracers require overwrite to stay enabled */
5391 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5393 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5399 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5403 if ((mask == TRACE_ITER_RECORD_TGID) ||
5404 (mask == TRACE_ITER_RECORD_CMD))
5405 lockdep_assert_held(&event_mutex);
5407 /* do nothing if flag is already set */
5408 if (!!(tr->trace_flags & mask) == !!enabled)
5411 /* Give the tracer a chance to approve the change */
5412 if (tr->current_trace->flag_changed)
5413 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5417 tr->trace_flags |= mask;
5419 tr->trace_flags &= ~mask;
5421 if (mask == TRACE_ITER_RECORD_CMD)
5422 trace_event_enable_cmd_record(enabled);
5424 if (mask == TRACE_ITER_RECORD_TGID) {
5426 tgid_map_max = pid_max;
5427 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5431 * Pairs with smp_load_acquire() in
5432 * trace_find_tgid_ptr() to ensure that if it observes
5433 * the tgid_map we just allocated then it also observes
5434 * the corresponding tgid_map_max value.
5436 smp_store_release(&tgid_map, map);
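/*
 * Added sketch, not in the original source: the reader side in
 * trace_find_tgid_ptr() is expected to do roughly
 *
 *	map = smp_load_acquire(&tgid_map);
 *	if (!map || pid > tgid_map_max)
 *		return NULL;
 *
 * which is why tgid_map_max must be written before tgid_map is
 * published above.
 */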
5439 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5443 trace_event_enable_tgid_record(enabled);
5446 if (mask == TRACE_ITER_EVENT_FORK)
5447 trace_event_follow_fork(tr, enabled);
5449 if (mask == TRACE_ITER_FUNC_FORK)
5450 ftrace_pid_follow_fork(tr, enabled);
5452 if (mask == TRACE_ITER_OVERWRITE) {
5453 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5454 #ifdef CONFIG_TRACER_MAX_TRACE
5455 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5459 if (mask == TRACE_ITER_PRINTK) {
5460 trace_printk_start_stop_comm(enabled);
5461 trace_printk_control(enabled);
5467 int trace_set_options(struct trace_array *tr, char *option)
5472 size_t orig_len = strlen(option);
5475 cmp = strstrip(option);
5477 len = str_has_prefix(cmp, "no");
5483 mutex_lock(&event_mutex);
5484 mutex_lock(&trace_types_lock);
5486 ret = match_string(trace_options, -1, cmp);
5487 /* If no option could be set, test the specific tracer options */
5489 ret = set_tracer_option(tr, cmp, neg);
5491 ret = set_tracer_flag(tr, 1 << ret, !neg);
5493 mutex_unlock(&trace_types_lock);
5494 mutex_unlock(&event_mutex);
5497 * If the first trailing whitespace is replaced with '\0' by strstrip,
5498 * turn it back into a space.
5500 if (orig_len > strlen(option))
5501 option[strlen(option)] = ' ';
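/*
 * Added usage note, not in the original source: callers pass strings such
 * as "overwrite" or "nooverwrite"; a leading "no" is stripped above and
 * becomes the 'neg' argument, and any name that is not one of the core
 * trace_options falls through to the current tracer's private options via
 * set_tracer_option().
 */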
5506 static void __init apply_trace_boot_options(void)
5508 char *buf = trace_boot_options_buf;
5512 option = strsep(&buf, ",");
5518 trace_set_options(&global_trace, option);
5520 /* Put back the comma to allow this to be called again */
5527 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5528 size_t cnt, loff_t *ppos)
5530 struct seq_file *m = filp->private_data;
5531 struct trace_array *tr = m->private;
5535 if (cnt >= sizeof(buf))
5538 if (copy_from_user(buf, ubuf, cnt))
5543 ret = trace_set_options(tr, buf);
5552 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5554 struct trace_array *tr = inode->i_private;
5557 ret = tracing_check_open_get_tr(tr);
5561 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5563 trace_array_put(tr);
5568 static const struct file_operations tracing_iter_fops = {
5569 .open = tracing_trace_options_open,
5571 .llseek = seq_lseek,
5572 .release = tracing_single_release_tr,
5573 .write = tracing_trace_options_write,
5576 static const char readme_msg[] =
5577 "tracing mini-HOWTO:\n\n"
5578 "# echo 0 > tracing_on : quick way to disable tracing\n"
5579 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5580 " Important files:\n"
5581 " trace\t\t\t- The static contents of the buffer\n"
5582 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5583 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5584 " current_tracer\t- function and latency tracers\n"
5585 " available_tracers\t- list of configured tracers for current_tracer\n"
5586 " error_log\t- error log for failed commands (that support it)\n"
5587 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5588 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5589 " trace_clock\t\t- change the clock used to order events\n"
5590 " local: Per cpu clock but may not be synced across CPUs\n"
5591 " global: Synced across CPUs but slows tracing down.\n"
5592 " counter: Not a clock, but just an increment\n"
5593 " uptime: Jiffy counter from time of boot\n"
5594 " perf: Same clock that perf events use\n"
5595 #ifdef CONFIG_X86_64
5596 " x86-tsc: TSC cycle counter\n"
5598 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5599 " delta: Delta difference against a buffer-wide timestamp\n"
5600 " absolute: Absolute (standalone) timestamp\n"
5601 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5602 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5603 " tracing_cpumask\t- Limit which CPUs to trace\n"
5604 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5605 "\t\t\t Remove sub-buffer with rmdir\n"
5606 " trace_options\t\t- Set format or modify how tracing happens\n"
5607 "\t\t\t Disable an option by prefixing 'no' to the\n"
5608 "\t\t\t option name\n"
5609 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5610 #ifdef CONFIG_DYNAMIC_FTRACE
5611 "\n available_filter_functions - list of functions that can be filtered on\n"
5612 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5613 "\t\t\t functions\n"
5614 "\t accepts: func_full_name or glob-matching-pattern\n"
5615 "\t modules: Can select a group via module\n"
5616 "\t Format: :mod:<module-name>\n"
5617 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5618 "\t triggers: a command to perform when function is hit\n"
5619 "\t Format: <function>:<trigger>[:count]\n"
5620 "\t trigger: traceon, traceoff\n"
5621 "\t\t enable_event:<system>:<event>\n"
5622 "\t\t disable_event:<system>:<event>\n"
5623 #ifdef CONFIG_STACKTRACE
5626 #ifdef CONFIG_TRACER_SNAPSHOT
5631 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5632 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5633 "\t The first one will disable tracing every time do_fault is hit\n"
5634 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5635 "\t The first time do trap is hit and it disables tracing, the\n"
5636 "\t counter will decrement to 2. If tracing is already disabled,\n"
5637 "\t the counter will not decrement. It only decrements when the\n"
5638 "\t trigger did work\n"
5639 "\t To remove trigger without count:\n"
5640 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5641 "\t To remove trigger with a count:\n"
5642 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5643 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5644 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5645 "\t modules: Can select a group via module command :mod:\n"
5646 "\t Does not accept triggers\n"
5647 #endif /* CONFIG_DYNAMIC_FTRACE */
5648 #ifdef CONFIG_FUNCTION_TRACER
5649 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5651 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5654 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5655 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5656 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5657 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5659 #ifdef CONFIG_TRACER_SNAPSHOT
5660 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5661 "\t\t\t snapshot buffer. Read the contents for more\n"
5662 "\t\t\t information\n"
5664 #ifdef CONFIG_STACK_TRACER
5665 " stack_trace\t\t- Shows the max stack trace when active\n"
5666 " stack_max_size\t- Shows current max stack size that was traced\n"
5667 "\t\t\t Write into this file to reset the max size (trigger a\n"
5668 "\t\t\t new trace)\n"
5669 #ifdef CONFIG_DYNAMIC_FTRACE
5670 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5673 #endif /* CONFIG_STACK_TRACER */
5674 #ifdef CONFIG_DYNAMIC_EVENTS
5675 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5676 "\t\t\t Write into this file to define/undefine new trace events.\n"
5678 #ifdef CONFIG_KPROBE_EVENTS
5679 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5680 "\t\t\t Write into this file to define/undefine new trace events.\n"
5682 #ifdef CONFIG_UPROBE_EVENTS
5683 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5684 "\t\t\t Write into this file to define/undefine new trace events.\n"
5686 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5687 defined(CONFIG_FPROBE_EVENTS)
5688 "\t accepts: event-definitions (one definition per line)\n"
5689 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5690 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5691 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5693 #ifdef CONFIG_FPROBE_EVENTS
5694 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5695 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5697 #ifdef CONFIG_HIST_TRIGGERS
5698 "\t s:[synthetic/]<event> <field> [<field>]\n"
5700 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5701 "\t -:[<group>/][<event>]\n"
5702 #ifdef CONFIG_KPROBE_EVENTS
5703 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5704 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5706 #ifdef CONFIG_UPROBE_EVENTS
5707 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5709 "\t args: <name>=fetcharg[:type]\n"
5710 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5711 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5712 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5713 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5714 "\t <argname>[->field[->field|.field...]],\n"
5716 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5719 "\t $stack<index>, $stack, $retval, $comm,\n"
5721 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5722 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5723 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5724 "\t symstr, <type>\\[<array-size>\\]\n"
5725 #ifdef CONFIG_HIST_TRIGGERS
5726 "\t field: <stype> <name>;\n"
5727 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5728 "\t [unsigned] char/int/long\n"
5730 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5731 "\t of the <attached-group>/<attached-event>.\n"
5733 " events/\t\t- Directory containing all trace event subsystems:\n"
5734 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5735 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5736 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5738 " filter\t\t- If set, only events passing filter are traced\n"
5739 " events/<system>/<event>/\t- Directory containing control files for\n"
5741 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5742 " filter\t\t- If set, only events passing filter are traced\n"
5743 " trigger\t\t- If set, a command to perform when event is hit\n"
5744 "\t Format: <trigger>[:count][if <filter>]\n"
5745 "\t trigger: traceon, traceoff\n"
5746 "\t enable_event:<system>:<event>\n"
5747 "\t disable_event:<system>:<event>\n"
5748 #ifdef CONFIG_HIST_TRIGGERS
5749 "\t enable_hist:<system>:<event>\n"
5750 "\t disable_hist:<system>:<event>\n"
5752 #ifdef CONFIG_STACKTRACE
5755 #ifdef CONFIG_TRACER_SNAPSHOT
5758 #ifdef CONFIG_HIST_TRIGGERS
5759 "\t\t hist (see below)\n"
5761 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5762 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5763 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5764 "\t events/block/block_unplug/trigger\n"
5765 "\t The first disables tracing every time block_unplug is hit.\n"
5766 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5767 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5768 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5769 "\t Like function triggers, the counter is only decremented if it\n"
5770 "\t enabled or disabled tracing.\n"
5771 "\t To remove a trigger without a count:\n"
5772 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5773 "\t To remove a trigger with a count:\n"
5774 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5775 "\t Filters can be ignored when removing a trigger.\n"
5776 #ifdef CONFIG_HIST_TRIGGERS
5777 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5778 "\t Format: hist:keys=<field1[,field2,...]>\n"
5779 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5780 "\t [:values=<field1[,field2,...]>]\n"
5781 "\t [:sort=<field1[,field2,...]>]\n"
5782 "\t [:size=#entries]\n"
5783 "\t [:pause][:continue][:clear]\n"
5784 "\t [:name=histname1]\n"
5785 "\t [:nohitcount]\n"
5786 "\t [:<handler>.<action>]\n"
5787 "\t [if <filter>]\n\n"
5788 "\t Note, special fields can be used as well:\n"
5789 "\t common_timestamp - to record current timestamp\n"
5790 "\t common_cpu - to record the CPU the event happened on\n"
5792 "\t A hist trigger variable can be:\n"
5793 "\t - a reference to a field e.g. x=current_timestamp,\n"
5794 "\t - a reference to another variable e.g. y=$x,\n"
5795 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5796 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5798 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5799 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5800 "\t variable reference, field or numeric literal.\n"
5802 "\t When a matching event is hit, an entry is added to a hash\n"
5803 "\t table using the key(s) and value(s) named, and the value of a\n"
5804 "\t sum called 'hitcount' is incremented. Keys and values\n"
5805 "\t correspond to fields in the event's format description. Keys\n"
5806 "\t can be any field, or the special string 'common_stacktrace'.\n"
5807 "\t Compound keys consisting of up to two fields can be specified\n"
5808 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5809 "\t fields. Sort keys consisting of up to two fields can be\n"
5810 "\t specified using the 'sort' keyword. The sort direction can\n"
5811 "\t be modified by appending '.descending' or '.ascending' to a\n"
5812 "\t sort field. The 'size' parameter can be used to specify more\n"
5813 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5814 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5815 "\t its histogram data will be shared with other triggers of the\n"
5816 "\t same name, and trigger hits will update this common data.\n\n"
5817 "\t Reading the 'hist' file for the event will dump the hash\n"
5818 "\t table in its entirety to stdout. If there are multiple hist\n"
5819 "\t triggers attached to an event, there will be a table for each\n"
5820 "\t trigger in the output. The table displayed for a named\n"
5821 "\t trigger will be the same as any other instance having the\n"
5822 "\t same name. The default format used to display a given field\n"
5823 "\t can be modified by appending any of the following modifiers\n"
5824 "\t to the field name, as applicable:\n\n"
5825 "\t .hex display a number as a hex value\n"
5826 "\t .sym display an address as a symbol\n"
5827 "\t .sym-offset display an address as a symbol and offset\n"
5828 "\t .execname display a common_pid as a program name\n"
5829 "\t .syscall display a syscall id as a syscall name\n"
5830 "\t .log2 display log2 value rather than raw number\n"
5831 "\t .buckets=size display values in groups of size rather than raw number\n"
5832 "\t .usecs display a common_timestamp in microseconds\n"
5833 "\t .percent display a number of percentage value\n"
5834 "\t .graph display a bar-graph of a value\n\n"
5835 "\t The 'pause' parameter can be used to pause an existing hist\n"
5836 "\t trigger or to start a hist trigger but not log any events\n"
5837 "\t until told to do so. 'continue' can be used to start or\n"
5838 "\t restart a paused hist trigger.\n\n"
5839 "\t The 'clear' parameter will clear the contents of a running\n"
5840 "\t hist trigger and leave its current paused/active state\n"
5842 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5843 "\t raw hitcount in the histogram.\n\n"
5844 "\t The enable_hist and disable_hist triggers can be used to\n"
5845 "\t have one event conditionally start and stop another event's\n"
5846 "\t already-attached hist trigger. The syntax is analogous to\n"
5847 "\t the enable_event and disable_event triggers.\n\n"
5848 "\t Hist trigger handlers and actions are executed whenever a\n"
5849 "\t a histogram entry is added or updated. They take the form:\n\n"
5850 "\t <handler>.<action>\n\n"
5851 "\t The available handlers are:\n\n"
5852 "\t onmatch(matching.event) - invoke on addition or update\n"
5853 "\t onmax(var) - invoke if var exceeds current max\n"
5854 "\t onchange(var) - invoke action if var changes\n\n"
5855 "\t The available actions are:\n\n"
5856 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5857 "\t save(field,...) - save current event fields\n"
5858 #ifdef CONFIG_TRACER_SNAPSHOT
5859 "\t snapshot() - snapshot the trace buffer\n\n"
5861 #ifdef CONFIG_SYNTH_EVENTS
5862 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5863 "\t Write into this file to define/undefine new synthetic events.\n"
5864 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5870 tracing_readme_read(struct file *filp, char __user *ubuf,
5871 size_t cnt, loff_t *ppos)
5873 return simple_read_from_buffer(ubuf, cnt, ppos,
5874 readme_msg, strlen(readme_msg));
5877 static const struct file_operations tracing_readme_fops = {
5878 .open = tracing_open_generic,
5879 .read = tracing_readme_read,
5880 .llseek = generic_file_llseek,
5883 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5887 return trace_find_tgid_ptr(pid);
5890 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5894 return trace_find_tgid_ptr(pid);
5897 static void saved_tgids_stop(struct seq_file *m, void *v)
5901 static int saved_tgids_show(struct seq_file *m, void *v)
5903 int *entry = (int *)v;
5904 int pid = entry - tgid_map;
5910 seq_printf(m, "%d %d\n", pid, tgid);
5914 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5915 .start = saved_tgids_start,
5916 .stop = saved_tgids_stop,
5917 .next = saved_tgids_next,
5918 .show = saved_tgids_show,
5921 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5925 ret = tracing_check_open_get_tr(NULL);
5929 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5933 static const struct file_operations tracing_saved_tgids_fops = {
5934 .open = tracing_saved_tgids_open,
5936 .llseek = seq_lseek,
5937 .release = seq_release,
5940 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5942 unsigned int *ptr = v;
5944 if (*pos || m->count)
5949 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5951 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5960 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5966 arch_spin_lock(&trace_cmdline_lock);
5968 v = &savedcmd->map_cmdline_to_pid[0];
5970 v = saved_cmdlines_next(m, v, &l);
5978 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5980 arch_spin_unlock(&trace_cmdline_lock);
5984 static int saved_cmdlines_show(struct seq_file *m, void *v)
5986 char buf[TASK_COMM_LEN];
5987 unsigned int *pid = v;
5989 __trace_find_cmdline(*pid, buf);
5990 seq_printf(m, "%d %s\n", *pid, buf);
5994 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5995 .start = saved_cmdlines_start,
5996 .next = saved_cmdlines_next,
5997 .stop = saved_cmdlines_stop,
5998 .show = saved_cmdlines_show,
6001 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
6005 ret = tracing_check_open_get_tr(NULL);
6009 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
6012 static const struct file_operations tracing_saved_cmdlines_fops = {
6013 .open = tracing_saved_cmdlines_open,
6015 .llseek = seq_lseek,
6016 .release = seq_release,
6020 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
6021 size_t cnt, loff_t *ppos)
6027 arch_spin_lock(&trace_cmdline_lock);
6028 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6029 arch_spin_unlock(&trace_cmdline_lock);
6032 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6035 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
6037 kfree(s->saved_cmdlines);
6038 kfree(s->map_cmdline_to_pid);
6042 static int tracing_resize_saved_cmdlines(unsigned int val)
6044 struct saved_cmdlines_buffer *s, *savedcmd_temp;
6046 s = kmalloc(sizeof(*s), GFP_KERNEL);
6050 if (allocate_cmdlines_buffer(val, s) < 0) {
6056 arch_spin_lock(&trace_cmdline_lock);
6057 savedcmd_temp = savedcmd;
6059 arch_spin_unlock(&trace_cmdline_lock);
6061 free_saved_cmdlines_buffer(savedcmd_temp);
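/*
 * Added note, not in the original source: the new buffer is published
 * while trace_cmdline_lock is held and only the old pointer is kept in
 * savedcmd_temp; freeing the old buffer then happens outside the
 * spinlock, so readers never see a half-freed savedcmd.
 */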
6067 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6068 size_t cnt, loff_t *ppos)
6073 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6077 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
6078 if (!val || val > PID_MAX_DEFAULT)
6081 ret = tracing_resize_saved_cmdlines((unsigned int)val);
6090 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6091 .open = tracing_open_generic,
6092 .read = tracing_saved_cmdlines_size_read,
6093 .write = tracing_saved_cmdlines_size_write,
6096 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6097 static union trace_eval_map_item *
6098 update_eval_map(union trace_eval_map_item *ptr)
6100 if (!ptr->map.eval_string) {
6101 if (ptr->tail.next) {
6102 ptr = ptr->tail.next;
6103 /* Set ptr to the next real item (skip head) */
6111 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6113 union trace_eval_map_item *ptr = v;
6116 * Paranoid! If ptr points to end, we don't want to increment past it.
6117 * This really should never happen.
6120 ptr = update_eval_map(ptr);
6121 if (WARN_ON_ONCE(!ptr))
6125 ptr = update_eval_map(ptr);
6130 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6132 union trace_eval_map_item *v;
6135 mutex_lock(&trace_eval_mutex);
6137 v = trace_eval_maps;
6141 while (v && l < *pos) {
6142 v = eval_map_next(m, v, &l);
6148 static void eval_map_stop(struct seq_file *m, void *v)
6150 mutex_unlock(&trace_eval_mutex);
6153 static int eval_map_show(struct seq_file *m, void *v)
6155 union trace_eval_map_item *ptr = v;
6157 seq_printf(m, "%s %ld (%s)\n",
6158 ptr->map.eval_string, ptr->map.eval_value,
6164 static const struct seq_operations tracing_eval_map_seq_ops = {
6165 .start = eval_map_start,
6166 .next = eval_map_next,
6167 .stop = eval_map_stop,
6168 .show = eval_map_show,
6171 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6175 ret = tracing_check_open_get_tr(NULL);
6179 return seq_open(filp, &tracing_eval_map_seq_ops);
6182 static const struct file_operations tracing_eval_map_fops = {
6183 .open = tracing_eval_map_open,
6185 .llseek = seq_lseek,
6186 .release = seq_release,
6189 static inline union trace_eval_map_item *
6190 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6192 /* Return tail of array given the head */
6193 return ptr + ptr->head.length + 1;
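/*
 * Added layout sketch, not in the original source: each block linked
 * into trace_eval_maps is laid out as
 *
 *	[ head ][ map 0 ] ... [ map len-1 ][ tail ]
 *
 * so, starting from the head at index 0, the tail lives at index
 * length + 1, which is what the pointer arithmetic above computes.
 */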
6197 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6200 struct trace_eval_map **stop;
6201 struct trace_eval_map **map;
6202 union trace_eval_map_item *map_array;
6203 union trace_eval_map_item *ptr;
6208 * The trace_eval_maps contains the map plus a head and tail item,
6209 * where the head holds the module and the length of the array, and the
6210 * tail holds a pointer to the next list.
6212 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6214 pr_warn("Unable to allocate trace eval mapping\n");
6218 mutex_lock(&trace_eval_mutex);
6220 if (!trace_eval_maps)
6221 trace_eval_maps = map_array;
6223 ptr = trace_eval_maps;
6225 ptr = trace_eval_jmp_to_tail(ptr);
6226 if (!ptr->tail.next)
6228 ptr = ptr->tail.next;
6231 ptr->tail.next = map_array;
6233 map_array->head.mod = mod;
6234 map_array->head.length = len;
6237 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6238 map_array->map = **map;
6241 memset(map_array, 0, sizeof(*map_array));
6243 mutex_unlock(&trace_eval_mutex);
6246 static void trace_create_eval_file(struct dentry *d_tracer)
6248 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6249 NULL, &tracing_eval_map_fops);
6252 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6253 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6254 static inline void trace_insert_eval_map_file(struct module *mod,
6255 struct trace_eval_map **start, int len) { }
6256 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6258 static void trace_insert_eval_map(struct module *mod,
6259 struct trace_eval_map **start, int len)
6261 struct trace_eval_map **map;
6268 trace_event_eval_update(map, len);
6270 trace_insert_eval_map_file(mod, start, len);
6274 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6275 size_t cnt, loff_t *ppos)
6277 struct trace_array *tr = filp->private_data;
6278 char buf[MAX_TRACER_SIZE+2];
6281 mutex_lock(&trace_types_lock);
6282 r = sprintf(buf, "%s\n", tr->current_trace->name);
6283 mutex_unlock(&trace_types_lock);
6285 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6288 int tracer_init(struct tracer *t, struct trace_array *tr)
6290 tracing_reset_online_cpus(&tr->array_buffer);
6294 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6298 for_each_tracing_cpu(cpu)
6299 per_cpu_ptr(buf->data, cpu)->entries = val;
6302 static void update_buffer_entries(struct array_buffer *buf, int cpu)
6304 if (cpu == RING_BUFFER_ALL_CPUS) {
6305 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
6307 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
6311 #ifdef CONFIG_TRACER_MAX_TRACE
6312 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6313 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6314 struct array_buffer *size_buf, int cpu_id)
6318 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6319 for_each_tracing_cpu(cpu) {
6320 ret = ring_buffer_resize(trace_buf->buffer,
6321 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6324 per_cpu_ptr(trace_buf->data, cpu)->entries =
6325 per_cpu_ptr(size_buf->data, cpu)->entries;
6328 ret = ring_buffer_resize(trace_buf->buffer,
6329 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6331 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6332 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6337 #endif /* CONFIG_TRACER_MAX_TRACE */
6339 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6340 unsigned long size, int cpu)
6345 * If the kernel or a user changes the size of the ring buffer,
6346 * we use the size that was given, and we can forget about
6347 * expanding it later.
6349 ring_buffer_expanded = true;
6351 /* May be called before buffers are initialized */
6352 if (!tr->array_buffer.buffer)
6355 /* Do not allow tracing while resizing ring buffer */
6356 tracing_stop_tr(tr);
6358 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6362 #ifdef CONFIG_TRACER_MAX_TRACE
6363 if (!tr->allocated_snapshot)
6366 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6368 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6369 &tr->array_buffer, cpu);
6372 * AARGH! We are left with a max buffer of a
6373 * different size!
6374 * The max buffer is our "snapshot" buffer.
6375 * When a tracer needs a snapshot (one of the
6376 * latency tracers), it swaps the max buffer
6377 * with the saved snapshot. We succeeded in
6378 * updating the size of the main buffer, but failed to
6379 * update the size of the max buffer. But when we tried
6380 * to reset the main buffer to the original size, we
6381 * failed there too. This is very unlikely to
6382 * happen, but if it does, warn and kill all
6386 tracing_disabled = 1;
6391 update_buffer_entries(&tr->max_buffer, cpu);
6394 #endif /* CONFIG_TRACER_MAX_TRACE */
6396 update_buffer_entries(&tr->array_buffer, cpu);
6398 tracing_start_tr(tr);
6402 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6403 unsigned long size, int cpu_id)
6407 mutex_lock(&trace_types_lock);
6409 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6410 /* make sure this CPU is enabled in the mask */
6411 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6417 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6422 mutex_unlock(&trace_types_lock);
6429 * tracing_update_buffers - used by tracing facility to expand ring buffers
6431 * To save memory when tracing is never used on a system with it
6432 * configured in, the ring buffers are set to a minimum size. But once
6433 * a user starts to use the tracing facility, they need to grow
6434 * to their default size.
6436 * This function is to be called when a tracer is about to be used.
6438 int tracing_update_buffers(void)
6442 mutex_lock(&trace_types_lock);
6443 if (!ring_buffer_expanded)
6444 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6445 RING_BUFFER_ALL_CPUS);
6446 mutex_unlock(&trace_types_lock);
6451 struct trace_option_dentry;
6454 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6457 * Used to clear out the tracer before deletion of an instance.
6458 * Must have trace_types_lock held.
6460 static void tracing_set_nop(struct trace_array *tr)
6462 if (tr->current_trace == &nop_trace)
6465 tr->current_trace->enabled--;
6467 if (tr->current_trace->reset)
6468 tr->current_trace->reset(tr);
6470 tr->current_trace = &nop_trace;
6473 static bool tracer_options_updated;
6475 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6477 /* Only enable if the directory has been created already. */
6481 /* Only create trace option files after update_tracer_options finish */
6482 if (!tracer_options_updated)
6485 create_trace_option_files(tr, t);
6488 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6491 #ifdef CONFIG_TRACER_MAX_TRACE
6496 mutex_lock(&trace_types_lock);
6498 if (!ring_buffer_expanded) {
6499 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6500 RING_BUFFER_ALL_CPUS);
6506 for (t = trace_types; t; t = t->next) {
6507 if (strcmp(t->name, buf) == 0)
6514 if (t == tr->current_trace)
6517 #ifdef CONFIG_TRACER_SNAPSHOT
6518 if (t->use_max_tr) {
6519 local_irq_disable();
6520 arch_spin_lock(&tr->max_lock);
6521 if (tr->cond_snapshot)
6523 arch_spin_unlock(&tr->max_lock);
6529 /* Some tracers won't work on kernel command line */
6530 if (system_state < SYSTEM_RUNNING && t->noboot) {
6531 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6536 /* Some tracers are only allowed for the top level buffer */
6537 if (!trace_ok_for_array(t, tr)) {
6542 /* If trace pipe files are being read, we can't change the tracer */
6543 if (tr->trace_ref) {
6548 trace_branch_disable();
6550 tr->current_trace->enabled--;
6552 if (tr->current_trace->reset)
6553 tr->current_trace->reset(tr);
6555 #ifdef CONFIG_TRACER_MAX_TRACE
6556 had_max_tr = tr->current_trace->use_max_tr;
6558 /* Current trace needs to be nop_trace before synchronize_rcu */
6559 tr->current_trace = &nop_trace;
6561 if (had_max_tr && !t->use_max_tr) {
6563 * We need to make sure that the update_max_tr sees that
6564 * current_trace changed to nop_trace to keep it from
6565 * swapping the buffers after we resize it.
6566 * The update_max_tr is called with interrupts disabled,
6567 * so a synchronize_rcu() is sufficient.
6573 if (t->use_max_tr && !tr->allocated_snapshot) {
6574 ret = tracing_alloc_snapshot_instance(tr);
6579 tr->current_trace = &nop_trace;
6583 ret = tracer_init(t, tr);
6588 tr->current_trace = t;
6589 tr->current_trace->enabled++;
6590 trace_branch_enable(tr);
6592 mutex_unlock(&trace_types_lock);
6598 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6599 size_t cnt, loff_t *ppos)
6601 struct trace_array *tr = filp->private_data;
6602 char buf[MAX_TRACER_SIZE+1];
6609 if (cnt > MAX_TRACER_SIZE)
6610 cnt = MAX_TRACER_SIZE;
6612 if (copy_from_user(buf, ubuf, cnt))
6619 err = tracing_set_tracer(tr, name);
6629 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6630 size_t cnt, loff_t *ppos)
6635 r = snprintf(buf, sizeof(buf), "%ld\n",
6636 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6637 if (r > sizeof(buf))
6639 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6643 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6644 size_t cnt, loff_t *ppos)
6649 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6659 tracing_thresh_read(struct file *filp, char __user *ubuf,
6660 size_t cnt, loff_t *ppos)
6662 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6666 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6667 size_t cnt, loff_t *ppos)
6669 struct trace_array *tr = filp->private_data;
6672 mutex_lock(&trace_types_lock);
6673 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6677 if (tr->current_trace->update_thresh) {
6678 ret = tr->current_trace->update_thresh(tr);
6685 mutex_unlock(&trace_types_lock);
6690 #ifdef CONFIG_TRACER_MAX_TRACE
6693 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6694 size_t cnt, loff_t *ppos)
6696 struct trace_array *tr = filp->private_data;
6698 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6702 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6703 size_t cnt, loff_t *ppos)
6705 struct trace_array *tr = filp->private_data;
6707 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6712 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6714 if (cpu == RING_BUFFER_ALL_CPUS) {
6715 if (cpumask_empty(tr->pipe_cpumask)) {
6716 cpumask_setall(tr->pipe_cpumask);
6719 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6720 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6726 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6728 if (cpu == RING_BUFFER_ALL_CPUS) {
6729 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6730 cpumask_clear(tr->pipe_cpumask);
6732 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6733 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
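/*
 * Added note, not in the original source: tr->pipe_cpumask makes the
 * "all CPUs" trace_pipe and the per-CPU trace_pipe files mutually
 * exclusive. Opening the global pipe requires the mask to be empty and
 * then fills it completely; opening a per-CPU pipe only requires that
 * CPU's bit to be clear and then sets it.
 */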
6737 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6739 struct trace_array *tr = inode->i_private;
6740 struct trace_iterator *iter;
6744 ret = tracing_check_open_get_tr(tr);
6748 mutex_lock(&trace_types_lock);
6749 cpu = tracing_get_cpu(inode);
6750 ret = open_pipe_on_cpu(tr, cpu);
6752 goto fail_pipe_on_cpu;
6754 /* create a buffer to store the information to pass to userspace */
6755 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6758 goto fail_alloc_iter;
6761 trace_seq_init(&iter->seq);
6762 iter->trace = tr->current_trace;
6764 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6769 /* trace pipe does not show start of buffer */
6770 cpumask_setall(iter->started);
6772 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6773 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6775 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6776 if (trace_clocks[tr->clock_id].in_ns)
6777 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6780 iter->array_buffer = &tr->array_buffer;
6781 iter->cpu_file = cpu;
6782 mutex_init(&iter->mutex);
6783 filp->private_data = iter;
6785 if (iter->trace->pipe_open)
6786 iter->trace->pipe_open(iter);
6788 nonseekable_open(inode, filp);
6792 mutex_unlock(&trace_types_lock);
6798 close_pipe_on_cpu(tr, cpu);
6800 __trace_array_put(tr);
6801 mutex_unlock(&trace_types_lock);
6805 static int tracing_release_pipe(struct inode *inode, struct file *file)
6807 struct trace_iterator *iter = file->private_data;
6808 struct trace_array *tr = inode->i_private;
6810 mutex_lock(&trace_types_lock);
6814 if (iter->trace->pipe_close)
6815 iter->trace->pipe_close(iter);
6816 close_pipe_on_cpu(tr, iter->cpu_file);
6817 mutex_unlock(&trace_types_lock);
6819 free_trace_iter_content(iter);
6822 trace_array_put(tr);
6828 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6830 struct trace_array *tr = iter->tr;
6832 /* Iterators are static, they should be filled or empty */
6833 if (trace_buffer_iter(iter, iter->cpu_file))
6834 return EPOLLIN | EPOLLRDNORM;
6836 if (tr->trace_flags & TRACE_ITER_BLOCK)
6838 * Always select as readable when in blocking mode
6840 return EPOLLIN | EPOLLRDNORM;
6842 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6843 filp, poll_table, iter->tr->buffer_percent);
6847 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6849 struct trace_iterator *iter = filp->private_data;
6851 return trace_poll(iter, filp, poll_table);
6854 /* Must be called with iter->mutex held. */
6855 static int tracing_wait_pipe(struct file *filp)
6857 struct trace_iterator *iter = filp->private_data;
6860 while (trace_empty(iter)) {
6862 if ((filp->f_flags & O_NONBLOCK)) {
6867 * We block until we read something and tracing is disabled.
6868 * We still block if tracing is disabled, but we have never
6869 * read anything. This allows a user to cat this file, and
6870 * then enable tracing. But after we have read something,
6871 * we give an EOF when tracing is again disabled.
6873 * iter->pos will be 0 if we haven't read anything.
6875 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6878 mutex_unlock(&iter->mutex);
6880 ret = wait_on_pipe(iter, 0);
6882 mutex_lock(&iter->mutex);
6895 tracing_read_pipe(struct file *filp, char __user *ubuf,
6896 size_t cnt, loff_t *ppos)
6898 struct trace_iterator *iter = filp->private_data;
6902 * Avoid more than one consumer on a single file descriptor.
6903 * This is just a matter of trace coherency, the ring buffer itself
6906 mutex_lock(&iter->mutex);
6908 /* return any leftover data */
6909 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6913 trace_seq_init(&iter->seq);
6915 if (iter->trace->read) {
6916 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6922 sret = tracing_wait_pipe(filp);
6926 /* stop when tracing is finished */
6927 if (trace_empty(iter)) {
6932 if (cnt >= PAGE_SIZE)
6933 cnt = PAGE_SIZE - 1;
6935 /* reset all but tr, trace, and overruns */
6936 trace_iterator_reset(iter);
6937 cpumask_clear(iter->started);
6938 trace_seq_init(&iter->seq);
6940 trace_event_read_lock();
6941 trace_access_lock(iter->cpu_file);
6942 while (trace_find_next_entry_inc(iter) != NULL) {
6943 enum print_line_t ret;
6944 int save_len = iter->seq.seq.len;
6946 ret = print_trace_line(iter);
6947 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6949 * If one print_trace_line() fills the entire trace_seq in one shot,
6950 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6951 * In this case, we need to consume it, otherwise the loop will peek
6952 * this event next time, resulting in an infinite loop.
6954 if (save_len == 0) {
6956 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6957 trace_consume(iter);
6961 /* In other cases, don't print partial lines */
6962 iter->seq.seq.len = save_len;
6965 if (ret != TRACE_TYPE_NO_CONSUME)
6966 trace_consume(iter);
6968 if (trace_seq_used(&iter->seq) >= cnt)
6972 * Setting the full flag means we reached the trace_seq buffer
6973 * size and we should have left via the partial output condition above.
6974 * If we get here, one of the trace_seq_* functions is not being used properly.
6976 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6979 trace_access_unlock(iter->cpu_file);
6980 trace_event_read_unlock();
6982 /* Now copy what we have to the user */
6983 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6984 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6985 trace_seq_init(&iter->seq);
6988 * If there was nothing to send to the user, in spite of consuming trace
6989 * entries, go back to wait for more entries.
6995 mutex_unlock(&iter->mutex);
7000 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
7003 __free_page(spd->pages[idx]);
7007 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
7013 /* Seq buffer is page-sized, exactly what we need. */
7015 save_len = iter->seq.seq.len;
7016 ret = print_trace_line(iter);
7018 if (trace_seq_has_overflowed(&iter->seq)) {
7019 iter->seq.seq.len = save_len;
7024 * This should not be hit, because it should only
7025 * be set if the iter->seq overflowed. But check it
7026 * anyway to be safe.
7028 if (ret == TRACE_TYPE_PARTIAL_LINE) {
7029 iter->seq.seq.len = save_len;
7033 count = trace_seq_used(&iter->seq) - save_len;
7036 iter->seq.seq.len = save_len;
7040 if (ret != TRACE_TYPE_NO_CONSUME)
7041 trace_consume(iter);
7043 if (!trace_find_next_entry_inc(iter)) {
7053 static ssize_t tracing_splice_read_pipe(struct file *filp,
7055 struct pipe_inode_info *pipe,
7059 struct page *pages_def[PIPE_DEF_BUFFERS];
7060 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7061 struct trace_iterator *iter = filp->private_data;
7062 struct splice_pipe_desc spd = {
7064 .partial = partial_def,
7065 .nr_pages = 0, /* This gets updated below. */
7066 .nr_pages_max = PIPE_DEF_BUFFERS,
7067 .ops = &default_pipe_buf_ops,
7068 .spd_release = tracing_spd_release_pipe,
7074 if (splice_grow_spd(pipe, &spd))
7077 mutex_lock(&iter->mutex);
7079 if (iter->trace->splice_read) {
7080 ret = iter->trace->splice_read(iter, filp,
7081 ppos, pipe, len, flags);
7086 ret = tracing_wait_pipe(filp);
7090 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7095 trace_event_read_lock();
7096 trace_access_lock(iter->cpu_file);
7098 /* Fill as many pages as possible. */
7099 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7100 spd.pages[i] = alloc_page(GFP_KERNEL);
7104 rem = tracing_fill_pipe_page(rem, iter);
7106 /* Copy the data into the page, so we can start over. */
7107 ret = trace_seq_to_buffer(&iter->seq,
7108 page_address(spd.pages[i]),
7109 trace_seq_used(&iter->seq));
7111 __free_page(spd.pages[i]);
7114 spd.partial[i].offset = 0;
7115 spd.partial[i].len = trace_seq_used(&iter->seq);
7117 trace_seq_init(&iter->seq);
7120 trace_access_unlock(iter->cpu_file);
7121 trace_event_read_unlock();
7122 mutex_unlock(&iter->mutex);
7127 ret = splice_to_pipe(pipe, &spd);
7131 splice_shrink_spd(&spd);
7135 mutex_unlock(&iter->mutex);
7140 tracing_entries_read(struct file *filp, char __user *ubuf,
7141 size_t cnt, loff_t *ppos)
7143 struct inode *inode = file_inode(filp);
7144 struct trace_array *tr = inode->i_private;
7145 int cpu = tracing_get_cpu(inode);
7150 mutex_lock(&trace_types_lock);
7152 if (cpu == RING_BUFFER_ALL_CPUS) {
7153 int cpu, buf_size_same;
7158 /* check if all CPU sizes are the same */
7159 for_each_tracing_cpu(cpu) {
7160 /* fill in the size from the first enabled CPU */
7162 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7163 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7169 if (buf_size_same) {
7170 if (!ring_buffer_expanded)
7171 r = sprintf(buf, "%lu (expanded: %lu)\n",
7173 trace_buf_size >> 10);
7175 r = sprintf(buf, "%lu\n", size >> 10);
7177 r = sprintf(buf, "X\n");
7179 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7181 mutex_unlock(&trace_types_lock);
7183 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7188 tracing_entries_write(struct file *filp, const char __user *ubuf,
7189 size_t cnt, loff_t *ppos)
7191 struct inode *inode = file_inode(filp);
7192 struct trace_array *tr = inode->i_private;
7196 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7200 /* must have at least 1 entry */
7204 /* value is in KB */
7206 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
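/*
 * Usage sketch (assuming the usual tracefs mount point; not part of the
 * original comments): the value written here is in kilobytes, so
 *
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * requests roughly 4 MB per CPU, while writing to
 * per_cpu/cpuN/buffer_size_kb resizes only that CPU's buffer.
 */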
7216 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7217 size_t cnt, loff_t *ppos)
7219 struct trace_array *tr = filp->private_data;
7222 unsigned long size = 0, expanded_size = 0;
7224 mutex_lock(&trace_types_lock);
7225 for_each_tracing_cpu(cpu) {
7226 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7227 if (!ring_buffer_expanded)
7228 expanded_size += trace_buf_size >> 10;
7230 if (ring_buffer_expanded)
7231 r = sprintf(buf, "%lu\n", size);
7233 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7234 mutex_unlock(&trace_types_lock);
7236 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7240 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7241 size_t cnt, loff_t *ppos)
7244 * There is no need to read what the user has written; this function
7245 * is just to make sure that there is no error when "echo" is used
7254 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7256 struct trace_array *tr = inode->i_private;
7258 /* Disable tracing? */
7259 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7260 tracer_tracing_off(tr);
7261 /* resize the ring buffer to 0 */
7262 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7264 trace_array_put(tr);
7270 tracing_mark_write(struct file *filp, const char __user *ubuf,
7271 size_t cnt, loff_t *fpos)
7273 struct trace_array *tr = filp->private_data;
7274 struct ring_buffer_event *event;
7275 enum event_trigger_type tt = ETT_NONE;
7276 struct trace_buffer *buffer;
7277 struct print_entry *entry;
7282 /* Used in tracing_mark_raw_write() as well */
7283 #define FAULTED_STR "<faulted>"
7284 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7286 if (tracing_disabled)
7289 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7292 if (cnt > TRACE_BUF_SIZE)
7293 cnt = TRACE_BUF_SIZE;
7295 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7297 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7299 /* If the write is shorter than "<faulted>", make sure that string still fits */
7300 if (cnt < FAULTED_SIZE)
7301 size += FAULTED_SIZE - cnt;
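/*
 * Worked sizing example (illustrative): for a 3-byte write such as "hi\n",
 * size starts at sizeof(*entry) + 3 + 2 and then grows by
 * FAULTED_SIZE - 3 = 6 so that the 9-character "<faulted>" string still
 * fits if the user copy below faults. Writes of FAULTED_SIZE bytes or more
 * need no adjustment.
 */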
7303 buffer = tr->array_buffer.buffer;
7304 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7306 if (unlikely(!event))
7307 /* Ring buffer disabled, return as if not open for write */
7310 entry = ring_buffer_event_data(event);
7311 entry->ip = _THIS_IP_;
7313 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7315 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7321 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7322 /* do not add \n before testing triggers, but add \0 */
7323 entry->buf[cnt] = '\0';
7324 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7327 if (entry->buf[cnt - 1] != '\n') {
7328 entry->buf[cnt] = '\n';
7329 entry->buf[cnt + 1] = '\0';
7331 entry->buf[cnt] = '\0';
7333 if (static_branch_unlikely(&trace_marker_exports_enabled))
7334 ftrace_exports(event, TRACE_EXPORT_MARKER);
7335 __buffer_unlock_commit(buffer, event);
7338 event_triggers_post_call(tr->trace_marker_file, tt);
7343 /* Limit it for now to 3K (including tag) */
7344 #define RAW_DATA_MAX_SIZE (1024*3)
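/*
 * Illustrative user-space sketch (an assumption, not taken from this file):
 * writes to trace_marker_raw are binary, with the first sizeof(unsigned int)
 * bytes landing in raw_data_entry::id and the remainder kept as opaque
 * payload, e.g.:
 *
 *	struct { unsigned int id; char payload[8]; } msg = { 42, "rawdata" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *	write(fd, &msg, sizeof(msg));
 */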
7347 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7348 size_t cnt, loff_t *fpos)
7350 struct trace_array *tr = filp->private_data;
7351 struct ring_buffer_event *event;
7352 struct trace_buffer *buffer;
7353 struct raw_data_entry *entry;
7358 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7360 if (tracing_disabled)
7363 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7366 /* The marker must at least have a tag id */
7367 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7370 if (cnt > TRACE_BUF_SIZE)
7371 cnt = TRACE_BUF_SIZE;
7373 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7375 size = sizeof(*entry) + cnt;
7376 if (cnt < FAULT_SIZE_ID)
7377 size += FAULT_SIZE_ID - cnt;
7379 buffer = tr->array_buffer.buffer;
7380 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7383 /* Ring buffer disabled, return as if not open for write */
7386 entry = ring_buffer_event_data(event);
7388 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7391 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7396 __buffer_unlock_commit(buffer, event);
7401 static int tracing_clock_show(struct seq_file *m, void *v)
7403 struct trace_array *tr = m->private;
7406 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7408 "%s%s%s%s", i ? " " : "",
7409 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7410 i == tr->clock_id ? "]" : "");
7416 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7420 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7421 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7424 if (i == ARRAY_SIZE(trace_clocks))
7427 mutex_lock(&trace_types_lock);
7431 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7434 * The new clock may not be consistent with the previous clock.
7435 * Reset the buffer so that it doesn't have incomparable timestamps.
7437 tracing_reset_online_cpus(&tr->array_buffer);
7439 #ifdef CONFIG_TRACER_MAX_TRACE
7440 if (tr->max_buffer.buffer)
7441 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7442 tracing_reset_online_cpus(&tr->max_buffer);
7445 mutex_unlock(&trace_types_lock);
7450 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7451 size_t cnt, loff_t *fpos)
7453 struct seq_file *m = filp->private_data;
7454 struct trace_array *tr = m->private;
7456 const char *clockstr;
7459 if (cnt >= sizeof(buf))
7462 if (copy_from_user(buf, ubuf, cnt))
7467 clockstr = strstrip(buf);
7469 ret = tracing_set_clock(tr, clockstr);
7478 static int tracing_clock_open(struct inode *inode, struct file *file)
7480 struct trace_array *tr = inode->i_private;
7483 ret = tracing_check_open_get_tr(tr);
7487 ret = single_open(file, tracing_clock_show, inode->i_private);
7489 trace_array_put(tr);
7494 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7496 struct trace_array *tr = m->private;
7498 mutex_lock(&trace_types_lock);
7500 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7501 seq_puts(m, "delta [absolute]\n");
7503 seq_puts(m, "[delta] absolute\n");
7505 mutex_unlock(&trace_types_lock);
7510 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7512 struct trace_array *tr = inode->i_private;
7515 ret = tracing_check_open_get_tr(tr);
7519 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7521 trace_array_put(tr);
7526 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7528 if (rbe == this_cpu_read(trace_buffered_event))
7529 return ring_buffer_time_stamp(buffer);
7531 return ring_buffer_event_time_stamp(buffer, rbe);
7535 * Set or disable using the per-CPU trace_buffered_event when possible.
7537 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7541 mutex_lock(&trace_types_lock);
7543 if (set && tr->no_filter_buffering_ref++)
7547 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7552 --tr->no_filter_buffering_ref;
7555 mutex_unlock(&trace_types_lock);
7560 struct ftrace_buffer_info {
7561 struct trace_iterator iter;
7563 unsigned int spare_cpu;
7567 #ifdef CONFIG_TRACER_SNAPSHOT
7568 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7570 struct trace_array *tr = inode->i_private;
7571 struct trace_iterator *iter;
7575 ret = tracing_check_open_get_tr(tr);
7579 if (file->f_mode & FMODE_READ) {
7580 iter = __tracing_open(inode, file, true);
7582 ret = PTR_ERR(iter);
7584 /* Writes still need the seq_file to hold the private data */
7586 m = kzalloc(sizeof(*m), GFP_KERNEL);
7589 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7597 iter->array_buffer = &tr->max_buffer;
7598 iter->cpu_file = tracing_get_cpu(inode);
7600 file->private_data = m;
7604 trace_array_put(tr);
7609 static void tracing_swap_cpu_buffer(void *tr)
7611 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7615 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7618 struct seq_file *m = filp->private_data;
7619 struct trace_iterator *iter = m->private;
7620 struct trace_array *tr = iter->tr;
7624 ret = tracing_update_buffers();
7628 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7632 mutex_lock(&trace_types_lock);
7634 if (tr->current_trace->use_max_tr) {
7639 local_irq_disable();
7640 arch_spin_lock(&tr->max_lock);
7641 if (tr->cond_snapshot)
7643 arch_spin_unlock(&tr->max_lock);
7650 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7654 if (tr->allocated_snapshot)
7658 /* Only allow per-cpu swap if the ring buffer supports it */
7659 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7660 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7665 if (tr->allocated_snapshot)
7666 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7667 &tr->array_buffer, iter->cpu_file);
7669 ret = tracing_alloc_snapshot_instance(tr);
7672 /* Now, we're going to swap */
7673 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7674 local_irq_disable();
7675 update_max_tr(tr, current, smp_processor_id(), NULL);
7678 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7683 if (tr->allocated_snapshot) {
7684 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7685 tracing_reset_online_cpus(&tr->max_buffer);
7687 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7697 mutex_unlock(&trace_types_lock);
7701 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7703 struct seq_file *m = file->private_data;
7706 ret = tracing_release(inode, file);
7708 if (file->f_mode & FMODE_READ)
7711 /* If write only, the seq_file is just a stub */
7719 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7720 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7721 size_t count, loff_t *ppos);
7722 static int tracing_buffers_release(struct inode *inode, struct file *file);
7723 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7724 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7726 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7728 struct ftrace_buffer_info *info;
7731 /* The following checks for tracefs lockdown */
7732 ret = tracing_buffers_open(inode, filp);
7736 info = filp->private_data;
7738 if (info->iter.trace->use_max_tr) {
7739 tracing_buffers_release(inode, filp);
7743 info->iter.snapshot = true;
7744 info->iter.array_buffer = &info->iter.tr->max_buffer;
7749 #endif /* CONFIG_TRACER_SNAPSHOT */
7752 static const struct file_operations tracing_thresh_fops = {
7753 .open = tracing_open_generic,
7754 .read = tracing_thresh_read,
7755 .write = tracing_thresh_write,
7756 .llseek = generic_file_llseek,
7759 #ifdef CONFIG_TRACER_MAX_TRACE
7760 static const struct file_operations tracing_max_lat_fops = {
7761 .open = tracing_open_generic_tr,
7762 .read = tracing_max_lat_read,
7763 .write = tracing_max_lat_write,
7764 .llseek = generic_file_llseek,
7765 .release = tracing_release_generic_tr,
7769 static const struct file_operations set_tracer_fops = {
7770 .open = tracing_open_generic_tr,
7771 .read = tracing_set_trace_read,
7772 .write = tracing_set_trace_write,
7773 .llseek = generic_file_llseek,
7774 .release = tracing_release_generic_tr,
7777 static const struct file_operations tracing_pipe_fops = {
7778 .open = tracing_open_pipe,
7779 .poll = tracing_poll_pipe,
7780 .read = tracing_read_pipe,
7781 .splice_read = tracing_splice_read_pipe,
7782 .release = tracing_release_pipe,
7783 .llseek = no_llseek,
7786 static const struct file_operations tracing_entries_fops = {
7787 .open = tracing_open_generic_tr,
7788 .read = tracing_entries_read,
7789 .write = tracing_entries_write,
7790 .llseek = generic_file_llseek,
7791 .release = tracing_release_generic_tr,
7794 static const struct file_operations tracing_total_entries_fops = {
7795 .open = tracing_open_generic_tr,
7796 .read = tracing_total_entries_read,
7797 .llseek = generic_file_llseek,
7798 .release = tracing_release_generic_tr,
7801 static const struct file_operations tracing_free_buffer_fops = {
7802 .open = tracing_open_generic_tr,
7803 .write = tracing_free_buffer_write,
7804 .release = tracing_free_buffer_release,
7807 static const struct file_operations tracing_mark_fops = {
7808 .open = tracing_mark_open,
7809 .write = tracing_mark_write,
7810 .release = tracing_release_generic_tr,
7813 static const struct file_operations tracing_mark_raw_fops = {
7814 .open = tracing_mark_open,
7815 .write = tracing_mark_raw_write,
7816 .release = tracing_release_generic_tr,
7819 static const struct file_operations trace_clock_fops = {
7820 .open = tracing_clock_open,
7822 .llseek = seq_lseek,
7823 .release = tracing_single_release_tr,
7824 .write = tracing_clock_write,
7827 static const struct file_operations trace_time_stamp_mode_fops = {
7828 .open = tracing_time_stamp_mode_open,
7830 .llseek = seq_lseek,
7831 .release = tracing_single_release_tr,
7834 #ifdef CONFIG_TRACER_SNAPSHOT
7835 static const struct file_operations snapshot_fops = {
7836 .open = tracing_snapshot_open,
7838 .write = tracing_snapshot_write,
7839 .llseek = tracing_lseek,
7840 .release = tracing_snapshot_release,
7843 static const struct file_operations snapshot_raw_fops = {
7844 .open = snapshot_raw_open,
7845 .read = tracing_buffers_read,
7846 .release = tracing_buffers_release,
7847 .splice_read = tracing_buffers_splice_read,
7848 .llseek = no_llseek,
7851 #endif /* CONFIG_TRACER_SNAPSHOT */
7854 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7855 * @filp: The active open file structure
7856 * @ubuf: The user-space buffer to read the value from
7857 * @cnt: The maximum number of bytes to read
7858 * @ppos: The current "file" position
7860 * This function implements the write interface for a struct trace_min_max_param.
7861 * The filp->private_data must point to a trace_min_max_param structure that
7862 * defines where to write the value, the min and the max acceptable values,
7863 * and a lock to protect the write.
7866 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7868 struct trace_min_max_param *param = filp->private_data;
7875 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7880 mutex_lock(param->lock);
7882 if (param->min && val < *param->min)
7885 if (param->max && val > *param->max)
7892 mutex_unlock(param->lock);
7901 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7902 * @filp: The active open file structure
7903 * @ubuf: The userspace provided buffer to read value into
7904 * @cnt: The maximum number of bytes to read
7905 * @ppos: The current "file" position
7907 * This function implements the read interface for a struct trace_min_max_param.
7908 * The filp->private_data must point to a trace_min_max_param struct with valid
7912 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7914 struct trace_min_max_param *param = filp->private_data;
7915 char buf[U64_STR_SIZE];
7924 if (cnt > sizeof(buf))
7927 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7929 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7932 const struct file_operations trace_min_max_fops = {
7933 .open = tracing_open_generic,
7934 .read = trace_min_max_read,
7935 .write = trace_min_max_write,
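/*
 * Illustrative wiring sketch for trace_min_max_fops (hypothetical names;
 * assumes the trace_min_max_param layout declared in trace.h):
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent, &my_param,
 *			  &trace_min_max_fops);
 */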
7938 #define TRACING_LOG_ERRS_MAX 8
7939 #define TRACING_LOG_LOC_MAX 128
7941 #define CMD_PREFIX " Command: "
7944 const char **errs; /* ptr to loc-specific array of err strings */
7945 u8 type; /* index into errs -> specific err string */
7946 u16 pos; /* caret position */
7950 struct tracing_log_err {
7951 struct list_head list;
7952 struct err_info info;
7953 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7954 char *cmd; /* what caused err */
7957 static DEFINE_MUTEX(tracing_err_log_lock);
7959 static struct tracing_log_err *alloc_tracing_log_err(int len)
7961 struct tracing_log_err *err;
7963 err = kzalloc(sizeof(*err), GFP_KERNEL);
7965 return ERR_PTR(-ENOMEM);
7967 err->cmd = kzalloc(len, GFP_KERNEL);
7970 return ERR_PTR(-ENOMEM);
7976 static void free_tracing_log_err(struct tracing_log_err *err)
7982 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7985 struct tracing_log_err *err;
7988 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7989 err = alloc_tracing_log_err(len);
7990 if (PTR_ERR(err) != -ENOMEM)
7991 tr->n_err_log_entries++;
7995 cmd = kzalloc(len, GFP_KERNEL);
7997 return ERR_PTR(-ENOMEM);
7998 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8001 list_del(&err->list);
8007 * err_pos - find the position of a string within a command for error careting
8008 * @cmd: The tracing command that caused the error
8009 * @str: The string to position the caret at within @cmd
8011 * Finds the position of the first occurrence of @str within @cmd. The
8012 * return value can be passed to tracing_log_err() for caret placement
8015 * Returns the index within @cmd of the first occurrence of @str or 0
8016 * if @str was not found.
8018 unsigned int err_pos(char *cmd, const char *str)
8022 if (WARN_ON(!strlen(cmd)))
8025 found = strstr(cmd, str);
8033 * tracing_log_err - write an error to the tracing error log
8034 * @tr: The associated trace array for the error (NULL for top level array)
8035 * @loc: A string describing where the error occurred
8036 * @cmd: The tracing command that caused the error
8037 * @errs: The array of loc-specific static error strings
8038 * @type: The index into errs[], which produces the specific static err string
8039 * @pos: The position the caret should be placed in the cmd
8041 * Writes an error into tracing/error_log of the form:
8043 * <loc>: error: <text>
8047 * tracing/error_log is a small log file containing the last
8048 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
8049 * unless there has been a tracing error, and the error log can be
8050 * cleared and have its memory freed by writing the empty string in
8051 * truncation mode to it, i.e. echo > tracing/error_log.
8053 * NOTE: the @errs array along with the @type param are used to
8054 * produce a static error string - this string is not copied and saved
8055 * when the error is logged - only a pointer to it is saved. See
8056 * existing callers for examples of how static strings are typically
8057 * defined for use with tracing_log_err().
8059 void tracing_log_err(struct trace_array *tr,
8060 const char *loc, const char *cmd,
8061 const char **errs, u8 type, u16 pos)
8063 struct tracing_log_err *err;
8069 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8071 mutex_lock(&tracing_err_log_lock);
8072 err = get_tracing_log_err(tr, len);
8073 if (PTR_ERR(err) == -ENOMEM) {
8074 mutex_unlock(&tracing_err_log_lock);
8078 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8079 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8081 err->info.errs = errs;
8082 err->info.type = type;
8083 err->info.pos = pos;
8084 err->info.ts = local_clock();
8086 list_add_tail(&err->list, &tr->err_log);
8087 mutex_unlock(&tracing_err_log_lock);
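/*
 * Illustrative example (hypothetical strings): a caller typically does
 *
 *	static const char *my_errs[] = { "Duplicate name", "Bad field" };
 *
 *	tracing_log_err(tr, "hist:sched:sched_switch", cmd, my_errs,
 *			0, err_pos(cmd, "bad_field"));
 *
 * which then shows up in tracing/error_log roughly as
 *
 *	[ 1234.567890] hist:sched:sched_switch: error: Duplicate name
 *	  Command: <the offending cmd>
 *	               ^
 */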
8090 static void clear_tracing_err_log(struct trace_array *tr)
8092 struct tracing_log_err *err, *next;
8094 mutex_lock(&tracing_err_log_lock);
8095 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8096 list_del(&err->list);
8097 free_tracing_log_err(err);
8100 tr->n_err_log_entries = 0;
8101 mutex_unlock(&tracing_err_log_lock);
8104 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8106 struct trace_array *tr = m->private;
8108 mutex_lock(&tracing_err_log_lock);
8110 return seq_list_start(&tr->err_log, *pos);
8113 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8115 struct trace_array *tr = m->private;
8117 return seq_list_next(v, &tr->err_log, pos);
8120 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8122 mutex_unlock(&tracing_err_log_lock);
8125 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8129 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8131 for (i = 0; i < pos; i++)
8136 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8138 struct tracing_log_err *err = v;
8141 const char *err_text = err->info.errs[err->info.type];
8142 u64 sec = err->info.ts;
8145 nsec = do_div(sec, NSEC_PER_SEC);
8146 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8147 err->loc, err_text);
8148 seq_printf(m, "%s", err->cmd);
8149 tracing_err_log_show_pos(m, err->info.pos);
8155 static const struct seq_operations tracing_err_log_seq_ops = {
8156 .start = tracing_err_log_seq_start,
8157 .next = tracing_err_log_seq_next,
8158 .stop = tracing_err_log_seq_stop,
8159 .show = tracing_err_log_seq_show
8162 static int tracing_err_log_open(struct inode *inode, struct file *file)
8164 struct trace_array *tr = inode->i_private;
8167 ret = tracing_check_open_get_tr(tr);
8171 /* If this file was opened for write, then erase contents */
8172 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8173 clear_tracing_err_log(tr);
8175 if (file->f_mode & FMODE_READ) {
8176 ret = seq_open(file, &tracing_err_log_seq_ops);
8178 struct seq_file *m = file->private_data;
8181 trace_array_put(tr);
8187 static ssize_t tracing_err_log_write(struct file *file,
8188 const char __user *buffer,
8189 size_t count, loff_t *ppos)
8194 static int tracing_err_log_release(struct inode *inode, struct file *file)
8196 struct trace_array *tr = inode->i_private;
8198 trace_array_put(tr);
8200 if (file->f_mode & FMODE_READ)
8201 seq_release(inode, file);
8206 static const struct file_operations tracing_err_log_fops = {
8207 .open = tracing_err_log_open,
8208 .write = tracing_err_log_write,
8210 .llseek = tracing_lseek,
8211 .release = tracing_err_log_release,
8214 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8216 struct trace_array *tr = inode->i_private;
8217 struct ftrace_buffer_info *info;
8220 ret = tracing_check_open_get_tr(tr);
8224 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8226 trace_array_put(tr);
8230 mutex_lock(&trace_types_lock);
8233 info->iter.cpu_file = tracing_get_cpu(inode);
8234 info->iter.trace = tr->current_trace;
8235 info->iter.array_buffer = &tr->array_buffer;
8237 /* Force reading ring buffer for first read */
8238 info->read = (unsigned int)-1;
8240 filp->private_data = info;
8244 mutex_unlock(&trace_types_lock);
8246 ret = nonseekable_open(inode, filp);
8248 trace_array_put(tr);
8254 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8256 struct ftrace_buffer_info *info = filp->private_data;
8257 struct trace_iterator *iter = &info->iter;
8259 return trace_poll(iter, filp, poll_table);
8263 tracing_buffers_read(struct file *filp, char __user *ubuf,
8264 size_t count, loff_t *ppos)
8266 struct ftrace_buffer_info *info = filp->private_data;
8267 struct trace_iterator *iter = &info->iter;
8274 #ifdef CONFIG_TRACER_MAX_TRACE
8275 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8280 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8282 if (IS_ERR(info->spare)) {
8283 ret = PTR_ERR(info->spare);
8286 info->spare_cpu = iter->cpu_file;
8292 /* Do we have previous read data to read? */
8293 if (info->read < PAGE_SIZE)
8297 trace_access_lock(iter->cpu_file);
8298 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8302 trace_access_unlock(iter->cpu_file);
8305 if (trace_empty(iter)) {
8306 if ((filp->f_flags & O_NONBLOCK))
8309 ret = wait_on_pipe(iter, 0);
8320 size = PAGE_SIZE - info->read;
8324 ret = copy_to_user(ubuf, info->spare + info->read, size);
8336 static int tracing_buffers_release(struct inode *inode, struct file *file)
8338 struct ftrace_buffer_info *info = file->private_data;
8339 struct trace_iterator *iter = &info->iter;
8341 mutex_lock(&trace_types_lock);
8343 iter->tr->trace_ref--;
8345 __trace_array_put(iter->tr);
8348 /* Make sure the waiters see the new wait_index */
8351 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8354 ring_buffer_free_read_page(iter->array_buffer->buffer,
8355 info->spare_cpu, info->spare);
8358 mutex_unlock(&trace_types_lock);
8364 struct trace_buffer *buffer;
8367 refcount_t refcount;
8370 static void buffer_ref_release(struct buffer_ref *ref)
8372 if (!refcount_dec_and_test(&ref->refcount))
8374 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8378 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8379 struct pipe_buffer *buf)
8381 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8383 buffer_ref_release(ref);
8387 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8388 struct pipe_buffer *buf)
8390 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8392 if (refcount_read(&ref->refcount) > INT_MAX/2)
8395 refcount_inc(&ref->refcount);
8399 /* Pipe buffer operations for a buffer. */
8400 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8401 .release = buffer_pipe_buf_release,
8402 .get = buffer_pipe_buf_get,
8406 * Callback from splice_to_pipe(), used to release pages
8407 * at the end of the spd if we errored out while filling the pipe.
8409 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8411 struct buffer_ref *ref =
8412 (struct buffer_ref *)spd->partial[i].private;
8414 buffer_ref_release(ref);
8415 spd->partial[i].private = 0;
8419 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8420 struct pipe_inode_info *pipe, size_t len,
8423 struct ftrace_buffer_info *info = file->private_data;
8424 struct trace_iterator *iter = &info->iter;
8425 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8426 struct page *pages_def[PIPE_DEF_BUFFERS];
8427 struct splice_pipe_desc spd = {
8429 .partial = partial_def,
8430 .nr_pages_max = PIPE_DEF_BUFFERS,
8431 .ops = &buffer_pipe_buf_ops,
8432 .spd_release = buffer_spd_release,
8434 struct buffer_ref *ref;
8438 #ifdef CONFIG_TRACER_MAX_TRACE
8439 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8443 if (*ppos & (PAGE_SIZE - 1))
8446 if (len & (PAGE_SIZE - 1)) {
8447 if (len < PAGE_SIZE)
8452 if (splice_grow_spd(pipe, &spd))
8456 trace_access_lock(iter->cpu_file);
8457 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8459 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8463 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8469 refcount_set(&ref->refcount, 1);
8470 ref->buffer = iter->array_buffer->buffer;
8471 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8472 if (IS_ERR(ref->page)) {
8473 ret = PTR_ERR(ref->page);
8478 ref->cpu = iter->cpu_file;
8480 r = ring_buffer_read_page(ref->buffer, &ref->page,
8481 len, iter->cpu_file, 1);
8483 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8489 page = virt_to_page(ref->page);
8491 spd.pages[i] = page;
8492 spd.partial[i].len = PAGE_SIZE;
8493 spd.partial[i].offset = 0;
8494 spd.partial[i].private = (unsigned long)ref;
8498 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8501 trace_access_unlock(iter->cpu_file);
8504 /* did we read anything? */
8505 if (!spd.nr_pages) {
8512 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8515 wait_index = READ_ONCE(iter->wait_index);
8517 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8521 /* No need to wait after waking up when tracing is off */
8522 if (!tracer_tracing_is_on(iter->tr))
8525 /* Make sure we see the new wait_index */
8527 if (wait_index != iter->wait_index)
8533 ret = splice_to_pipe(pipe, &spd);
8535 splice_shrink_spd(&spd);
8540 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8541 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8543 struct ftrace_buffer_info *info = file->private_data;
8544 struct trace_iterator *iter = &info->iter;
8547 return -ENOIOCTLCMD;
8549 mutex_lock(&trace_types_lock);
8552 /* Make sure the waiters see the new wait_index */
8555 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8557 mutex_unlock(&trace_types_lock);
8561 static const struct file_operations tracing_buffers_fops = {
8562 .open = tracing_buffers_open,
8563 .read = tracing_buffers_read,
8564 .poll = tracing_buffers_poll,
8565 .release = tracing_buffers_release,
8566 .splice_read = tracing_buffers_splice_read,
8567 .unlocked_ioctl = tracing_buffers_ioctl,
8568 .llseek = no_llseek,
8572 tracing_stats_read(struct file *filp, char __user *ubuf,
8573 size_t count, loff_t *ppos)
8575 struct inode *inode = file_inode(filp);
8576 struct trace_array *tr = inode->i_private;
8577 struct array_buffer *trace_buf = &tr->array_buffer;
8578 int cpu = tracing_get_cpu(inode);
8579 struct trace_seq *s;
8581 unsigned long long t;
8582 unsigned long usec_rem;
8584 s = kmalloc(sizeof(*s), GFP_KERNEL);
8590 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8591 trace_seq_printf(s, "entries: %ld\n", cnt);
8593 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8594 trace_seq_printf(s, "overrun: %ld\n", cnt);
8596 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8597 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8599 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8600 trace_seq_printf(s, "bytes: %ld\n", cnt);
8602 if (trace_clocks[tr->clock_id].in_ns) {
8603 /* local or global for trace_clock */
8604 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8605 usec_rem = do_div(t, USEC_PER_SEC);
8606 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8609 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8610 usec_rem = do_div(t, USEC_PER_SEC);
8611 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8613 /* counter or tsc mode for trace_clock */
8614 trace_seq_printf(s, "oldest event ts: %llu\n",
8615 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8617 trace_seq_printf(s, "now ts: %llu\n",
8618 ring_buffer_time_stamp(trace_buf->buffer));
8621 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8622 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8624 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8625 trace_seq_printf(s, "read events: %ld\n", cnt);
8627 count = simple_read_from_buffer(ubuf, count, ppos,
8628 s->buffer, trace_seq_used(s));
8635 static const struct file_operations tracing_stats_fops = {
8636 .open = tracing_open_generic_tr,
8637 .read = tracing_stats_read,
8638 .llseek = generic_file_llseek,
8639 .release = tracing_release_generic_tr,
8642 #ifdef CONFIG_DYNAMIC_FTRACE
8645 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8646 size_t cnt, loff_t *ppos)
8652 /* 256 should be plenty to hold the amount needed */
8653 buf = kmalloc(256, GFP_KERNEL);
8657 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8658 ftrace_update_tot_cnt,
8659 ftrace_number_of_pages,
8660 ftrace_number_of_groups);
8662 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8667 static const struct file_operations tracing_dyn_info_fops = {
8668 .open = tracing_open_generic,
8669 .read = tracing_read_dyn_info,
8670 .llseek = generic_file_llseek,
8672 #endif /* CONFIG_DYNAMIC_FTRACE */
8674 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8676 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8677 struct trace_array *tr, struct ftrace_probe_ops *ops,
8680 tracing_snapshot_instance(tr);
8684 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8685 struct trace_array *tr, struct ftrace_probe_ops *ops,
8688 struct ftrace_func_mapper *mapper = data;
8692 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8702 tracing_snapshot_instance(tr);
8706 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8707 struct ftrace_probe_ops *ops, void *data)
8709 struct ftrace_func_mapper *mapper = data;
8712 seq_printf(m, "%ps:", (void *)ip);
8714 seq_puts(m, "snapshot");
8717 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8720 seq_printf(m, ":count=%ld\n", *count);
8722 seq_puts(m, ":unlimited\n");
8728 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8729 unsigned long ip, void *init_data, void **data)
8731 struct ftrace_func_mapper *mapper = *data;
8734 mapper = allocate_ftrace_func_mapper();
8740 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8744 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8745 unsigned long ip, void *data)
8747 struct ftrace_func_mapper *mapper = data;
8752 free_ftrace_func_mapper(mapper, NULL);
8756 ftrace_func_mapper_remove_ip(mapper, ip);
8759 static struct ftrace_probe_ops snapshot_probe_ops = {
8760 .func = ftrace_snapshot,
8761 .print = ftrace_snapshot_print,
8764 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8765 .func = ftrace_count_snapshot,
8766 .print = ftrace_snapshot_print,
8767 .init = ftrace_snapshot_init,
8768 .free = ftrace_snapshot_free,
8772 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8773 char *glob, char *cmd, char *param, int enable)
8775 struct ftrace_probe_ops *ops;
8776 void *count = (void *)-1;
8783 /* hash funcs only work with set_ftrace_filter */
8787 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8790 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8795 number = strsep(¶m, ":");
8797 if (!strlen(number))
8801 * We use the callback data field (which is a pointer)
8804 ret = kstrtoul(number, 0, (unsigned long *)&count);
8809 ret = tracing_alloc_snapshot_instance(tr);
8813 ret = register_ftrace_function_probe(glob, tr, ops, count);
8816 return ret < 0 ? ret : 0;
8819 static struct ftrace_func_command ftrace_snapshot_cmd = {
8821 .func = ftrace_trace_snapshot_callback,
8824 static __init int register_snapshot_cmd(void)
8826 return register_ftrace_command(&ftrace_snapshot_cmd);
8829 static inline __init int register_snapshot_cmd(void) { return 0; }
8830 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8832 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8834 if (WARN_ON(!tr->dir))
8835 return ERR_PTR(-ENODEV);
8837 /* Top directory uses NULL as the parent */
8838 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8841 /* All sub buffers have a descriptor */
8845 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8847 struct dentry *d_tracer;
8850 return tr->percpu_dir;
8852 d_tracer = tracing_get_dentry(tr);
8853 if (IS_ERR(d_tracer))
8856 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8858 MEM_FAIL(!tr->percpu_dir,
8859 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8861 return tr->percpu_dir;
8864 static struct dentry *
8865 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8866 void *data, long cpu, const struct file_operations *fops)
8868 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8870 if (ret) /* See tracing_get_cpu() */
8871 d_inode(ret)->i_cdev = (void *)(cpu + 1);
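/*
 * Editorial note (based on the hint above): storing cpu + 1 keeps the
 * encoding non-NULL even for cpu 0, so tracing_get_cpu() can treat a NULL
 * i_cdev as "all CPUs" and otherwise recover the CPU number by subtracting
 * one again.
 */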
8876 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8878 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8879 struct dentry *d_cpu;
8880 char cpu_dir[30]; /* 30 characters should be more than enough */
8885 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8886 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8888 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8892 /* per cpu trace_pipe */
8893 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8894 tr, cpu, &tracing_pipe_fops);
8897 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8898 tr, cpu, &tracing_fops);
8900 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8901 tr, cpu, &tracing_buffers_fops);
8903 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8904 tr, cpu, &tracing_stats_fops);
8906 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8907 tr, cpu, &tracing_entries_fops);
8909 #ifdef CONFIG_TRACER_SNAPSHOT
8910 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8911 tr, cpu, &snapshot_fops);
8913 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8914 tr, cpu, &snapshot_raw_fops);
8918 #ifdef CONFIG_FTRACE_SELFTEST
8919 /* Let selftest have access to static functions in this file */
8920 #include "trace_selftest.c"
8924 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8927 struct trace_option_dentry *topt = filp->private_data;
8930 if (topt->flags->val & topt->opt->bit)
8935 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8939 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8942 struct trace_option_dentry *topt = filp->private_data;
8946 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8950 if (val != 0 && val != 1)
8953 if (!!(topt->flags->val & topt->opt->bit) != val) {
8954 mutex_lock(&trace_types_lock);
8955 ret = __set_tracer_option(topt->tr, topt->flags,
8957 mutex_unlock(&trace_types_lock);
8967 static int tracing_open_options(struct inode *inode, struct file *filp)
8969 struct trace_option_dentry *topt = inode->i_private;
8972 ret = tracing_check_open_get_tr(topt->tr);
8976 filp->private_data = inode->i_private;
8980 static int tracing_release_options(struct inode *inode, struct file *file)
8982 struct trace_option_dentry *topt = file->private_data;
8984 trace_array_put(topt->tr);
8988 static const struct file_operations trace_options_fops = {
8989 .open = tracing_open_options,
8990 .read = trace_options_read,
8991 .write = trace_options_write,
8992 .llseek = generic_file_llseek,
8993 .release = tracing_release_options,
8997 * In order to pass in both the trace_array descriptor as well as the index
8998 * to the flag that the trace option file represents, the trace_array
8999 * has a character array of trace_flags_index[], which holds the index
9000 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
9001 * The address of this character array is passed to the flag option file
9002 * read/write callbacks.
9004 * In order to extract both the index and the trace_array descriptor,
9005 * get_tr_index() uses the following algorithm.
9009 * The pointer passed in is the address of one of those index entries,
9012 * so dereferencing it yields the index. To get back to the trace_array
9013 * descriptor, subtract that index from the pointer to reach the start of the index array itself.
9015 * ptr - idx == &index[0]
9017 * Then a simple container_of() from that pointer gets us to the
9018 * trace_array descriptor.
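/*
 * Worked example (illustrative): if an option file represents flag bit 3,
 * its private data points at tr->trace_flags_index[3], which holds 3:
 *
 *	idx = *(unsigned char *)data;		== 3
 *	data - idx				== &tr->trace_flags_index[0]
 *	container_of(data - idx, struct trace_array, trace_flags_index) == tr
 */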
9020 static void get_tr_index(void *data, struct trace_array **ptr,
9021 unsigned int *pindex)
9023 *pindex = *(unsigned char *)data;
9025 *ptr = container_of(data - *pindex, struct trace_array,
9030 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
9033 void *tr_index = filp->private_data;
9034 struct trace_array *tr;
9038 get_tr_index(tr_index, &tr, &index);
9040 if (tr->trace_flags & (1 << index))
9045 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
9049 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
9052 void *tr_index = filp->private_data;
9053 struct trace_array *tr;
9058 get_tr_index(tr_index, &tr, &index);
9060 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9064 if (val != 0 && val != 1)
9067 mutex_lock(&event_mutex);
9068 mutex_lock(&trace_types_lock);
9069 ret = set_tracer_flag(tr, 1 << index, val);
9070 mutex_unlock(&trace_types_lock);
9071 mutex_unlock(&event_mutex);
9081 static const struct file_operations trace_options_core_fops = {
9082 .open = tracing_open_generic,
9083 .read = trace_options_core_read,
9084 .write = trace_options_core_write,
9085 .llseek = generic_file_llseek,
9088 struct dentry *trace_create_file(const char *name,
9090 struct dentry *parent,
9092 const struct file_operations *fops)
9096 ret = tracefs_create_file(name, mode, parent, data, fops);
9098 pr_warn("Could not create tracefs '%s' entry\n", name);
9104 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9106 struct dentry *d_tracer;
9111 d_tracer = tracing_get_dentry(tr);
9112 if (IS_ERR(d_tracer))
9115 tr->options = tracefs_create_dir("options", d_tracer);
9117 pr_warn("Could not create tracefs directory 'options'\n");
9125 create_trace_option_file(struct trace_array *tr,
9126 struct trace_option_dentry *topt,
9127 struct tracer_flags *flags,
9128 struct tracer_opt *opt)
9130 struct dentry *t_options;
9132 t_options = trace_options_init_dentry(tr);
9136 topt->flags = flags;
9140 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9141 t_options, topt, &trace_options_fops);
9146 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9148 struct trace_option_dentry *topts;
9149 struct trace_options *tr_topts;
9150 struct tracer_flags *flags;
9151 struct tracer_opt *opts;
9158 flags = tracer->flags;
9160 if (!flags || !flags->opts)
9164 * If this is an instance, only create flags for tracers
9165 * the instance may have.
9167 if (!trace_ok_for_array(tracer, tr))
9170 for (i = 0; i < tr->nr_topts; i++) {
9171 /* Make sure there are no duplicate flags. */
9172 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9178 for (cnt = 0; opts[cnt].name; cnt++)
9181 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9185 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9192 tr->topts = tr_topts;
9193 tr->topts[tr->nr_topts].tracer = tracer;
9194 tr->topts[tr->nr_topts].topts = topts;
9197 for (cnt = 0; opts[cnt].name; cnt++) {
9198 create_trace_option_file(tr, &topts[cnt], flags,
9200 MEM_FAIL(topts[cnt].entry == NULL,
9201 "Failed to create trace option: %s",
9206 static struct dentry *
9207 create_trace_option_core_file(struct trace_array *tr,
9208 const char *option, long index)
9210 struct dentry *t_options;
9212 t_options = trace_options_init_dentry(tr);
9216 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9217 (void *)&tr->trace_flags_index[index],
9218 &trace_options_core_fops);
9221 static void create_trace_options_dir(struct trace_array *tr)
9223 struct dentry *t_options;
9224 bool top_level = tr == &global_trace;
9227 t_options = trace_options_init_dentry(tr);
9231 for (i = 0; trace_options[i]; i++) {
9233 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9234 create_trace_option_core_file(tr, trace_options[i], i);
9239 rb_simple_read(struct file *filp, char __user *ubuf,
9240 size_t cnt, loff_t *ppos)
9242 struct trace_array *tr = filp->private_data;
9246 r = tracer_tracing_is_on(tr);
9247 r = sprintf(buf, "%d\n", r);
9249 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9253 rb_simple_write(struct file *filp, const char __user *ubuf,
9254 size_t cnt, loff_t *ppos)
9256 struct trace_array *tr = filp->private_data;
9257 struct trace_buffer *buffer = tr->array_buffer.buffer;
9261 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9266 mutex_lock(&trace_types_lock);
9267 if (!!val == tracer_tracing_is_on(tr)) {
9268 val = 0; /* do nothing */
9270 tracer_tracing_on(tr);
9271 if (tr->current_trace->start)
9272 tr->current_trace->start(tr);
9274 tracer_tracing_off(tr);
9275 if (tr->current_trace->stop)
9276 tr->current_trace->stop(tr);
9277 /* Wake up any waiters */
9278 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9280 mutex_unlock(&trace_types_lock);
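/*
 * Usage sketch (typical tracefs path): these handlers back the "tracing_on"
 * file created below, so
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on
 *	echo 1 > /sys/kernel/tracing/tracing_on
 *
 * stop and restart writes to the ring buffer without switching tracers.
 */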
9288 static const struct file_operations rb_simple_fops = {
9289 .open = tracing_open_generic_tr,
9290 .read = rb_simple_read,
9291 .write = rb_simple_write,
9292 .release = tracing_release_generic_tr,
9293 .llseek = default_llseek,
9297 buffer_percent_read(struct file *filp, char __user *ubuf,
9298 size_t cnt, loff_t *ppos)
9300 struct trace_array *tr = filp->private_data;
9304 r = tr->buffer_percent;
9305 r = sprintf(buf, "%d\n", r);
9307 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9311 buffer_percent_write(struct file *filp, const char __user *ubuf,
9312 size_t cnt, loff_t *ppos)
9314 struct trace_array *tr = filp->private_data;
9318 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9325 tr->buffer_percent = val;
9332 static const struct file_operations buffer_percent_fops = {
9333 .open = tracing_open_generic_tr,
9334 .read = buffer_percent_read,
9335 .write = buffer_percent_write,
9336 .release = tracing_release_generic_tr,
9337 .llseek = default_llseek,
9340 static struct dentry *trace_instance_dir;
9343 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9346 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9348 enum ring_buffer_flags rb_flags;
9350 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9354 buf->buffer = ring_buffer_alloc(size, rb_flags);
9358 buf->data = alloc_percpu(struct trace_array_cpu);
9360 ring_buffer_free(buf->buffer);
9365 /* Allocate the first page for all buffers */
9366 set_buffer_entries(&tr->array_buffer,
9367 ring_buffer_size(tr->array_buffer.buffer, 0));
9372 static void free_trace_buffer(struct array_buffer *buf)
9375 ring_buffer_free(buf->buffer);
9377 free_percpu(buf->data);
9382 static int allocate_trace_buffers(struct trace_array *tr, int size)
9386 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9390 #ifdef CONFIG_TRACER_MAX_TRACE
9391 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9392 allocate_snapshot ? size : 1);
9393 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9394 free_trace_buffer(&tr->array_buffer);
9397 tr->allocated_snapshot = allocate_snapshot;
9399 allocate_snapshot = false;
9405 static void free_trace_buffers(struct trace_array *tr)
9410 free_trace_buffer(&tr->array_buffer);
9412 #ifdef CONFIG_TRACER_MAX_TRACE
9413 free_trace_buffer(&tr->max_buffer);
9417 static void init_trace_flags_index(struct trace_array *tr)
9421 /* Used by the trace options files */
9422 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9423 tr->trace_flags_index[i] = i;
9426 static void __update_tracer_options(struct trace_array *tr)
9430 for (t = trace_types; t; t = t->next)
9431 add_tracer_options(tr, t);
9434 static void update_tracer_options(struct trace_array *tr)
9436 mutex_lock(&trace_types_lock);
9437 tracer_options_updated = true;
9438 __update_tracer_options(tr);
9439 mutex_unlock(&trace_types_lock);
9442 /* Must have trace_types_lock held */
9443 struct trace_array *trace_array_find(const char *instance)
9445 struct trace_array *tr, *found = NULL;
9447 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9448 if (tr->name && strcmp(tr->name, instance) == 0) {
9457 struct trace_array *trace_array_find_get(const char *instance)
9459 struct trace_array *tr;
9461 mutex_lock(&trace_types_lock);
9462 tr = trace_array_find(instance);
9465 mutex_unlock(&trace_types_lock);
9470 static int trace_array_create_dir(struct trace_array *tr)
9474 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9478 ret = event_trace_add_tracer(tr->dir, tr);
9480 tracefs_remove(tr->dir);
9484 init_tracer_tracefs(tr, tr->dir);
9485 __update_tracer_options(tr);
9490 static struct trace_array *trace_array_create(const char *name)
9492 struct trace_array *tr;
9496 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9498 return ERR_PTR(ret);
9500 tr->name = kstrdup(name, GFP_KERNEL);
9504 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9507 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9510 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9512 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9514 raw_spin_lock_init(&tr->start_lock);
9516 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9518 tr->current_trace = &nop_trace;
9520 INIT_LIST_HEAD(&tr->systems);
9521 INIT_LIST_HEAD(&tr->events);
9522 INIT_LIST_HEAD(&tr->hist_vars);
9523 INIT_LIST_HEAD(&tr->err_log);
9525 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9528 if (ftrace_allocate_ftrace_ops(tr) < 0)
9531 ftrace_init_trace_array(tr);
9533 init_trace_flags_index(tr);
9535 if (trace_instance_dir) {
9536 ret = trace_array_create_dir(tr);
9540 __trace_early_add_events(tr);
9542 list_add(&tr->list, &ftrace_trace_arrays);
9549 ftrace_free_ftrace_ops(tr);
9550 free_trace_buffers(tr);
9551 free_cpumask_var(tr->pipe_cpumask);
9552 free_cpumask_var(tr->tracing_cpumask);
9556 return ERR_PTR(ret);
9559 static int instance_mkdir(const char *name)
9561 struct trace_array *tr;
9564 mutex_lock(&event_mutex);
9565 mutex_lock(&trace_types_lock);
9568 if (trace_array_find(name))
9571 tr = trace_array_create(name);
9573 ret = PTR_ERR_OR_ZERO(tr);
9576 mutex_unlock(&trace_types_lock);
9577 mutex_unlock(&event_mutex);
9582 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9583 * @name: The name of the trace array to be looked up/created.
9585 * Returns a pointer to the trace array with the given name, or
9586 * NULL if it cannot be created.
9588 * NOTE: This function increments the reference counter associated with the
9589 * trace array returned. This makes sure it cannot be freed while in use.
9590 * Use trace_array_put() once the trace array is no longer needed.
9591 * If the trace_array is to be freed, trace_array_destroy() needs to
9592 * be called after the trace_array_put(), or simply let user space delete
9593 * it from the tracefs instances directory. But until the
9594 * trace_array_put() is called, user space cannot delete it.
9597 struct trace_array *trace_array_get_by_name(const char *name)
9599 struct trace_array *tr;
9601 mutex_lock(&event_mutex);
9602 mutex_lock(&trace_types_lock);
9604 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9605 if (tr->name && strcmp(tr->name, name) == 0)
9609 tr = trace_array_create(name);
9617 mutex_unlock(&trace_types_lock);
9618 mutex_unlock(&event_mutex);
9621 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
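/*
 * Minimal lifecycle sketch for the API above (hypothetical caller, such as
 * a module that wants its own instance):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	/* only if the instance should be removed */
 */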
9623 static int __remove_instance(struct trace_array *tr)
9627 /* Reference counter for a newly created trace array = 1. */
9628 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9631 list_del(&tr->list);
9633 /* Disable all the flags that were enabled coming in */
9634 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9635 if ((1 << i) & ZEROED_TRACE_FLAGS)
9636 set_tracer_flag(tr, 1 << i, 0);
9639 tracing_set_nop(tr);
9640 clear_ftrace_function_probes(tr);
9641 event_trace_del_tracer(tr);
9642 ftrace_clear_pids(tr);
9643 ftrace_destroy_function_files(tr);
9644 tracefs_remove(tr->dir);
9645 free_percpu(tr->last_func_repeats);
9646 free_trace_buffers(tr);
9647 clear_tracing_err_log(tr);
9649 for (i = 0; i < tr->nr_topts; i++) {
9650 kfree(tr->topts[i].topts);
9654 free_cpumask_var(tr->pipe_cpumask);
9655 free_cpumask_var(tr->tracing_cpumask);
9662 int trace_array_destroy(struct trace_array *this_tr)
9664 struct trace_array *tr;
9670 mutex_lock(&event_mutex);
9671 mutex_lock(&trace_types_lock);
9675 /* Make sure the trace array exists before destroying it. */
9676 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9677 if (tr == this_tr) {
9678 ret = __remove_instance(tr);
9683 mutex_unlock(&trace_types_lock);
9684 mutex_unlock(&event_mutex);
9688 EXPORT_SYMBOL_GPL(trace_array_destroy);
9690 static int instance_rmdir(const char *name)
9692 struct trace_array *tr;
9695 mutex_lock(&event_mutex);
9696 mutex_lock(&trace_types_lock);
9699 tr = trace_array_find(name);
9701 ret = __remove_instance(tr);
9703 mutex_unlock(&trace_types_lock);
9704 mutex_unlock(&event_mutex);
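/*
 * Usage sketch (assuming the usual tracefs mount point): user space reaches
 * these callbacks through the instances directory, e.g.
 *
 *	mkdir /sys/kernel/tracing/instances/foo   -> instance_mkdir("foo")
 *	rmdir /sys/kernel/tracing/instances/foo   -> instance_rmdir("foo")
 */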
9709 static __init void create_trace_instances(struct dentry *d_tracer)
9711 struct trace_array *tr;
9713 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9716 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9719 mutex_lock(&event_mutex);
9720 mutex_lock(&trace_types_lock);
9722 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9725 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9726 "Failed to create instance directory\n"))
9730 mutex_unlock(&trace_types_lock);
9731 mutex_unlock(&event_mutex);
9735 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9737 struct trace_event_file *file;
9740 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9741 tr, &show_traces_fops);
9743 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9744 tr, &set_tracer_fops);
9746 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9747 tr, &tracing_cpumask_fops);
9749 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9750 tr, &tracing_iter_fops);
9752 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9755 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9756 tr, &tracing_pipe_fops);
9758 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9759 tr, &tracing_entries_fops);
9761 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9762 tr, &tracing_total_entries_fops);
9764 trace_create_file("free_buffer", 0200, d_tracer,
9765 tr, &tracing_free_buffer_fops);
9767 trace_create_file("trace_marker", 0220, d_tracer,
9768 tr, &tracing_mark_fops);
9770 file = __find_event_file(tr, "ftrace", "print");
9771 if (file && file->ef)
9772 eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
9773 file, &event_trigger_fops);
9774 tr->trace_marker_file = file;
9776 trace_create_file("trace_marker_raw", 0220, d_tracer,
9777 tr, &tracing_mark_raw_fops);
9779 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9782 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9783 tr, &rb_simple_fops);
9785 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9786 &trace_time_stamp_mode_fops);
9788 tr->buffer_percent = 50;
9790 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9791 tr, &buffer_percent_fops);
9793 create_trace_options_dir(tr);
9795 #ifdef CONFIG_TRACER_MAX_TRACE
9796 trace_create_maxlat_file(tr, d_tracer);
9799 if (ftrace_create_function_files(tr, d_tracer))
9800 MEM_FAIL(1, "Could not allocate function filter files");
9802 #ifdef CONFIG_TRACER_SNAPSHOT
9803 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9804 tr, &snapshot_fops);
9807 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9808 tr, &tracing_err_log_fops);
9810 for_each_tracing_cpu(cpu)
9811 tracing_init_tracefs_percpu(tr, cpu);
9813 ftrace_init_tracefs(tr, d_tracer);
9816 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
9818 struct vfsmount *mnt;
9819 struct file_system_type *type;
9822 * To maintain backward compatibility for tools that mount
9823 * debugfs to get to the tracing facility, tracefs is automatically
9824 * mounted to the debugfs/tracing directory.
9826 type = get_fs_type("tracefs");
9829 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9830 put_filesystem(type);
9839 * tracing_init_dentry - initialize top level trace array
9841 * This is called when creating files or directories in the tracing
9842 * directory. It is called via fs_initcall() by any of the boot up code
9843 * and returns zero once the top level tracing directory has been set up.
9845 int tracing_init_dentry(void)
9847 struct trace_array *tr = &global_trace;
9849 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9850 pr_warn("Tracing disabled due to lockdown\n");
9854 /* The top level trace array uses NULL as parent */
9858 if (WARN_ON(!tracefs_initialized()))
9862 * As there may still be users that expect the tracing
9863 * files to exist in debugfs/tracing, we must automount
9864 * the tracefs file system there, so older tools still
9865 * work with the newer kernel.
9867 tr->dir = debugfs_create_automount("tracing", NULL,
9868 trace_automount, NULL);
9873 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9874 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9876 static struct workqueue_struct *eval_map_wq __initdata;
9877 static struct work_struct eval_map_work __initdata;
9878 static struct work_struct tracerfs_init_work __initdata;
9880 static void __init eval_map_work_func(struct work_struct *work)
9884 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9885 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9888 static int __init trace_eval_init(void)
9890 INIT_WORK(&eval_map_work, eval_map_work_func);
9892 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9894 pr_err("Unable to allocate eval_map_wq\n");
9896 eval_map_work_func(&eval_map_work);
9900 queue_work(eval_map_wq, &eval_map_work);
9904 subsys_initcall(trace_eval_init);
9906 static int __init trace_eval_sync(void)
9908 /* Make sure the eval map updates are finished */
9910 destroy_workqueue(eval_map_wq);
9914 late_initcall_sync(trace_eval_sync);
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */
static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}
static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};
/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops)
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_DONE;
}
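
/*
 * The dump itself is gated by ftrace_dump_on_oops, which can be set with
 * the "ftrace_dump_on_oops" kernel command line option or at run time
 * through /proc/sys/kernel/ftrace_dump_on_oops.
 */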
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
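
/*
 * The static buffers above are used because this iterator is set up from
 * oops/panic (and possibly NMI) context, where allocating memory is not
 * an option.
 */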
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */
	while (!trace_empty(&iter)) {
		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
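
/*
 * ftrace_dump() is exported so other kernel code can dump the trace
 * buffers to the console when chasing a hard-to-reproduce failure, e.g.
 * (illustrative only, "broken_state" is a placeholder):
 *
 *	if (WARN_ON(broken_state))
 *		ftrace_dump(DUMP_ALL);
 *
 * Passing DUMP_ORIG instead limits the dump to the current CPU's buffer.
 */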
10253 #define WRITE_BUFSIZE 4096
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
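
/*
 * trace_parse_run_command() is the write path for command style files such
 * as kprobe_events, uprobe_events and dynamic_events, where each line
 * (minus '#' comments) is handed to createfn, e.g.:
 *
 *	echo 'p:my_open do_sys_openat2' >> /sys/kernel/tracing/kprobe_events
 */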
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) == NULL;
	kfree(test);
	return ret;
}
__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif
__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
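
/*
 * boot_instance_info is filled from the "trace_instance=" kernel command
 * line option, e.g.:
 *
 *	trace_instance=foo,sched:sched_switch
 *
 * which creates the "foo" instance at early boot and enables the listed
 * events in it.
 */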
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
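
/*
 * snapshot_at_boot is set by the "ftrace_boot_snapshot" kernel command
 * line option; ftrace_boot_snapshot() then snapshots every instance that
 * has a snapshot buffer allocated, so the boot trace can be read back
 * from the "snapshot" file after the fact.
 */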
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}

	tracer_alloc_buffers();

	init_events();
}
void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);