1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 #include "trace_output.h"
58 * On boot up, the ring buffer is set to the minimum size, so that
59 * we do not waste memory on systems that are not using tracing.
61 bool ring_buffer_expanded;
64 * We need to change this state when a selftest is running.
65 * A selftest will look into the ring buffer to count the
66 * entries inserted during the selftest, although concurrent
67 * insertions into the ring buffer, such as trace_printk(), could occur
68 * at the same time, giving false positive or negative results.
70 static bool __read_mostly tracing_selftest_running;
73 * If boot-time tracing (including tracers/events set up via the kernel
74 * cmdline) is running, we do not want to run the selftests.
76 bool __read_mostly tracing_selftest_disabled;
78 #ifdef CONFIG_FTRACE_STARTUP_TEST
79 void __init disable_tracing_selftest(const char *reason)
81 if (!tracing_selftest_disabled) {
82 tracing_selftest_disabled = true;
83 pr_info("Ftrace startup test is disabled due to %s\n", reason);
88 /* Pipe tracepoints to printk */
89 static struct trace_iterator *tracepoint_print_iter;
90 int tracepoint_printk;
91 static bool tracepoint_printk_stop_on_boot __initdata;
92 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
94 /* For tracers that don't implement custom flags */
95 static struct tracer_opt dummy_tracer_opt[] = {
100 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
106 * To prevent the comm cache from being overwritten when no
107 * tracing is active, only save the comm when a trace event occurred.
110 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
113 * Kill all tracing for good (never come back).
114 * It is initialized to 1 but will turn to zero if the initialization
115 * of the tracer is successful. But that is the only place that sets this back to zero.
118 static int tracing_disabled = 1;
120 cpumask_var_t __read_mostly tracing_buffer_mask;
123 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
125 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
126 * is set, then ftrace_dump is called. This will output the contents
127 * of the ftrace buffers to the console. This is very useful for
128 * capturing traces that lead to crashes and outputting them to a serial console.
131 * It is off by default, but you can enable it either by specifying
132 * "ftrace_dump_on_oops" on the kernel command line, or by setting
133 * /proc/sys/kernel/ftrace_dump_on_oops
134 * Set 1 if you want to dump buffers of all CPUs
135 * Set 2 if you want to dump the buffer of the CPU that triggered oops
138 enum ftrace_dump_mode ftrace_dump_on_oops;
140 /* When set, tracing will stop when a WARN*() is hit */
141 int __disable_trace_on_warning;
143 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
144 /* Map of enums to their values, for "eval_map" file */
145 struct trace_eval_map_head {
147 unsigned long length;
150 union trace_eval_map_item;
152 struct trace_eval_map_tail {
154 * "end" is first and points to NULL as it must be different
155 * from "mod" or "eval_string"
157 union trace_eval_map_item *next;
158 const char *end; /* points to NULL */
161 static DEFINE_MUTEX(trace_eval_mutex);
164 * The trace_eval_maps are saved in an array with two extra elements,
165 * one at the beginning, and one at the end. The beginning item contains
166 * the count of the saved maps (head.length), and the module they
167 * belong to if not built in (head.mod). The ending item contains a
168 * pointer to the next array of saved eval_map items.
170 union trace_eval_map_item {
171 struct trace_eval_map map;
172 struct trace_eval_map_head head;
173 struct trace_eval_map_tail tail;
176 static union trace_eval_map_item *trace_eval_maps;
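/*
 * Illustrative sketch (not part of the original file): walking the saved
 * eval-map arrays with the layout described above -- ptr[0] is the head,
 * ptr[1..length] are the maps, and ptr[length + 1] is the tail whose
 * "next" links to the following saved array. The function name and the
 * pr_debug() output are hypothetical.
 */
static void example_walk_eval_maps(union trace_eval_map_item *ptr)
{
	while (ptr) {
		unsigned long len = ptr->head.length;
		unsigned long i;

		for (i = 1; i <= len; i++)
			pr_debug("%s: %s = %lu\n", ptr[i].map.system,
				 ptr[i].map.eval_string,
				 ptr[i].map.eval_value);

		/* The tail element chains to the next saved array. */
		ptr = ptr[len + 1].tail.next;
	}
}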
177 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
179 int tracing_set_tracer(struct trace_array *tr, const char *buf);
180 static void ftrace_trace_userstack(struct trace_array *tr,
181 struct trace_buffer *buffer,
182 unsigned int trace_ctx);
184 #define MAX_TRACER_SIZE 100
185 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
186 static char *default_bootup_tracer;
188 static bool allocate_snapshot;
189 static bool snapshot_at_boot;
191 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
192 static int boot_instance_index;
194 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_snapshot_index;
197 static int __init set_cmdline_ftrace(char *str)
199 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
200 default_bootup_tracer = bootup_tracer_buf;
201 /* We are using ftrace early, expand it */
202 ring_buffer_expanded = true;
205 __setup("ftrace=", set_cmdline_ftrace);
207 static int __init set_ftrace_dump_on_oops(char *str)
209 if (*str++ != '=' || !*str || !strcmp("1", str)) {
210 ftrace_dump_on_oops = DUMP_ALL;
214 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
215 ftrace_dump_on_oops = DUMP_ORIG;
221 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
223 static int __init stop_trace_on_warning(char *str)
225 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
226 __disable_trace_on_warning = 1;
229 __setup("traceoff_on_warning", stop_trace_on_warning);
231 static int __init boot_alloc_snapshot(char *str)
233 char *slot = boot_snapshot_info + boot_snapshot_index;
234 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
239 if (strlen(str) >= left)
242 ret = snprintf(slot, left, "%s\t", str);
243 boot_snapshot_index += ret;
245 allocate_snapshot = true;
246 /* We also need the main ring buffer expanded */
247 ring_buffer_expanded = true;
251 __setup("alloc_snapshot", boot_alloc_snapshot);
254 static int __init boot_snapshot(char *str)
256 snapshot_at_boot = true;
257 boot_alloc_snapshot(str);
260 __setup("ftrace_boot_snapshot", boot_snapshot);
263 static int __init boot_instance(char *str)
265 char *slot = boot_instance_info + boot_instance_index;
266 int left = sizeof(boot_instance_info) - boot_instance_index;
269 if (strlen(str) >= left)
272 ret = snprintf(slot, left, "%s\t", str);
273 boot_instance_index += ret;
277 __setup("trace_instance=", boot_instance);
280 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
282 static int __init set_trace_boot_options(char *str)
284 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
287 __setup("trace_options=", set_trace_boot_options);
289 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
290 static char *trace_boot_clock __initdata;
292 static int __init set_trace_boot_clock(char *str)
294 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
295 trace_boot_clock = trace_boot_clock_buf;
298 __setup("trace_clock=", set_trace_boot_clock);
300 static int __init set_tracepoint_printk(char *str)
302 /* Ignore the "tp_printk_stop_on_boot" param */
306 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
307 tracepoint_printk = 1;
310 __setup("tp_printk", set_tracepoint_printk);
312 static int __init set_tracepoint_printk_stop(char *str)
314 tracepoint_printk_stop_on_boot = true;
317 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
319 unsigned long long ns2usecs(u64 nsec)
327 trace_process_export(struct trace_export *export,
328 struct ring_buffer_event *event, int flag)
330 struct trace_entry *entry;
331 unsigned int size = 0;
333 if (export->flags & flag) {
334 entry = ring_buffer_event_data(event);
335 size = ring_buffer_event_length(event);
336 export->write(export, entry, size);
340 static DEFINE_MUTEX(ftrace_export_lock);
342 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
344 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
345 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
346 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
348 static inline void ftrace_exports_enable(struct trace_export *export)
350 if (export->flags & TRACE_EXPORT_FUNCTION)
351 static_branch_inc(&trace_function_exports_enabled);
353 if (export->flags & TRACE_EXPORT_EVENT)
354 static_branch_inc(&trace_event_exports_enabled);
356 if (export->flags & TRACE_EXPORT_MARKER)
357 static_branch_inc(&trace_marker_exports_enabled);
360 static inline void ftrace_exports_disable(struct trace_export *export)
362 if (export->flags & TRACE_EXPORT_FUNCTION)
363 static_branch_dec(&trace_function_exports_enabled);
365 if (export->flags & TRACE_EXPORT_EVENT)
366 static_branch_dec(&trace_event_exports_enabled);
368 if (export->flags & TRACE_EXPORT_MARKER)
369 static_branch_dec(&trace_marker_exports_enabled);
372 static void ftrace_exports(struct ring_buffer_event *event, int flag)
374 struct trace_export *export;
376 preempt_disable_notrace();
378 export = rcu_dereference_raw_check(ftrace_exports_list);
380 trace_process_export(export, event, flag);
381 export = rcu_dereference_raw_check(export->next);
384 preempt_enable_notrace();
388 add_trace_export(struct trace_export **list, struct trace_export *export)
390 rcu_assign_pointer(export->next, *list);
392 * We are adding the export to the list, but another
393 * CPU might be walking that list. We need to make sure
394 * the export->next pointer is valid before another CPU sees
395 * the export pointer included in the list.
397 rcu_assign_pointer(*list, export);
401 rm_trace_export(struct trace_export **list, struct trace_export *export)
403 struct trace_export **p;
405 for (p = list; *p != NULL; p = &(*p)->next)
412 rcu_assign_pointer(*p, (*p)->next);
418 add_ftrace_export(struct trace_export **list, struct trace_export *export)
420 ftrace_exports_enable(export);
422 add_trace_export(list, export);
426 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
430 ret = rm_trace_export(list, export);
431 ftrace_exports_disable(export);
436 int register_ftrace_export(struct trace_export *export)
438 if (WARN_ON_ONCE(!export->write))
441 mutex_lock(&ftrace_export_lock);
443 add_ftrace_export(&ftrace_exports_list, export);
445 mutex_unlock(&ftrace_export_lock);
449 EXPORT_SYMBOL_GPL(register_ftrace_export);
451 int unregister_ftrace_export(struct trace_export *export)
455 mutex_lock(&ftrace_export_lock);
457 ret = rm_ftrace_export(&ftrace_exports_list, export);
459 mutex_unlock(&ftrace_export_lock);
463 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
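/*
 * Illustrative sketch (not part of this file): a minimal trace_export as
 * a module might register it. The callback body and the "example_export*"
 * names are hypothetical; register_ftrace_export(),
 * unregister_ftrace_export(), struct trace_export and the
 * TRACE_EXPORT_FUNCTION flag are the real interfaces used above.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* A real export would forward @entry (a trace entry) elsewhere. */
	pr_debug("ftrace export: %u bytes\n", size);
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_FUNCTION,
};

static int example_export_start(void)
{
	return register_ftrace_export(&example_export);
}

static void example_export_stop(void)
{
	unregister_ftrace_export(&example_export);
}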
465 /* trace_flags holds trace_options default values */
466 #define TRACE_DEFAULT_FLAGS \
467 (FUNCTION_DEFAULT_FLAGS | \
468 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
469 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
470 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
471 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
474 /* trace_options that are only supported by global_trace */
475 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
476 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
478 /* trace_flags that are default zero for instances */
479 #define ZEROED_TRACE_FLAGS \
480 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
483 * The global_trace is the descriptor that holds the top-level tracing
484 * buffers for the live tracing.
486 static struct trace_array global_trace = {
487 .trace_flags = TRACE_DEFAULT_FLAGS,
490 LIST_HEAD(ftrace_trace_arrays);
492 int trace_array_get(struct trace_array *this_tr)
494 struct trace_array *tr;
497 mutex_lock(&trace_types_lock);
498 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
505 mutex_unlock(&trace_types_lock);
510 static void __trace_array_put(struct trace_array *this_tr)
512 WARN_ON(!this_tr->ref);
517 * trace_array_put - Decrement the reference counter for this trace array.
518 * @this_tr : pointer to the trace array
520 * NOTE: Use this when we no longer need the trace array returned by
521 * trace_array_get_by_name(). This ensures the trace array can be later destroyed.
525 void trace_array_put(struct trace_array *this_tr)
530 mutex_lock(&trace_types_lock);
531 __trace_array_put(this_tr);
532 mutex_unlock(&trace_types_lock);
534 EXPORT_SYMBOL_GPL(trace_array_put);
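/*
 * Illustrative sketch (not part of this file): the usual get/put
 * bracketing around use of a trace_array, as the tracefs open/release
 * paths do. The function below is hypothetical.
 */
static int example_use_trace_array(struct trace_array *tr)
{
	int ret;

	ret = trace_array_get(tr);
	if (ret < 0)
		return ret;

	/* @tr cannot be removed while the reference is held. */

	trace_array_put(tr);
	return 0;
}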
536 int tracing_check_open_get_tr(struct trace_array *tr)
540 ret = security_locked_down(LOCKDOWN_TRACEFS);
544 if (tracing_disabled)
547 if (tr && trace_array_get(tr) < 0)
553 int call_filter_check_discard(struct trace_event_call *call, void *rec,
554 struct trace_buffer *buffer,
555 struct ring_buffer_event *event)
557 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
558 !filter_match_preds(call->filter, rec)) {
559 __trace_event_discard_commit(buffer, event);
567 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
568 * @filtered_pids: The list of pids to check
569 * @search_pid: The PID to find in @filtered_pids
571 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
574 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
576 return trace_pid_list_is_set(filtered_pids, search_pid);
580 * trace_ignore_this_task - should a task be ignored for tracing
581 * @filtered_pids: The list of pids to check
582 * @filtered_no_pids: The list of pids not to be traced
583 * @task: The task that should be ignored if not filtered
585 * Checks if @task should be traced or not from @filtered_pids.
586 * Returns true if @task should *NOT* be traced.
587 * Returns false if @task should be traced.
590 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
591 struct trace_pid_list *filtered_no_pids,
592 struct task_struct *task)
595 * If filtered_no_pids is not empty, and the task's pid is listed
596 * in filtered_no_pids, then return true.
597 * Otherwise, if filtered_pids is empty, that means we can
598 * trace all tasks. If it has content, then only trace pids
599 * within filtered_pids.
602 return (filtered_pids &&
603 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
605 trace_find_filtered_pid(filtered_no_pids, task->pid));
609 * trace_filter_add_remove_task - Add or remove a task from a pid_list
610 * @pid_list: The list to modify
611 * @self: The current task for fork or NULL for exit
612 * @task: The task to add or remove
614 * When adding a task, if @self is defined, the task is only added if @self
615 * is also included in @pid_list. This happens on fork, and tasks should
616 * only be added when the parent is listed. If @self is NULL, then the
617 * @task pid will be removed from the list, which would happen on task exit.
620 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
621 struct task_struct *self,
622 struct task_struct *task)
627 /* For forks, we only add if the forking task is listed */
629 if (!trace_find_filtered_pid(pid_list, self->pid))
633 /* "self" is set for forks, and NULL for exits */
635 trace_pid_list_set(pid_list, task->pid);
637 trace_pid_list_clear(pid_list, task->pid);
641 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
642 * @pid_list: The pid list to show
643 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
644 * @pos: The position of the file
646 * This is used by the seq_file "next" operation to iterate the pids
647 * listed in a trace_pid_list structure.
649 * Returns the pid+1, as we want to be able to display a pid of zero,
650 * but returning NULL would stop the iteration.
652 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
654 long pid = (unsigned long)v;
659 /* pid already is +1 of the actual previous bit */
660 if (trace_pid_list_next(pid_list, pid, &next) < 0)
665 /* Return pid + 1 to allow zero to be represented */
666 return (void *)(pid + 1);
670 * trace_pid_start - Used for seq_file to start reading pid lists
671 * @pid_list: The pid list to show
672 * @pos: The position of the file
674 * This is used by seq_file "start" operation to start the iteration
677 * Returns the pid+1, as we want to be able to display a pid of zero,
678 * but returning NULL would stop the iteration.
680 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
686 if (trace_pid_list_first(pid_list, &first) < 0)
691 /* Return pid + 1 so that zero can be the exit value */
692 for (pid++; pid && l < *pos;
693 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
699 * trace_pid_show - show the current pid in seq_file processing
700 * @m: The seq_file structure to write into
701 * @v: A void pointer of the pid (+1) value to display
703 * Can be directly used by seq_file operations to display the current
706 int trace_pid_show(struct seq_file *m, void *v)
708 unsigned long pid = (unsigned long)v - 1;
710 seq_printf(m, "%lu\n", pid);
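/*
 * Illustrative sketch (not part of this file): how a seq_file user of the
 * helpers above could look. The wrapper names and the way the pid_list is
 * obtained from m->private are hypothetical; trace_pid_start(),
 * trace_pid_next() and trace_pid_show() are the real helpers.
 */
static void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;	/* assumption */

	return trace_pid_start(pid_list, pos);
}

static void *example_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;	/* assumption */

	return trace_pid_next(pid_list, v, pos);
}

static void example_pid_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_seq_start,
	.next	= example_pid_seq_next,
	.stop	= example_pid_seq_stop,
	.show	= trace_pid_show,
};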
714 /* 128 should be much more than enough */
715 #define PID_BUF_SIZE 127
717 int trace_pid_write(struct trace_pid_list *filtered_pids,
718 struct trace_pid_list **new_pid_list,
719 const char __user *ubuf, size_t cnt)
721 struct trace_pid_list *pid_list;
722 struct trace_parser parser;
730 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
734 * Always recreate a new array. The write is an all or nothing
735 * operation. Always create a new array when the user adds new pids.
736 * If the operation fails, then the current list is not modified.
739 pid_list = trace_pid_list_alloc();
741 trace_parser_put(&parser);
746 /* copy the current bits to the new max */
747 ret = trace_pid_list_first(filtered_pids, &pid);
749 trace_pid_list_set(pid_list, pid);
750 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
760 ret = trace_get_user(&parser, ubuf, cnt, &pos);
768 if (!trace_parser_loaded(&parser))
772 if (kstrtoul(parser.buffer, 0, &val))
777 if (trace_pid_list_set(pid_list, pid) < 0) {
783 trace_parser_clear(&parser);
786 trace_parser_put(&parser);
789 trace_pid_list_free(pid_list);
794 /* Cleared the list of pids */
795 trace_pid_list_free(pid_list);
799 *new_pid_list = pid_list;
804 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
808 /* Early boot up does not have a buffer yet */
810 return trace_clock_local();
812 ts = ring_buffer_time_stamp(buf->buffer);
813 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
818 u64 ftrace_now(int cpu)
820 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
824 * tracing_is_enabled - Show if global_trace has been enabled
826 * Shows if the global trace has been enabled or not. It uses the
827 * mirror flag "buffer_disabled", which is meant for fast paths such as
828 * the irqsoff tracer, but it may be inaccurate due to races. If you
829 * need to know the accurate state, use tracing_is_on() which is a little
830 * slower, but accurate.
832 int tracing_is_enabled(void)
835 * For quick access (irqsoff uses this in fast path), just
836 * return the mirror variable of the state of the ring buffer.
837 * It's a little racy, but we don't really care.
840 return !global_trace.buffer_disabled;
844 * trace_buf_size is the size in bytes that is allocated
845 * for a buffer. Note, the number of bytes is always rounded to page size.
848 * This number is purposely set to a low number of 16384.
849 * If a dump on oops happens, it is much appreciated
850 * not to have to wait for all that output. Anyway, this is
851 * configurable at both boot time and run time.
853 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
855 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
857 /* trace_types holds a link list of available tracers. */
858 static struct tracer *trace_types __read_mostly;
861 * trace_types_lock is used to protect the trace_types list.
863 DEFINE_MUTEX(trace_types_lock);
866 * Serialize access to the ring buffer.
868 * The ring buffer serializes readers, but that is only low-level protection.
869 * The validity of the events (returned by ring_buffer_peek() etc.)
870 * is not protected by the ring buffer.
872 * The content of events may become garbage if we allow other processes to
873 * consume these events concurrently:
874 * A) the page of the consumed events may become a normal page
875 * (not a reader page) in the ring buffer, and this page will be rewritten
876 * by the event producer.
877 * B) the page of the consumed events may become a page for splice_read,
878 * and this page will be returned to the system.
880 * These primitives allow multi-process access to different cpu ring buffers concurrently.
883 * These primitives don't distinguish read-only and read-consume access.
884 * Multiple read-only accesses are also serialized.
888 static DECLARE_RWSEM(all_cpu_access_lock);
889 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
891 static inline void trace_access_lock(int cpu)
893 if (cpu == RING_BUFFER_ALL_CPUS) {
894 /* gain it for accessing the whole ring buffer. */
895 down_write(&all_cpu_access_lock);
897 /* gain it for accessing a cpu ring buffer. */
899 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
900 down_read(&all_cpu_access_lock);
902 /* Secondly block other access to this @cpu ring buffer. */
903 mutex_lock(&per_cpu(cpu_access_lock, cpu));
907 static inline void trace_access_unlock(int cpu)
909 if (cpu == RING_BUFFER_ALL_CPUS) {
910 up_write(&all_cpu_access_lock);
912 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
913 up_read(&all_cpu_access_lock);
917 static inline void trace_access_lock_init(void)
921 for_each_possible_cpu(cpu)
922 mutex_init(&per_cpu(cpu_access_lock, cpu));
927 static DEFINE_MUTEX(access_lock);
929 static inline void trace_access_lock(int cpu)
932 mutex_lock(&access_lock);
935 static inline void trace_access_unlock(int cpu)
938 mutex_unlock(&access_lock);
941 static inline void trace_access_lock_init(void)
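/*
 * Illustrative sketch (not part of this file): how a reader is expected
 * to bracket access to a single CPU buffer (or RING_BUFFER_ALL_CPUS for
 * all of them) with the helpers above. The consume function itself is
 * hypothetical.
 */
static void example_consume_cpu_events(int cpu)
{
	trace_access_lock(cpu);
	/* ... read or splice events from @cpu's buffer here ... */
	trace_access_unlock(cpu);
}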
947 #ifdef CONFIG_STACKTRACE
948 static void __ftrace_trace_stack(struct trace_buffer *buffer,
949 unsigned int trace_ctx,
950 int skip, struct pt_regs *regs);
951 static inline void ftrace_trace_stack(struct trace_array *tr,
952 struct trace_buffer *buffer,
953 unsigned int trace_ctx,
954 int skip, struct pt_regs *regs);
957 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
958 unsigned int trace_ctx,
959 int skip, struct pt_regs *regs)
962 static inline void ftrace_trace_stack(struct trace_array *tr,
963 struct trace_buffer *buffer,
964 unsigned long trace_ctx,
965 int skip, struct pt_regs *regs)
971 static __always_inline void
972 trace_event_setup(struct ring_buffer_event *event,
973 int type, unsigned int trace_ctx)
975 struct trace_entry *ent = ring_buffer_event_data(event);
977 tracing_generic_entry_update(ent, type, trace_ctx);
980 static __always_inline struct ring_buffer_event *
981 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
984 unsigned int trace_ctx)
986 struct ring_buffer_event *event;
988 event = ring_buffer_lock_reserve(buffer, len);
990 trace_event_setup(event, type, trace_ctx);
995 void tracer_tracing_on(struct trace_array *tr)
997 if (tr->array_buffer.buffer)
998 ring_buffer_record_on(tr->array_buffer.buffer);
1000 * This flag is looked at when buffers haven't been allocated
1001 * yet, or by some tracers (like irqsoff) that just want to
1002 * know if the ring buffer has been disabled, but it can handle
1003 * races where it gets disabled while we still do a record.
1004 * As the check is in the fast path of the tracers, it is more
1005 * important to be fast than accurate.
1007 tr->buffer_disabled = 0;
1008 /* Make the flag seen by readers */
1013 * tracing_on - enable tracing buffers
1015 * This function enables tracing buffers that may have been
1016 * disabled with tracing_off.
1018 void tracing_on(void)
1020 tracer_tracing_on(&global_trace);
1022 EXPORT_SYMBOL_GPL(tracing_on);
1025 static __always_inline void
1026 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1028 __this_cpu_write(trace_taskinfo_save, true);
1030 /* If this is the temp buffer, we need to commit fully */
1031 if (this_cpu_read(trace_buffered_event) == event) {
1032 /* Length is in event->array[0] */
1033 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1034 /* Release the temp buffer */
1035 this_cpu_dec(trace_buffered_event_cnt);
1036 /* ring_buffer_unlock_commit() enables preemption */
1037 preempt_enable_notrace();
1039 ring_buffer_unlock_commit(buffer);
1042 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1043 const char *str, int size)
1045 struct ring_buffer_event *event;
1046 struct trace_buffer *buffer;
1047 struct print_entry *entry;
1048 unsigned int trace_ctx;
1051 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1054 if (unlikely(tracing_selftest_running || tracing_disabled))
1057 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1059 trace_ctx = tracing_gen_ctx();
1060 buffer = tr->array_buffer.buffer;
1061 ring_buffer_nest_start(buffer);
1062 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1069 entry = ring_buffer_event_data(event);
1072 memcpy(&entry->buf, str, size);
1074 /* Add a newline if necessary */
1075 if (entry->buf[size - 1] != '\n') {
1076 entry->buf[size] = '\n';
1077 entry->buf[size + 1] = '\0';
1079 entry->buf[size] = '\0';
1081 __buffer_unlock_commit(buffer, event);
1082 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1084 ring_buffer_nest_end(buffer);
1087 EXPORT_SYMBOL_GPL(__trace_array_puts);
1090 * __trace_puts - write a constant string into the trace buffer.
1091 * @ip: The address of the caller
1092 * @str: The constant string to write
1093 * @size: The size of the string.
1095 int __trace_puts(unsigned long ip, const char *str, int size)
1097 return __trace_array_puts(&global_trace, ip, str, size);
1099 EXPORT_SYMBOL_GPL(__trace_puts);
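/*
 * Illustrative sketch (not part of this file): callers normally reach
 * __trace_puts() through the trace_puts()/trace_printk() helpers rather
 * than calling it directly. The function below is hypothetical.
 */
static void example_annotate_trace(int value)
{
	trace_puts("example: entering slow path\n");
	trace_printk("example: value=%d\n", value);
}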
1102 * __trace_bputs - write the pointer to a constant string into trace buffer
1103 * @ip: The address of the caller
1104 * @str: The constant string to write to the buffer to
1106 int __trace_bputs(unsigned long ip, const char *str)
1108 struct ring_buffer_event *event;
1109 struct trace_buffer *buffer;
1110 struct bputs_entry *entry;
1111 unsigned int trace_ctx;
1112 int size = sizeof(struct bputs_entry);
1115 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1118 if (unlikely(tracing_selftest_running || tracing_disabled))
1121 trace_ctx = tracing_gen_ctx();
1122 buffer = global_trace.array_buffer.buffer;
1124 ring_buffer_nest_start(buffer);
1125 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1130 entry = ring_buffer_event_data(event);
1134 __buffer_unlock_commit(buffer, event);
1135 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1139 ring_buffer_nest_end(buffer);
1142 EXPORT_SYMBOL_GPL(__trace_bputs);
1144 #ifdef CONFIG_TRACER_SNAPSHOT
1145 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1148 struct tracer *tracer = tr->current_trace;
1149 unsigned long flags;
1152 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1153 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1157 if (!tr->allocated_snapshot) {
1158 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1159 trace_array_puts(tr, "*** stopping trace here! ***\n");
1160 tracer_tracing_off(tr);
1164 /* Note, the snapshot cannot be used when the tracer uses it */
1165 if (tracer->use_max_tr) {
1166 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1167 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1171 local_irq_save(flags);
1172 update_max_tr(tr, current, smp_processor_id(), cond_data);
1173 local_irq_restore(flags);
1176 void tracing_snapshot_instance(struct trace_array *tr)
1178 tracing_snapshot_instance_cond(tr, NULL);
1182 * tracing_snapshot - take a snapshot of the current buffer.
1184 * This causes a swap between the snapshot buffer and the current live
1185 * tracing buffer. You can use this to take snapshots of the live
1186 * trace when some condition is triggered, but continue to trace.
1188 * Note, make sure to allocate the snapshot with either
1189 * a tracing_snapshot_alloc(), or by doing it manually
1190 * with: echo 1 > /sys/kernel/tracing/snapshot
1192 * If the snapshot buffer is not allocated, it will stop tracing.
1193 * Basically making a permanent snapshot.
1195 void tracing_snapshot(void)
1197 struct trace_array *tr = &global_trace;
1199 tracing_snapshot_instance(tr);
1201 EXPORT_SYMBOL_GPL(tracing_snapshot);
1204 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1205 * @tr: The tracing instance to snapshot
1206 * @cond_data: The data to be tested conditionally, and possibly saved
1208 * This is the same as tracing_snapshot() except that the snapshot is
1209 * conditional - the snapshot will only happen if the
1210 * cond_snapshot.update() implementation receiving the cond_data
1211 * returns true, which means that the trace array's cond_snapshot
1212 * update() operation used the cond_data to determine whether the
1213 * snapshot should be taken, and if it was, presumably saved it along
1214 * with the snapshot.
1216 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1218 tracing_snapshot_instance_cond(tr, cond_data);
1220 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1223 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1224 * @tr: The tracing instance
1226 * When the user enables a conditional snapshot using
1227 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1228 * with the snapshot. This accessor is used to retrieve it.
1230 * Should not be called from cond_snapshot.update(), since it takes
1231 * the tr->max_lock lock, which the code calling
1232 * cond_snapshot.update() has already done.
1234 * Returns the cond_data associated with the trace array's snapshot.
1236 void *tracing_cond_snapshot_data(struct trace_array *tr)
1238 void *cond_data = NULL;
1240 local_irq_disable();
1241 arch_spin_lock(&tr->max_lock);
1243 if (tr->cond_snapshot)
1244 cond_data = tr->cond_snapshot->cond_data;
1246 arch_spin_unlock(&tr->max_lock);
1251 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1253 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1254 struct array_buffer *size_buf, int cpu_id);
1255 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1257 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1261 if (!tr->allocated_snapshot) {
1263 /* allocate spare buffer */
1264 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1265 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1269 tr->allocated_snapshot = true;
1275 static void free_snapshot(struct trace_array *tr)
1278 * We don't free the ring buffer; instead, we resize it, because
1279 * the max_tr ring buffer has some state (e.g. ring->clock) and
1280 * we want to preserve it.
1282 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1283 set_buffer_entries(&tr->max_buffer, 1);
1284 tracing_reset_online_cpus(&tr->max_buffer);
1285 tr->allocated_snapshot = false;
1289 * tracing_alloc_snapshot - allocate snapshot buffer.
1291 * This only allocates the snapshot buffer if it isn't already
1292 * allocated - it doesn't also take a snapshot.
1294 * This is meant to be used in cases where the snapshot buffer needs
1295 * to be set up for events that can't sleep but need to be able to
1296 * trigger a snapshot.
1298 int tracing_alloc_snapshot(void)
1300 struct trace_array *tr = &global_trace;
1303 ret = tracing_alloc_snapshot_instance(tr);
1308 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1311 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1313 * This is similar to tracing_snapshot(), but it will allocate the
1314 * snapshot buffer if it isn't already allocated. Use this only
1315 * where it is safe to sleep, as the allocation may sleep.
1317 * This causes a swap between the snapshot buffer and the current live
1318 * tracing buffer. You can use this to take snapshots of the live
1319 * trace when some condition is triggered, but continue to trace.
1321 void tracing_snapshot_alloc(void)
1325 ret = tracing_alloc_snapshot();
1331 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
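/*
 * Illustrative sketch (not part of this file): typical in-kernel use of
 * the snapshot API above. The trigger function and its condition are
 * hypothetical; tracing_snapshot_alloc() and tracing_snapshot() are the
 * real interfaces.
 */
static int __init example_snapshot_setup(void)
{
	/* Allocate the spare buffer up front, where sleeping is fine. */
	tracing_snapshot_alloc();
	return 0;
}

static void example_capture_on_condition(bool hit_condition)
{
	/* Safe even from atomic context once the buffer is allocated. */
	if (hit_condition)
		tracing_snapshot();
}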
1334 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1335 * @tr: The tracing instance
1336 * @cond_data: User data to associate with the snapshot
1337 * @update: Implementation of the cond_snapshot update function
1339 * Check whether the conditional snapshot for the given instance has
1340 * already been enabled, or if the current tracer is already using a
1341 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1342 * save the cond_data and update function inside.
1344 * Returns 0 if successful, error otherwise.
1346 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1347 cond_update_fn_t update)
1349 struct cond_snapshot *cond_snapshot;
1352 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1356 cond_snapshot->cond_data = cond_data;
1357 cond_snapshot->update = update;
1359 mutex_lock(&trace_types_lock);
1361 ret = tracing_alloc_snapshot_instance(tr);
1365 if (tr->current_trace->use_max_tr) {
1371 * The cond_snapshot can only change to NULL without the
1372 * trace_types_lock. We don't care if we race with it going
1373 * to NULL, but we want to make sure that it's not set to
1374 * something other than NULL when we get here, which we can
1375 * do safely with only holding the trace_types_lock and not
1376 * having to take the max_lock.
1378 if (tr->cond_snapshot) {
1383 local_irq_disable();
1384 arch_spin_lock(&tr->max_lock);
1385 tr->cond_snapshot = cond_snapshot;
1386 arch_spin_unlock(&tr->max_lock);
1389 mutex_unlock(&trace_types_lock);
1394 mutex_unlock(&trace_types_lock);
1395 kfree(cond_snapshot);
1398 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1401 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1402 * @tr: The tracing instance
1404 * Check whether the conditional snapshot for the given instance is
1405 * enabled; if so, free the cond_snapshot associated with it,
1406 * otherwise return -EINVAL.
1408 * Returns 0 if successful, error otherwise.
1410 int tracing_snapshot_cond_disable(struct trace_array *tr)
1414 local_irq_disable();
1415 arch_spin_lock(&tr->max_lock);
1417 if (!tr->cond_snapshot)
1420 kfree(tr->cond_snapshot);
1421 tr->cond_snapshot = NULL;
1424 arch_spin_unlock(&tr->max_lock);
1429 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
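/*
 * Illustrative sketch (not part of this file): pairing a conditional
 * snapshot with its update callback. The callback, its threshold logic
 * and the cond_data type are hypothetical; tracing_snapshot_cond_enable()
 * and tracing_snapshot_cond() are the real interfaces documented above.
 */
static bool example_cond_update(struct trace_array *tr, void *cond_data)
{
	unsigned long *max_seen = cond_data;	/* caller-supplied data */

	/* Only let the snapshot happen once the tracked value is large. */
	return *max_seen > 100;
}

static int example_enable_cond_snapshot(struct trace_array *tr,
					unsigned long *max_seen)
{
	return tracing_snapshot_cond_enable(tr, max_seen,
					    example_cond_update);
}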
1431 void tracing_snapshot(void)
1433 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1435 EXPORT_SYMBOL_GPL(tracing_snapshot);
1436 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1438 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1440 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1441 int tracing_alloc_snapshot(void)
1443 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1446 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1447 void tracing_snapshot_alloc(void)
1452 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1453 void *tracing_cond_snapshot_data(struct trace_array *tr)
1457 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1458 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1462 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1463 int tracing_snapshot_cond_disable(struct trace_array *tr)
1467 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1468 #define free_snapshot(tr) do { } while (0)
1469 #endif /* CONFIG_TRACER_SNAPSHOT */
1471 void tracer_tracing_off(struct trace_array *tr)
1473 if (tr->array_buffer.buffer)
1474 ring_buffer_record_off(tr->array_buffer.buffer);
1476 * This flag is looked at when buffers haven't been allocated
1477 * yet, or by some tracers (like irqsoff) that just want to
1478 * know if the ring buffer has been disabled, but it can handle
1479 * races where it gets disabled while we still do a record.
1480 * As the check is in the fast path of the tracers, it is more
1481 * important to be fast than accurate.
1483 tr->buffer_disabled = 1;
1484 /* Make the flag seen by readers */
1489 * tracing_off - turn off tracing buffers
1491 * This function stops the tracing buffers from recording data.
1492 * It does not disable any overhead the tracers themselves may
1493 * be causing. This function simply causes all recording to
1494 * the ring buffers to fail.
1496 void tracing_off(void)
1498 tracer_tracing_off(&global_trace);
1500 EXPORT_SYMBOL_GPL(tracing_off);
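/*
 * Illustrative sketch (not part of this file): a common debugging pattern
 * is to call tracing_off() at the point a problem is detected, so the
 * ring buffer keeps the events that led up to it. The detection hook
 * below is hypothetical.
 */
static void example_on_problem_detected(void)
{
	/* Stop recording; everything up to this point stays in the buffer. */
	tracing_off();
}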
1502 void disable_trace_on_warning(void)
1504 if (__disable_trace_on_warning) {
1505 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1506 "Disabling tracing due to warning\n");
1512 * tracer_tracing_is_on - show real state of ring buffer enabled
1513 * @tr : the trace array to know if ring buffer is enabled
1515 * Shows real state of the ring buffer if it is enabled or not.
1517 bool tracer_tracing_is_on(struct trace_array *tr)
1519 if (tr->array_buffer.buffer)
1520 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1521 return !tr->buffer_disabled;
1525 * tracing_is_on - show state of ring buffers enabled
1527 int tracing_is_on(void)
1529 return tracer_tracing_is_on(&global_trace);
1531 EXPORT_SYMBOL_GPL(tracing_is_on);
1533 static int __init set_buf_size(char *str)
1535 unsigned long buf_size;
1539 buf_size = memparse(str, &str);
1541 * nr_entries cannot be zero and the startup
1542 * tests require some buffer space. Therefore
1543 * ensure we have at least 4096 bytes of buffer.
1545 trace_buf_size = max(4096UL, buf_size);
1548 __setup("trace_buf_size=", set_buf_size);
1550 static int __init set_tracing_thresh(char *str)
1552 unsigned long threshold;
1557 ret = kstrtoul(str, 0, &threshold);
1560 tracing_thresh = threshold * 1000;
1563 __setup("tracing_thresh=", set_tracing_thresh);
1565 unsigned long nsecs_to_usecs(unsigned long nsecs)
1567 return nsecs / 1000;
1571 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1572 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1573 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1574 * of strings in the order that the evals (enum) were defined.
1579 /* These must match the bit positions in trace_iterator_flags */
1580 static const char *trace_options[] = {
1588 int in_ns; /* is this clock in nanoseconds? */
1589 } trace_clocks[] = {
1590 { trace_clock_local, "local", 1 },
1591 { trace_clock_global, "global", 1 },
1592 { trace_clock_counter, "counter", 0 },
1593 { trace_clock_jiffies, "uptime", 0 },
1594 { trace_clock, "perf", 1 },
1595 { ktime_get_mono_fast_ns, "mono", 1 },
1596 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1597 { ktime_get_boot_fast_ns, "boot", 1 },
1598 { ktime_get_tai_fast_ns, "tai", 1 },
1602 bool trace_clock_in_ns(struct trace_array *tr)
1604 if (trace_clocks[tr->clock_id].in_ns)
1611 * trace_parser_get_init - gets the buffer for trace parser
1613 int trace_parser_get_init(struct trace_parser *parser, int size)
1615 memset(parser, 0, sizeof(*parser));
1617 parser->buffer = kmalloc(size, GFP_KERNEL);
1618 if (!parser->buffer)
1621 parser->size = size;
1626 * trace_parser_put - frees the buffer for trace parser
1628 void trace_parser_put(struct trace_parser *parser)
1630 kfree(parser->buffer);
1631 parser->buffer = NULL;
1635 * trace_get_user - reads the user input string separated by space
1636 * (matched by isspace(ch))
1638 * For each string found the 'struct trace_parser' is updated,
1639 * and the function returns.
1641 * Returns number of bytes read.
1643 * See kernel/trace/trace.h for 'struct trace_parser' details.
1645 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1646 size_t cnt, loff_t *ppos)
1653 trace_parser_clear(parser);
1655 ret = get_user(ch, ubuf++);
1663 * The parser is not finished with the last write,
1664 * continue reading the user input without skipping spaces.
1666 if (!parser->cont) {
1667 /* skip white space */
1668 while (cnt && isspace(ch)) {
1669 ret = get_user(ch, ubuf++);
1678 /* only spaces were written */
1679 if (isspace(ch) || !ch) {
1686 /* read the non-space input */
1687 while (cnt && !isspace(ch) && ch) {
1688 if (parser->idx < parser->size - 1)
1689 parser->buffer[parser->idx++] = ch;
1694 ret = get_user(ch, ubuf++);
1701 /* We either got finished input or we have to wait for another call. */
1702 if (isspace(ch) || !ch) {
1703 parser->buffer[parser->idx] = 0;
1704 parser->cont = false;
1705 } else if (parser->idx < parser->size - 1) {
1706 parser->cont = true;
1707 parser->buffer[parser->idx++] = ch;
1708 /* Make sure the parsed string always terminates with '\0'. */
1709 parser->buffer[parser->idx] = 0;
1722 /* TODO add a seq_buf_to_buffer() */
1723 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1727 if (trace_seq_used(s) <= s->seq.readpos)
1730 len = trace_seq_used(s) - s->seq.readpos;
1733 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1735 s->seq.readpos += cnt;
1739 unsigned long __read_mostly tracing_thresh;
1741 #ifdef CONFIG_TRACER_MAX_TRACE
1742 static const struct file_operations tracing_max_lat_fops;
1744 #ifdef LATENCY_FS_NOTIFY
1746 static struct workqueue_struct *fsnotify_wq;
1748 static void latency_fsnotify_workfn(struct work_struct *work)
1750 struct trace_array *tr = container_of(work, struct trace_array,
1752 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1755 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1757 struct trace_array *tr = container_of(iwork, struct trace_array,
1759 queue_work(fsnotify_wq, &tr->fsnotify_work);
1762 static void trace_create_maxlat_file(struct trace_array *tr,
1763 struct dentry *d_tracer)
1765 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1766 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1767 tr->d_max_latency = trace_create_file("tracing_max_latency",
1769 d_tracer, &tr->max_latency,
1770 &tracing_max_lat_fops);
1773 __init static int latency_fsnotify_init(void)
1775 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1776 WQ_UNBOUND | WQ_HIGHPRI, 0);
1778 pr_err("Unable to allocate tr_max_lat_wq\n");
1784 late_initcall_sync(latency_fsnotify_init);
1786 void latency_fsnotify(struct trace_array *tr)
1791 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1792 * possible that we are called from __schedule() or do_idle(), which
1793 * could cause a deadlock.
1795 irq_work_queue(&tr->fsnotify_irqwork);
1798 #else /* !LATENCY_FS_NOTIFY */
1800 #define trace_create_maxlat_file(tr, d_tracer) \
1801 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1802 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1807 * Copy the new maximum trace into the separate maximum-trace
1808 * structure. (This way the maximum trace is permanently saved
1809 * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
1812 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1814 struct array_buffer *trace_buf = &tr->array_buffer;
1815 struct array_buffer *max_buf = &tr->max_buffer;
1816 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1817 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1820 max_buf->time_start = data->preempt_timestamp;
1822 max_data->saved_latency = tr->max_latency;
1823 max_data->critical_start = data->critical_start;
1824 max_data->critical_end = data->critical_end;
1826 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1827 max_data->pid = tsk->pid;
1829 * If tsk == current, then use current_uid(), as that does not use
1830 * RCU. The irq tracer can be called out of RCU scope.
1833 max_data->uid = current_uid();
1835 max_data->uid = task_uid(tsk);
1837 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1838 max_data->policy = tsk->policy;
1839 max_data->rt_priority = tsk->rt_priority;
1841 /* record this tasks comm */
1842 tracing_record_cmdline(tsk);
1843 latency_fsnotify(tr);
1847 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1849 * @tsk: the task with the latency
1850 * @cpu: The cpu that initiated the trace.
1851 * @cond_data: User data associated with a conditional snapshot
1853 * Flip the buffers between the @tr and the max_tr and record information
1854 * about which task was the cause of this latency.
1857 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1863 WARN_ON_ONCE(!irqs_disabled());
1865 if (!tr->allocated_snapshot) {
1866 /* Only the nop tracer should hit this when disabling */
1867 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1871 arch_spin_lock(&tr->max_lock);
1873 /* Inherit the recordable setting from array_buffer */
1874 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1875 ring_buffer_record_on(tr->max_buffer.buffer);
1877 ring_buffer_record_off(tr->max_buffer.buffer);
1879 #ifdef CONFIG_TRACER_SNAPSHOT
1880 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1881 arch_spin_unlock(&tr->max_lock);
1885 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1887 __update_max_tr(tr, tsk, cpu);
1889 arch_spin_unlock(&tr->max_lock);
1893 * update_max_tr_single - only copy one trace over, and reset the rest
1895 * @tsk: task with the latency
1896 * @cpu: the cpu of the buffer to copy.
1898 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1901 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1908 WARN_ON_ONCE(!irqs_disabled());
1909 if (!tr->allocated_snapshot) {
1910 /* Only the nop tracer should hit this when disabling */
1911 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1915 arch_spin_lock(&tr->max_lock);
1917 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1919 if (ret == -EBUSY) {
1921 * We failed to swap the buffer due to a commit taking
1922 * place on this CPU. We fail to record, but we reset
1923 * the max trace buffer (no one writes directly to it)
1924 * and flag that it failed.
1926 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1927 "Failed to swap buffers due to commit in progress\n");
1930 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1932 __update_max_tr(tr, tsk, cpu);
1933 arch_spin_unlock(&tr->max_lock);
1936 #endif /* CONFIG_TRACER_MAX_TRACE */
1938 static int wait_on_pipe(struct trace_iterator *iter, int full)
1940 /* Iterators are static, they should be filled or empty */
1941 if (trace_buffer_iter(iter, iter->cpu_file))
1944 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1948 #ifdef CONFIG_FTRACE_STARTUP_TEST
1949 static bool selftests_can_run;
1951 struct trace_selftests {
1952 struct list_head list;
1953 struct tracer *type;
1956 static LIST_HEAD(postponed_selftests);
1958 static int save_selftest(struct tracer *type)
1960 struct trace_selftests *selftest;
1962 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1966 selftest->type = type;
1967 list_add(&selftest->list, &postponed_selftests);
1971 static int run_tracer_selftest(struct tracer *type)
1973 struct trace_array *tr = &global_trace;
1974 struct tracer *saved_tracer = tr->current_trace;
1977 if (!type->selftest || tracing_selftest_disabled)
1981 * If a tracer registers early in boot up (before scheduling is
1982 * initialized and such), then do not run its selftests yet.
1983 * Instead, run them a little later in the boot process.
1985 if (!selftests_can_run)
1986 return save_selftest(type);
1988 if (!tracing_is_on()) {
1989 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1995 * Run a selftest on this tracer.
1996 * Here we reset the trace buffer, and set the current
1997 * tracer to be this tracer. The tracer can then run some
1998 * internal tracing to verify that everything is in order.
1999 * If we fail, we do not register this tracer.
2001 tracing_reset_online_cpus(&tr->array_buffer);
2003 tr->current_trace = type;
2005 #ifdef CONFIG_TRACER_MAX_TRACE
2006 if (type->use_max_tr) {
2007 /* If we expanded the buffers, make sure the max is expanded too */
2008 if (ring_buffer_expanded)
2009 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2010 RING_BUFFER_ALL_CPUS);
2011 tr->allocated_snapshot = true;
2015 /* the test is responsible for initializing and enabling */
2016 pr_info("Testing tracer %s: ", type->name);
2017 ret = type->selftest(type, tr);
2018 /* the test is responsible for resetting too */
2019 tr->current_trace = saved_tracer;
2021 printk(KERN_CONT "FAILED!\n");
2022 /* Add the warning after printing 'FAILED' */
2026 /* Only reset on passing, to avoid touching corrupted buffers */
2027 tracing_reset_online_cpus(&tr->array_buffer);
2029 #ifdef CONFIG_TRACER_MAX_TRACE
2030 if (type->use_max_tr) {
2031 tr->allocated_snapshot = false;
2033 /* Shrink the max buffer again */
2034 if (ring_buffer_expanded)
2035 ring_buffer_resize(tr->max_buffer.buffer, 1,
2036 RING_BUFFER_ALL_CPUS);
2040 printk(KERN_CONT "PASSED\n");
2044 static __init int init_trace_selftests(void)
2046 struct trace_selftests *p, *n;
2047 struct tracer *t, **last;
2050 selftests_can_run = true;
2052 mutex_lock(&trace_types_lock);
2054 if (list_empty(&postponed_selftests))
2057 pr_info("Running postponed tracer tests:\n");
2059 tracing_selftest_running = true;
2060 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2061 /* This loop can take minutes when sanitizers are enabled, so
2062 * let's make sure we allow RCU processing.
2065 ret = run_tracer_selftest(p->type);
2066 /* If the test fails, then warn and remove from available_tracers */
2068 WARN(1, "tracer: %s failed selftest, disabling\n",
2070 last = &trace_types;
2071 for (t = trace_types; t; t = t->next) {
2082 tracing_selftest_running = false;
2085 mutex_unlock(&trace_types_lock);
2089 core_initcall(init_trace_selftests);
2091 static inline int run_tracer_selftest(struct tracer *type)
2095 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2097 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2099 static void __init apply_trace_boot_options(void);
2102 * register_tracer - register a tracer with the ftrace system.
2103 * @type: the plugin for the tracer
2105 * Register a new plugin tracer.
2107 int __init register_tracer(struct tracer *type)
2113 pr_info("Tracer must have a name\n");
2117 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2118 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2122 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2123 pr_warn("Can not register tracer %s due to lockdown\n",
2128 mutex_lock(&trace_types_lock);
2130 tracing_selftest_running = true;
2132 for (t = trace_types; t; t = t->next) {
2133 if (strcmp(type->name, t->name) == 0) {
2135 pr_info("Tracer %s already registered\n",
2142 if (!type->set_flag)
2143 type->set_flag = &dummy_set_flag;
2145 /* allocate a dummy tracer_flags */
2146 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2151 type->flags->val = 0;
2152 type->flags->opts = dummy_tracer_opt;
2154 if (!type->flags->opts)
2155 type->flags->opts = dummy_tracer_opt;
2157 /* store the tracer for __set_tracer_option */
2158 type->flags->trace = type;
2160 ret = run_tracer_selftest(type);
2164 type->next = trace_types;
2166 add_tracer_options(&global_trace, type);
2169 tracing_selftest_running = false;
2170 mutex_unlock(&trace_types_lock);
2172 if (ret || !default_bootup_tracer)
2175 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2178 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2179 /* Do we want this tracer to start on bootup? */
2180 tracing_set_tracer(&global_trace, type->name);
2181 default_bootup_tracer = NULL;
2183 apply_trace_boot_options();
2185 /* disable other selftests, since this will break it. */
2186 disable_tracing_selftest("running a tracer");
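/*
 * Illustrative sketch (not part of this file): the minimal shape of a
 * plugin tracer handed to register_tracer(). The "example" tracer and its
 * callbacks are hypothetical; real tracers (see kernel/trace/) fill in
 * many more of struct tracer's hooks.
 */
static int example_tracer_init(struct trace_array *tr)
{
	/* Start whatever instrumentation this tracer needs. */
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* Undo what init did. */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static int __init example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}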
2192 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2194 struct trace_buffer *buffer = buf->buffer;
2199 ring_buffer_record_disable(buffer);
2201 /* Make sure all commits have finished */
2203 ring_buffer_reset_cpu(buffer, cpu);
2205 ring_buffer_record_enable(buffer);
2208 void tracing_reset_online_cpus(struct array_buffer *buf)
2210 struct trace_buffer *buffer = buf->buffer;
2215 ring_buffer_record_disable(buffer);
2217 /* Make sure all commits have finished */
2220 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2222 ring_buffer_reset_online_cpus(buffer);
2224 ring_buffer_record_enable(buffer);
2227 /* Must have trace_types_lock held */
2228 void tracing_reset_all_online_cpus_unlocked(void)
2230 struct trace_array *tr;
2232 lockdep_assert_held(&trace_types_lock);
2234 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2235 if (!tr->clear_trace)
2237 tr->clear_trace = false;
2238 tracing_reset_online_cpus(&tr->array_buffer);
2239 #ifdef CONFIG_TRACER_MAX_TRACE
2240 tracing_reset_online_cpus(&tr->max_buffer);
2245 void tracing_reset_all_online_cpus(void)
2247 mutex_lock(&trace_types_lock);
2248 tracing_reset_all_online_cpus_unlocked();
2249 mutex_unlock(&trace_types_lock);
2253 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2254 * is the tgid last observed corresponding to pid=i.
2256 static int *tgid_map;
2258 /* The maximum valid index into tgid_map. */
2259 static size_t tgid_map_max;
2261 #define SAVED_CMDLINES_DEFAULT 128
2262 #define NO_CMDLINE_MAP UINT_MAX
2264 * Preemption must be disabled before acquiring trace_cmdline_lock.
2265 * The various trace_arrays' max_lock must be acquired in a context
2266 * where interrupts are disabled.
2268 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2269 struct saved_cmdlines_buffer {
2270 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2271 unsigned *map_cmdline_to_pid;
2272 unsigned cmdline_num;
2274 char *saved_cmdlines;
2276 static struct saved_cmdlines_buffer *savedcmd;
2278 static inline char *get_saved_cmdlines(int idx)
2280 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2283 static inline void set_cmdline(int idx, const char *cmdline)
2285 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2288 static int allocate_cmdlines_buffer(unsigned int val,
2289 struct saved_cmdlines_buffer *s)
2291 s->map_cmdline_to_pid = kmalloc_array(val,
2292 sizeof(*s->map_cmdline_to_pid),
2294 if (!s->map_cmdline_to_pid)
2297 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2298 if (!s->saved_cmdlines) {
2299 kfree(s->map_cmdline_to_pid);
2304 s->cmdline_num = val;
2305 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2306 sizeof(s->map_pid_to_cmdline));
2307 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2308 val * sizeof(*s->map_cmdline_to_pid));
2313 static int trace_create_savedcmd(void)
2317 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2321 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2331 int is_tracing_stopped(void)
2333 return global_trace.stop_count;
2337 * tracing_start - quick start of the tracer
2339 * If tracing is enabled but was stopped by tracing_stop,
2340 * this will start the tracer back up.
2342 void tracing_start(void)
2344 struct trace_buffer *buffer;
2345 unsigned long flags;
2347 if (tracing_disabled)
2350 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2351 if (--global_trace.stop_count) {
2352 if (global_trace.stop_count < 0) {
2353 /* Someone screwed up their debugging */
2355 global_trace.stop_count = 0;
2360 /* Prevent the buffers from switching */
2361 arch_spin_lock(&global_trace.max_lock);
2363 buffer = global_trace.array_buffer.buffer;
2365 ring_buffer_record_enable(buffer);
2367 #ifdef CONFIG_TRACER_MAX_TRACE
2368 buffer = global_trace.max_buffer.buffer;
2370 ring_buffer_record_enable(buffer);
2373 arch_spin_unlock(&global_trace.max_lock);
2376 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2379 static void tracing_start_tr(struct trace_array *tr)
2381 struct trace_buffer *buffer;
2382 unsigned long flags;
2384 if (tracing_disabled)
2387 /* If global, we need to also start the max tracer */
2388 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2389 return tracing_start();
2391 raw_spin_lock_irqsave(&tr->start_lock, flags);
2393 if (--tr->stop_count) {
2394 if (tr->stop_count < 0) {
2395 /* Someone screwed up their debugging */
2402 buffer = tr->array_buffer.buffer;
2404 ring_buffer_record_enable(buffer);
2407 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2411 * tracing_stop - quick stop of the tracer
2413 * Light weight way to stop tracing. Use in conjunction with
2416 void tracing_stop(void)
2418 struct trace_buffer *buffer;
2419 unsigned long flags;
2421 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2422 if (global_trace.stop_count++)
2425 /* Prevent the buffers from switching */
2426 arch_spin_lock(&global_trace.max_lock);
2428 buffer = global_trace.array_buffer.buffer;
2430 ring_buffer_record_disable(buffer);
2432 #ifdef CONFIG_TRACER_MAX_TRACE
2433 buffer = global_trace.max_buffer.buffer;
2435 ring_buffer_record_disable(buffer);
2438 arch_spin_unlock(&global_trace.max_lock);
2441 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
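/*
 * Usage sketch (illustrative, not from the original source): callers that
 * want to keep a section out of the trace bracket it with the pair above,
 * relying on the stop_count nesting:
 *
 *	tracing_stop();
 *	do_something_not_worth_tracing();	(hypothetical helper)
 *	tracing_start();
 *
 * Nested stop/start pairs are fine; the buffers are only re-enabled once
 * stop_count drops back to zero.
 */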
2444 static void tracing_stop_tr(struct trace_array *tr)
2446 struct trace_buffer *buffer;
2447 unsigned long flags;
2449 /* If global, we need to also stop the max tracer */
2450 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2451 return tracing_stop();
2453 raw_spin_lock_irqsave(&tr->start_lock, flags);
2454 if (tr->stop_count++)
2457 buffer = tr->array_buffer.buffer;
2459 ring_buffer_record_disable(buffer);
2462 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2465 static int trace_save_cmdline(struct task_struct *tsk)
2469 /* treat recording of idle task as a success */
2473 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2476 * It's not the end of the world if we don't get
2477 * the lock, but we also don't want to spin
2478 * nor do we want to disable interrupts,
2479 * so if we miss here, then better luck next time.
2481 * This is called within the scheduler and wakeup paths, so interrupts
2482 * had better be disabled and the run queue lock held.
2484 lockdep_assert_preemption_disabled();
2485 if (!arch_spin_trylock(&trace_cmdline_lock))
2488 idx = savedcmd->map_pid_to_cmdline[tpid];
2489 if (idx == NO_CMDLINE_MAP) {
2490 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2492 savedcmd->map_pid_to_cmdline[tpid] = idx;
2493 savedcmd->cmdline_idx = idx;
2496 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2497 set_cmdline(idx, tsk->comm);
2499 arch_spin_unlock(&trace_cmdline_lock);
2504 static void __trace_find_cmdline(int pid, char comm[])
2510 strcpy(comm, "<idle>");
2514 if (WARN_ON_ONCE(pid < 0)) {
2515 strcpy(comm, "<XXX>");
2519 tpid = pid & (PID_MAX_DEFAULT - 1);
2520 map = savedcmd->map_pid_to_cmdline[tpid];
2521 if (map != NO_CMDLINE_MAP) {
2522 tpid = savedcmd->map_cmdline_to_pid[map];
2524 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2528 strcpy(comm, "<...>");
2531 void trace_find_cmdline(int pid, char comm[])
2534 arch_spin_lock(&trace_cmdline_lock);
2536 __trace_find_cmdline(pid, comm);
2538 arch_spin_unlock(&trace_cmdline_lock);
2542 static int *trace_find_tgid_ptr(int pid)
2545 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2546 * if we observe a non-NULL tgid_map then we also observe the correct
2549 int *map = smp_load_acquire(&tgid_map);
2551 if (unlikely(!map || pid > tgid_map_max))
2557 int trace_find_tgid(int pid)
2559 int *ptr = trace_find_tgid_ptr(pid);
2561 return ptr ? *ptr : 0;
2564 static int trace_save_tgid(struct task_struct *tsk)
2568 /* treat recording of idle task as a success */
2572 ptr = trace_find_tgid_ptr(tsk->pid);
2580 static bool tracing_record_taskinfo_skip(int flags)
2582 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2584 if (!__this_cpu_read(trace_taskinfo_save))
2590 * tracing_record_taskinfo - record the task info of a task
2592 * @task: task to record
2593 * @flags: TRACE_RECORD_CMDLINE for recording comm
2594 * TRACE_RECORD_TGID for recording tgid
2596 void tracing_record_taskinfo(struct task_struct *task, int flags)
2600 if (tracing_record_taskinfo_skip(flags))
2604 * Record as much task information as possible. If some fail, continue
2605 * to try to record the others.
2607 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2608 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2610 /* If recording any information failed, retry again soon. */
2614 __this_cpu_write(trace_taskinfo_save, false);
2618 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2620 * @prev: previous task during sched_switch
2621 * @next: next task during sched_switch
2622 * @flags: TRACE_RECORD_CMDLINE for recording comm
2623 * TRACE_RECORD_TGID for recording tgid
2625 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2626 struct task_struct *next, int flags)
2630 if (tracing_record_taskinfo_skip(flags))
2634 * Record as much task information as possible. If some fail, continue
2635 * to try to record the others.
2637 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2638 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2639 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2640 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2642 /* If recording any information failed, retry again soon. */
2646 __this_cpu_write(trace_taskinfo_save, false);
2649 /* Helpers to record a specific task information */
2650 void tracing_record_cmdline(struct task_struct *task)
2652 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2655 void tracing_record_tgid(struct task_struct *task)
2657 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2661 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2662 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2663 * simplifies those functions and keeps them in sync.
2665 enum print_line_t trace_handle_return(struct trace_seq *s)
2667 return trace_seq_has_overflowed(s) ?
2668 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2670 EXPORT_SYMBOL_GPL(trace_handle_return);
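/*
 * A minimal sketch of the intended call pattern (the event and field names
 * are hypothetical): an output callback writes into the trace_seq and lets
 * trace_handle_return() translate any overflow into the right return value:
 *
 *	static enum print_line_t my_event_trace(struct trace_iterator *iter,
 *						int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my_event: %d\n", 0);
 *		return trace_handle_return(&iter->seq);
 *	}
 */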
2672 static unsigned short migration_disable_value(void)
2674 #if defined(CONFIG_SMP)
2675 return current->migration_disabled;
2681 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2683 unsigned int trace_flags = irqs_status;
2686 pc = preempt_count();
2689 trace_flags |= TRACE_FLAG_NMI;
2690 if (pc & HARDIRQ_MASK)
2691 trace_flags |= TRACE_FLAG_HARDIRQ;
2692 if (in_serving_softirq())
2693 trace_flags |= TRACE_FLAG_SOFTIRQ;
2694 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2695 trace_flags |= TRACE_FLAG_BH_OFF;
2697 if (tif_need_resched())
2698 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2699 if (test_preempt_need_resched())
2700 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2701 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2702 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
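/*
 * A minimal sketch of the packing above (assuming the TRACE_FLAG_* and
 * preempt-count layout from trace.h): the low nibble holds the preempt
 * depth (capped at 15), bits 4-7 hold the migrate-disable depth (also
 * capped), and the TRACE_FLAG_* context bits land at bit 16 and above.
 * Unpacking the returned value is therefore roughly:
 *
 *	unsigned int preempt_depth =  ctx        & 0xf;
 *	unsigned int migrate_depth = (ctx >> 4)  & 0xf;
 *	unsigned int flags         =  ctx >> 16;
 */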
2705 struct ring_buffer_event *
2706 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2709 unsigned int trace_ctx)
2711 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2714 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2715 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2716 static int trace_buffered_event_ref;
2719 * trace_buffered_event_enable - enable buffering events
2721 * When events are being filtered, it is quicker to use a temporary
2722 * buffer to write the event data into if there's a likely chance
2723 * that it will not be committed. The discard of the ring buffer
2724 * is not as fast as committing, and is much slower than copying into the temporary buffer.
2727 * When an event is to be filtered, allocate per cpu buffers to
2728 * write the event data into, and if the event is filtered and discarded
2729 * it is simply dropped; otherwise, the entire data is committed to the ring buffer.
2732 void trace_buffered_event_enable(void)
2734 struct ring_buffer_event *event;
2738 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2740 if (trace_buffered_event_ref++)
2743 for_each_tracing_cpu(cpu) {
2744 page = alloc_pages_node(cpu_to_node(cpu),
2745 GFP_KERNEL | __GFP_NORETRY, 0);
2749 event = page_address(page);
2750 memset(event, 0, sizeof(*event));
2752 per_cpu(trace_buffered_event, cpu) = event;
2755 if (cpu == smp_processor_id() &&
2756 __this_cpu_read(trace_buffered_event) !=
2757 per_cpu(trace_buffered_event, cpu))
2764 trace_buffered_event_disable();
2767 static void enable_trace_buffered_event(void *data)
2769 /* Probably not needed, but do it anyway */
2771 this_cpu_dec(trace_buffered_event_cnt);
2774 static void disable_trace_buffered_event(void *data)
2776 this_cpu_inc(trace_buffered_event_cnt);
2780 * trace_buffered_event_disable - disable buffering events
2782 * When a filter is removed, it is faster to not use the buffered
2783 * events, and to commit directly into the ring buffer. Free up
2784 * the temp buffers when there are no more users. This requires
2785 * special synchronization with current events.
2787 void trace_buffered_event_disable(void)
2791 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2793 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2796 if (--trace_buffered_event_ref)
2800 /* For each CPU, set the buffer as used. */
2801 smp_call_function_many(tracing_buffer_mask,
2802 disable_trace_buffered_event, NULL, 1);
2805 /* Wait for all current users to finish */
2808 for_each_tracing_cpu(cpu) {
2809 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2810 per_cpu(trace_buffered_event, cpu) = NULL;
2813 * Make sure trace_buffered_event is NULL before clearing
2814 * trace_buffered_event_cnt.
2819 /* Do the work on each cpu */
2820 smp_call_function_many(tracing_buffer_mask,
2821 enable_trace_buffered_event, NULL, 1);
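/*
 * Illustrative pairing (sketch, not from the original source): the event
 * filtering code holds event_mutex (both functions above WARN if it is not
 * held) and brackets filter installation/removal roughly like:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		(first filter added)
 *	...
 *	trace_buffered_event_disable();		(last filter removed)
 *	mutex_unlock(&event_mutex);
 *
 * The trace_buffered_event_ref count keeps the per-CPU pages alive while
 * any filter still needs them.
 */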
2825 static struct trace_buffer *temp_buffer;
2827 struct ring_buffer_event *
2828 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2829 struct trace_event_file *trace_file,
2830 int type, unsigned long len,
2831 unsigned int trace_ctx)
2833 struct ring_buffer_event *entry;
2834 struct trace_array *tr = trace_file->tr;
2837 *current_rb = tr->array_buffer.buffer;
2839 if (!tr->no_filter_buffering_ref &&
2840 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2841 preempt_disable_notrace();
2843 * Filtering is on, so try to use the per cpu buffer first.
2844 * This buffer will simulate a ring_buffer_event,
2845 * where the type_len is zero and the array[0] will
2846 * hold the full length.
2847 * (see include/linux/ring_buffer.h for details on
2848 * how the ring_buffer_event is structured).
2850 * Using a temp buffer during filtering and copying it
2851 * on a matched filter is quicker than writing directly
2852 * into the ring buffer and then discarding it when
2853 * it doesn't match. That is because the discard
2854 * requires several atomic operations to get right.
2855 * Copying on match and doing nothing on a failed match
2856 * is still quicker than no copy on match, but having
2857 * to discard out of the ring buffer on a failed match.
2859 if ((entry = __this_cpu_read(trace_buffered_event))) {
2860 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2862 val = this_cpu_inc_return(trace_buffered_event_cnt);
2865 * Preemption is disabled, but interrupts and NMIs
2866 * can still come in now. If that happens after
2867 * the above increment, then it will have to go
2868 * back to the old method of allocating the event
2869 * on the ring buffer, and if the filter fails, it
2870 * will have to call ring_buffer_discard_commit()
2873 * Need to also check the unlikely case that the
2874 * length is bigger than the temp buffer size.
2875 * If that happens, then the reserve is pretty much
2876 * guaranteed to fail, as the ring buffer currently
2877 * only allows events less than a page. But that may
2878 * change in the future, so let the ring buffer reserve
2879 * handle the failure in that case.
2881 if (val == 1 && likely(len <= max_len)) {
2882 trace_event_setup(entry, type, trace_ctx);
2883 entry->array[0] = len;
2884 /* Return with preemption disabled */
2887 this_cpu_dec(trace_buffered_event_cnt);
2889 /* __trace_buffer_lock_reserve() disables preemption */
2890 preempt_enable_notrace();
2893 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2896 * If tracing is off, but we have triggers enabled
2897 * we still need to look at the event data. Use the temp_buffer
2898 * to store the trace event for the trigger to use. It's recursive
2899 * safe and will not be recorded anywhere.
2901 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2902 *current_rb = temp_buffer;
2903 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2908 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
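/*
 * A rough sketch of how generated trace event code ends up driving the
 * function above (the entry type and field are hypothetical): the reserve,
 * via trace_event_buffer_reserve() in trace_events.c, may hand back either
 * the per-CPU buffered event or a real ring buffer slot, and the matching
 * trace_event_buffer_commit() below sorts out which path was taken:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = value;
 *	trace_event_buffer_commit(&fbuffer);
 */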
2910 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2911 static DEFINE_MUTEX(tracepoint_printk_mutex);
2913 static void output_printk(struct trace_event_buffer *fbuffer)
2915 struct trace_event_call *event_call;
2916 struct trace_event_file *file;
2917 struct trace_event *event;
2918 unsigned long flags;
2919 struct trace_iterator *iter = tracepoint_print_iter;
2921 /* We should never get here if iter is NULL */
2922 if (WARN_ON_ONCE(!iter))
2925 event_call = fbuffer->trace_file->event_call;
2926 if (!event_call || !event_call->event.funcs ||
2927 !event_call->event.funcs->trace)
2930 file = fbuffer->trace_file;
2931 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2932 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2933 !filter_match_preds(file->filter, fbuffer->entry)))
2936 event = &fbuffer->trace_file->event_call->event;
2938 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2939 trace_seq_init(&iter->seq);
2940 iter->ent = fbuffer->entry;
2941 event_call->event.funcs->trace(iter, 0, event);
2942 trace_seq_putc(&iter->seq, 0);
2943 printk("%s", iter->seq.buffer);
2945 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2948 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2949 void *buffer, size_t *lenp,
2952 int save_tracepoint_printk;
2955 mutex_lock(&tracepoint_printk_mutex);
2956 save_tracepoint_printk = tracepoint_printk;
2958 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2961 * This will force exiting early, as tracepoint_printk
2962 * is always zero when tracepoint_printk_iter is not allocated
2964 if (!tracepoint_print_iter)
2965 tracepoint_printk = 0;
2967 if (save_tracepoint_printk == tracepoint_printk)
2970 if (tracepoint_printk)
2971 static_key_enable(&tracepoint_printk_key.key);
2973 static_key_disable(&tracepoint_printk_key.key);
2976 mutex_unlock(&tracepoint_printk_mutex);
2981 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2983 enum event_trigger_type tt = ETT_NONE;
2984 struct trace_event_file *file = fbuffer->trace_file;
2986 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2987 fbuffer->entry, &tt))
2990 if (static_key_false(&tracepoint_printk_key.key))
2991 output_printk(fbuffer);
2993 if (static_branch_unlikely(&trace_event_exports_enabled))
2994 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2996 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2997 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
3001 event_triggers_post_call(file, tt);
3004 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
3009 * trace_buffer_unlock_commit_regs()
3010 * trace_event_buffer_commit()
3011 * trace_event_raw_event_xxx()
3013 # define STACK_SKIP 3
3015 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
3016 struct trace_buffer *buffer,
3017 struct ring_buffer_event *event,
3018 unsigned int trace_ctx,
3019 struct pt_regs *regs)
3021 __buffer_unlock_commit(buffer, event);
3024 * If regs is not set, then skip the necessary functions.
3025 * Note, we can still get here via blktrace, wakeup tracer
3026 * and mmiotrace, but that's ok if they lose a function or
3027 * two. They are not that meaningful.
3029 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3030 ftrace_trace_userstack(tr, buffer, trace_ctx);
3034 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3037 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3038 struct ring_buffer_event *event)
3040 __buffer_unlock_commit(buffer, event);
3044 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3045 parent_ip, unsigned int trace_ctx)
3047 struct trace_event_call *call = &event_function;
3048 struct trace_buffer *buffer = tr->array_buffer.buffer;
3049 struct ring_buffer_event *event;
3050 struct ftrace_entry *entry;
3052 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3056 entry = ring_buffer_event_data(event);
3058 entry->parent_ip = parent_ip;
3060 if (!call_filter_check_discard(call, entry, buffer, event)) {
3061 if (static_branch_unlikely(&trace_function_exports_enabled))
3062 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3063 __buffer_unlock_commit(buffer, event);
3067 #ifdef CONFIG_STACKTRACE
3069 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3070 #define FTRACE_KSTACK_NESTING 4
3072 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3074 struct ftrace_stack {
3075 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3079 struct ftrace_stacks {
3080 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3083 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3084 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3086 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3087 unsigned int trace_ctx,
3088 int skip, struct pt_regs *regs)
3090 struct trace_event_call *call = &event_kernel_stack;
3091 struct ring_buffer_event *event;
3092 unsigned int size, nr_entries;
3093 struct ftrace_stack *fstack;
3094 struct stack_entry *entry;
3098 * Add one, for this function and the call to save_stack_trace().
3099 * If regs is set, then these functions will not be in the way.
3101 #ifndef CONFIG_UNWINDER_ORC
3106 preempt_disable_notrace();
3108 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3110 /* This should never happen. If it does, yell once and skip */
3111 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3115 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3116 * interrupt will either see the value pre increment or post
3117 * increment. If the interrupt happens pre increment it will have
3118 * restored the counter when it returns. We just need a barrier to
3119 * keep gcc from moving things around.
3123 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3124 size = ARRAY_SIZE(fstack->calls);
3127 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3130 nr_entries = stack_trace_save(fstack->calls, size, skip);
3133 size = nr_entries * sizeof(unsigned long);
3134 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3135 (sizeof(*entry) - sizeof(entry->caller)) + size,
3139 entry = ring_buffer_event_data(event);
3141 memcpy(&entry->caller, fstack->calls, size);
3142 entry->size = nr_entries;
3144 if (!call_filter_check_discard(call, entry, buffer, event))
3145 __buffer_unlock_commit(buffer, event);
3148 /* Again, don't let gcc optimize things here */
3150 __this_cpu_dec(ftrace_stack_reserve);
3151 preempt_enable_notrace();
3155 static inline void ftrace_trace_stack(struct trace_array *tr,
3156 struct trace_buffer *buffer,
3157 unsigned int trace_ctx,
3158 int skip, struct pt_regs *regs)
3160 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3163 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3166 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3169 struct trace_buffer *buffer = tr->array_buffer.buffer;
3171 if (rcu_is_watching()) {
3172 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3176 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3180 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3181 * but if the above rcu_is_watching() failed, then the NMI
3182 * triggered someplace critical, and ct_irq_enter() should
3183 * not be called from NMI.
3185 if (unlikely(in_nmi()))
3188 ct_irq_enter_irqson();
3189 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3190 ct_irq_exit_irqson();
3194 * trace_dump_stack - record a stack back trace in the trace buffer
3195 * @skip: Number of functions to skip (helper handlers)
3197 void trace_dump_stack(int skip)
3199 if (tracing_disabled || tracing_selftest_running)
3202 #ifndef CONFIG_UNWINDER_ORC
3203 /* Skip 1 to skip this function. */
3206 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3207 tracing_gen_ctx(), skip, NULL);
3209 EXPORT_SYMBOL_GPL(trace_dump_stack);
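/*
 * Usage sketch (illustrative): a developer chasing how a path is reached
 * can drop a one-liner into the suspect function and read the backtrace
 * from the trace buffer rather than the console:
 *
 *	trace_dump_stack(0);
 *
 * A non-zero @skip trims that many callers off the top of the recorded
 * stack, as described in the kernel-doc above.
 */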
3211 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3212 static DEFINE_PER_CPU(int, user_stack_count);
3215 ftrace_trace_userstack(struct trace_array *tr,
3216 struct trace_buffer *buffer, unsigned int trace_ctx)
3218 struct trace_event_call *call = &event_user_stack;
3219 struct ring_buffer_event *event;
3220 struct userstack_entry *entry;
3222 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3226 * NMIs can not handle page faults, even with fixups.
3227 * Saving the user stack can (and often does) fault.
3229 if (unlikely(in_nmi()))
3233 * prevent recursion, since the user stack tracing may
3234 * trigger other kernel events.
3237 if (__this_cpu_read(user_stack_count))
3240 __this_cpu_inc(user_stack_count);
3242 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3243 sizeof(*entry), trace_ctx);
3245 goto out_drop_count;
3246 entry = ring_buffer_event_data(event);
3248 entry->tgid = current->tgid;
3249 memset(&entry->caller, 0, sizeof(entry->caller));
3251 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3252 if (!call_filter_check_discard(call, entry, buffer, event))
3253 __buffer_unlock_commit(buffer, event);
3256 __this_cpu_dec(user_stack_count);
3260 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3261 static void ftrace_trace_userstack(struct trace_array *tr,
3262 struct trace_buffer *buffer,
3263 unsigned int trace_ctx)
3266 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3268 #endif /* CONFIG_STACKTRACE */
3271 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3272 unsigned long long delta)
3274 entry->bottom_delta_ts = delta & U32_MAX;
3275 entry->top_delta_ts = (delta >> 32);
3278 void trace_last_func_repeats(struct trace_array *tr,
3279 struct trace_func_repeats *last_info,
3280 unsigned int trace_ctx)
3282 struct trace_buffer *buffer = tr->array_buffer.buffer;
3283 struct func_repeats_entry *entry;
3284 struct ring_buffer_event *event;
3287 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3288 sizeof(*entry), trace_ctx);
3292 delta = ring_buffer_event_time_stamp(buffer, event) -
3293 last_info->ts_last_call;
3295 entry = ring_buffer_event_data(event);
3296 entry->ip = last_info->ip;
3297 entry->parent_ip = last_info->parent_ip;
3298 entry->count = last_info->count;
3299 func_repeats_set_delta_ts(entry, delta);
3301 __buffer_unlock_commit(buffer, event);
3304 /* created for use with alloc_percpu */
3305 struct trace_buffer_struct {
3307 char buffer[4][TRACE_BUF_SIZE];
3310 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3313 * This allows for lockless recording. If we're nested too deeply, then
3314 * this returns NULL.
3316 static char *get_trace_buf(void)
3318 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3320 if (!trace_percpu_buffer || buffer->nesting >= 4)
3325 /* Interrupts must see nesting incremented before we use the buffer */
3327 return &buffer->buffer[buffer->nesting - 1][0];
3330 static void put_trace_buf(void)
3332 /* Don't let the decrement of nesting leak before this */
3334 this_cpu_dec(trace_percpu_buffer->nesting);
3337 static int alloc_percpu_trace_buffer(void)
3339 struct trace_buffer_struct __percpu *buffers;
3341 if (trace_percpu_buffer)
3344 buffers = alloc_percpu(struct trace_buffer_struct);
3345 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3348 trace_percpu_buffer = buffers;
3352 static int buffers_allocated;
3354 void trace_printk_init_buffers(void)
3356 if (buffers_allocated)
3359 if (alloc_percpu_trace_buffer())
3362 /* trace_printk() is for debug use only. Don't use it in production. */
3365 pr_warn("**********************************************************\n");
3366 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3368 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3370 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3371 pr_warn("** unsafe for production use. **\n");
3373 pr_warn("** If you see this message and you are not debugging **\n");
3374 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3376 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3377 pr_warn("**********************************************************\n");
3379 /* Expand the buffers to set size */
3380 tracing_update_buffers();
3382 buffers_allocated = 1;
3385 * trace_printk_init_buffers() can be called by modules.
3386 * If that happens, then we need to start cmdline recording
3387 * directly here. If the global_trace.buffer is already
3388 * allocated here, then this was called by module code.
3390 if (global_trace.array_buffer.buffer)
3391 tracing_start_cmdline_record();
3393 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3395 void trace_printk_start_comm(void)
3397 /* Start tracing comms if trace printk is set */
3398 if (!buffers_allocated)
3400 tracing_start_cmdline_record();
3403 static void trace_printk_start_stop_comm(int enabled)
3405 if (!buffers_allocated)
3409 tracing_start_cmdline_record();
3411 tracing_stop_cmdline_record();
3415 * trace_vbprintk - write binary msg to tracing buffer
3416 * @ip: The address of the caller
3417 * @fmt: The string format to write to the buffer
3418 * @args: Arguments for @fmt
3420 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3422 struct trace_event_call *call = &event_bprint;
3423 struct ring_buffer_event *event;
3424 struct trace_buffer *buffer;
3425 struct trace_array *tr = &global_trace;
3426 struct bprint_entry *entry;
3427 unsigned int trace_ctx;
3431 if (unlikely(tracing_selftest_running || tracing_disabled))
3434 /* Don't pollute graph traces with trace_vprintk internals */
3435 pause_graph_tracing();
3437 trace_ctx = tracing_gen_ctx();
3438 preempt_disable_notrace();
3440 tbuffer = get_trace_buf();
3446 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3448 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3451 size = sizeof(*entry) + sizeof(u32) * len;
3452 buffer = tr->array_buffer.buffer;
3453 ring_buffer_nest_start(buffer);
3454 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3458 entry = ring_buffer_event_data(event);
3462 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3463 if (!call_filter_check_discard(call, entry, buffer, event)) {
3464 __buffer_unlock_commit(buffer, event);
3465 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3469 ring_buffer_nest_end(buffer);
3474 preempt_enable_notrace();
3475 unpause_graph_tracing();
3479 EXPORT_SYMBOL_GPL(trace_vbprintk);
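/*
 * Usage sketch (illustrative): trace_vbprintk() is roughly the backend the
 * trace_printk() macro lands in when given a constant format with
 * arguments, e.g.:
 *
 *	trace_printk("processing id=%d state=%lu\n", id, state);
 *
 * The binary bprint entry stores only the format pointer and the argument
 * words; the format is expanded when the buffer is read.
 */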
3483 __trace_array_vprintk(struct trace_buffer *buffer,
3484 unsigned long ip, const char *fmt, va_list args)
3486 struct trace_event_call *call = &event_print;
3487 struct ring_buffer_event *event;
3489 struct print_entry *entry;
3490 unsigned int trace_ctx;
3493 if (tracing_disabled || tracing_selftest_running)
3496 /* Don't pollute graph traces with trace_vprintk internals */
3497 pause_graph_tracing();
3499 trace_ctx = tracing_gen_ctx();
3500 preempt_disable_notrace();
3503 tbuffer = get_trace_buf();
3509 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3511 size = sizeof(*entry) + len + 1;
3512 ring_buffer_nest_start(buffer);
3513 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3517 entry = ring_buffer_event_data(event);
3520 memcpy(&entry->buf, tbuffer, len + 1);
3521 if (!call_filter_check_discard(call, entry, buffer, event)) {
3522 __buffer_unlock_commit(buffer, event);
3523 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3527 ring_buffer_nest_end(buffer);
3531 preempt_enable_notrace();
3532 unpause_graph_tracing();
3538 int trace_array_vprintk(struct trace_array *tr,
3539 unsigned long ip, const char *fmt, va_list args)
3541 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3545 * trace_array_printk - Print a message to a specific instance
3546 * @tr: The instance trace_array descriptor
3547 * @ip: The instruction pointer that this is called from.
3548 * @fmt: The format to print (printf format)
3550 * If a subsystem sets up its own instance, it has the right to
3551 * printk strings into its tracing instance buffer using this
3552 * function. Note, this function will not write into the top level
3553 * buffer (use trace_printk() for that), as writing into the top level
3554 * buffer should only have events that can be individually disabled.
3555 * trace_printk() is only used for debugging a kernel, and should never
3556 * be incorporated into normal use.
3558 * trace_array_printk() can be used, as it will not add noise to the
3559 * top level tracing buffer.
3561 * Note, trace_array_init_printk() must be called on @tr before this can be used.
3565 int trace_array_printk(struct trace_array *tr,
3566 unsigned long ip, const char *fmt, ...)
3574 /* This is only allowed for created instances */
3575 if (tr == &global_trace)
3578 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3582 ret = trace_array_vprintk(tr, ip, fmt, ap);
3586 EXPORT_SYMBOL_GPL(trace_array_printk);
3589 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3590 * @tr: The trace array to initialize the buffers for
3592 * As trace_array_printk() only writes into instances, they are OK to
3593 * have in the kernel (unlike trace_printk()). This needs to be called
3594 * before trace_array_printk() can be used on a trace_array.
3596 int trace_array_init_printk(struct trace_array *tr)
3601 /* This is only allowed for created instances */
3602 if (tr == &global_trace)
3605 return alloc_percpu_trace_buffer();
3607 EXPORT_SYMBOL_GPL(trace_array_init_printk);
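/*
 * Combined usage sketch (illustrative; the instance name is hypothetical,
 * and the lookup assumes the trace_array_get_by_name() helper exported
 * elsewhere in this file): a subsystem that owns its own trace instance
 * would typically do:
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello from %s\n", "my_subsys");
 *
 * keeping its debug output out of the top level buffer, as the kernel-doc
 * above recommends.
 */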
3610 int trace_array_printk_buf(struct trace_buffer *buffer,
3611 unsigned long ip, const char *fmt, ...)
3616 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3620 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3626 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3628 return trace_array_vprintk(&global_trace, ip, fmt, args);
3630 EXPORT_SYMBOL_GPL(trace_vprintk);
3632 static void trace_iterator_increment(struct trace_iterator *iter)
3634 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3638 ring_buffer_iter_advance(buf_iter);
3641 static struct trace_entry *
3642 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3643 unsigned long *lost_events)
3645 struct ring_buffer_event *event;
3646 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3649 event = ring_buffer_iter_peek(buf_iter, ts);
3651 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3652 (unsigned long)-1 : 0;
3654 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3659 iter->ent_size = ring_buffer_event_length(event);
3660 return ring_buffer_event_data(event);
3666 static struct trace_entry *
3667 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3668 unsigned long *missing_events, u64 *ent_ts)
3670 struct trace_buffer *buffer = iter->array_buffer->buffer;
3671 struct trace_entry *ent, *next = NULL;
3672 unsigned long lost_events = 0, next_lost = 0;
3673 int cpu_file = iter->cpu_file;
3674 u64 next_ts = 0, ts;
3680 * If we are in a per_cpu trace file, don't bother by iterating over
3681 * all cpu and peek directly.
3683 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3684 if (ring_buffer_empty_cpu(buffer, cpu_file))
3686 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3688 *ent_cpu = cpu_file;
3693 for_each_tracing_cpu(cpu) {
3695 if (ring_buffer_empty_cpu(buffer, cpu))
3698 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3701 * Pick the entry with the smallest timestamp:
3703 if (ent && (!next || ts < next_ts)) {
3707 next_lost = lost_events;
3708 next_size = iter->ent_size;
3712 iter->ent_size = next_size;
3715 *ent_cpu = next_cpu;
3721 *missing_events = next_lost;
3726 #define STATIC_FMT_BUF_SIZE 128
3727 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3729 char *trace_iter_expand_format(struct trace_iterator *iter)
3734 * iter->tr is NULL when used with tp_printk, which makes
3735 * this get called where it is not safe to call krealloc().
3737 if (!iter->tr || iter->fmt == static_fmt_buf)
3740 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3743 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3750 /* Returns true if the string is safe to dereference from an event */
3751 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3754 unsigned long addr = (unsigned long)str;
3755 struct trace_event *trace_event;
3756 struct trace_event_call *event;
3758 /* Ignore strings with no length */
3762 /* OK if part of the event data */
3763 if ((addr >= (unsigned long)iter->ent) &&
3764 (addr < (unsigned long)iter->ent + iter->ent_size))
3767 /* OK if part of the temp seq buffer */
3768 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3769 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3772 /* Core rodata can not be freed */
3773 if (is_kernel_rodata(addr))
3776 if (trace_is_tracepoint_string(str))
3780 * Now this could be a module event, referencing core module
3781 * data, which is OK.
3786 trace_event = ftrace_find_event(iter->ent->type);
3790 event = container_of(trace_event, struct trace_event_call, event);
3791 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3794 /* Would rather have rodata, but this will suffice */
3795 if (within_module_core(addr, event->module))
3801 static const char *show_buffer(struct trace_seq *s)
3803 struct seq_buf *seq = &s->seq;
3805 seq_buf_terminate(seq);
3810 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3812 static int test_can_verify_check(const char *fmt, ...)
3819 * The verifier depends on vsnprintf() modifying the va_list
3820 * passed to it, where it is sent by reference. Some architectures
3821 * (like x86_32) pass it by value, which means that vsnprintf()
3822 * does not modify the va_list passed to it, and the verifier
3823 * would then need to be able to understand all the values that
3824 * vsnprintf can use. If it is passed by value, then the verifier is disabled.
3828 vsnprintf(buf, 16, "%d", ap);
3829 ret = va_arg(ap, int);
3835 static void test_can_verify(void)
3837 if (!test_can_verify_check("%d %d", 0, 1)) {
3838 pr_info("trace event string verifier disabled\n");
3839 static_branch_inc(&trace_no_verify);
3844 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3845 * @iter: The iterator that holds the seq buffer and the event being printed
3846 * @fmt: The format used to print the event
3847 * @ap: The va_list holding the data to print from @fmt.
3849 * This writes the data into the @iter->seq buffer using the data from
3850 * @fmt and @ap. If the format has a %s, then the source of the string
3851 * is examined to make sure it is safe to print, otherwise it will
3852 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3855 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3858 const char *p = fmt;
3862 if (WARN_ON_ONCE(!fmt))
3865 if (static_branch_unlikely(&trace_no_verify))
3868 /* Don't bother checking when doing a ftrace_dump() */
3869 if (iter->fmt == static_fmt_buf)
3878 /* We only care about %s and variants */
3879 for (i = 0; p[i]; i++) {
3880 if (i + 1 >= iter->fmt_size) {
3882 * If we can't expand the copy buffer,
3885 if (!trace_iter_expand_format(iter))
3889 if (p[i] == '\\' && p[i+1]) {
3894 /* Need to test cases like %08.*s */
3895 for (j = 1; p[i+j]; j++) {
3896 if (isdigit(p[i+j]) ||
3899 if (p[i+j] == '*') {
3911 /* If no %s found then just print normally */
3915 /* Copy up to the %s, and print that */
3916 strncpy(iter->fmt, p, i);
3917 iter->fmt[i] = '\0';
3918 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3921 * If iter->seq is full, the above call no longer guarantees
3922 * that ap is in sync with fmt processing, and further calls
3923 * to va_arg() can return wrong positional arguments.
3925 * Ensure that ap is no longer used in this case.
3927 if (iter->seq.full) {
3933 len = va_arg(ap, int);
3935 /* The ap now points to the string data of the %s */
3936 str = va_arg(ap, const char *);
3939 * If you hit this warning, it is likely that the
3940 * trace event in question used %s on a string that
3941 * was saved at the time of the event, but may not be
3942 * around when the trace is read. Use __string(),
3943 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3944 * instead. See samples/trace_events/trace-events-sample.h
3947 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3948 "fmt: '%s' current_buffer: '%s'",
3949 fmt, show_buffer(&iter->seq))) {
3952 /* Try to safely read the string */
3954 if (len + 1 > iter->fmt_size)
3955 len = iter->fmt_size - 1;
3958 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3962 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3966 trace_seq_printf(&iter->seq, "(0x%px)", str);
3968 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3970 str = "[UNSAFE-MEMORY]";
3971 strcpy(iter->fmt, "%s");
3973 strncpy(iter->fmt, p + i, j + 1);
3974 iter->fmt[j+1] = '\0';
3977 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3979 trace_seq_printf(&iter->seq, iter->fmt, str);
3985 trace_seq_vprintf(&iter->seq, p, ap);
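/*
 * For reference, a sketch of the pattern the warning above points at
 * (event and field names are hypothetical; see
 * samples/trace_events/trace-events-sample.h for the full version):
 * copying the string into the event at record time avoids the unsafe
 * pointer dereference entirely:
 *
 *	TP_STRUCT__entry(
 *		__string(name, task->comm)
 *	),
 *	TP_fast_assign(
 *		__assign_str(name, task->comm);
 *	),
 *	TP_printk("name=%s", __get_str(name))
 */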
3988 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3990 const char *p, *new_fmt;
3993 if (WARN_ON_ONCE(!fmt))
3996 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
4000 new_fmt = q = iter->fmt;
4002 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
4003 if (!trace_iter_expand_format(iter))
4006 q += iter->fmt - new_fmt;
4007 new_fmt = iter->fmt;
4012 /* Replace %p with %px */
4016 } else if (p[0] == 'p' && !isalnum(p[1])) {
4027 #define STATIC_TEMP_BUF_SIZE 128
4028 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4030 /* Find the next real entry, without updating the iterator itself */
4031 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4032 int *ent_cpu, u64 *ent_ts)
4034 /* __find_next_entry will reset ent_size */
4035 int ent_size = iter->ent_size;
4036 struct trace_entry *entry;
4039 * If called from ftrace_dump(), then the iter->temp buffer
4040 * will be the static_temp_buf and not created from kmalloc.
4041 * If the entry size is greater than the buffer, we can
4042 * not save it. Just return NULL in that case. This is only
4043 * used to add markers when two consecutive events' time
4044 * stamps have a large delta. See trace_print_lat_context()
4046 if (iter->temp == static_temp_buf &&
4047 STATIC_TEMP_BUF_SIZE < ent_size)
4051 * The __find_next_entry() may call peek_next_entry(), which may
4052 * call ring_buffer_peek() that may make the contents of iter->ent
4053 * undefined. Need to copy iter->ent now.
4055 if (iter->ent && iter->ent != iter->temp) {
4056 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4057 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4059 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4064 iter->temp_size = iter->ent_size;
4066 memcpy(iter->temp, iter->ent, iter->ent_size);
4067 iter->ent = iter->temp;
4069 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4070 /* Put back the original ent_size */
4071 iter->ent_size = ent_size;
4076 /* Find the next real entry, and increment the iterator to the next entry */
4077 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4079 iter->ent = __find_next_entry(iter, &iter->cpu,
4080 &iter->lost_events, &iter->ts);
4083 trace_iterator_increment(iter);
4085 return iter->ent ? iter : NULL;
4088 static void trace_consume(struct trace_iterator *iter)
4090 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4091 &iter->lost_events);
4094 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4096 struct trace_iterator *iter = m->private;
4100 WARN_ON_ONCE(iter->leftover);
4104 /* can't go backwards */
4109 ent = trace_find_next_entry_inc(iter);
4113 while (ent && iter->idx < i)
4114 ent = trace_find_next_entry_inc(iter);
4121 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4123 struct ring_buffer_iter *buf_iter;
4124 unsigned long entries = 0;
4127 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4129 buf_iter = trace_buffer_iter(iter, cpu);
4133 ring_buffer_iter_reset(buf_iter);
4136 * We could have the case with the max latency tracers
4137 * that a reset never took place on a cpu. This is evident
4138 * by the timestamp being before the start of the buffer.
4140 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4141 if (ts >= iter->array_buffer->time_start)
4144 ring_buffer_iter_advance(buf_iter);
4147 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4151 * The current tracer is copied to avoid a global locking
4154 static void *s_start(struct seq_file *m, loff_t *pos)
4156 struct trace_iterator *iter = m->private;
4157 struct trace_array *tr = iter->tr;
4158 int cpu_file = iter->cpu_file;
4164 * copy the tracer to avoid using a global lock all around.
4165 * iter->trace is a copy of current_trace, the pointer to the
4166 * name may be used instead of a strcmp(), as iter->trace->name
4167 * will point to the same string as current_trace->name.
4169 mutex_lock(&trace_types_lock);
4170 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4171 *iter->trace = *tr->current_trace;
4172 mutex_unlock(&trace_types_lock);
4174 #ifdef CONFIG_TRACER_MAX_TRACE
4175 if (iter->snapshot && iter->trace->use_max_tr)
4176 return ERR_PTR(-EBUSY);
4179 if (*pos != iter->pos) {
4184 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4185 for_each_tracing_cpu(cpu)
4186 tracing_iter_reset(iter, cpu);
4188 tracing_iter_reset(iter, cpu_file);
4191 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4196 * If we overflowed the seq_file before, then we want
4197 * to just reuse the trace_seq buffer again.
4203 p = s_next(m, p, &l);
4207 trace_event_read_lock();
4208 trace_access_lock(cpu_file);
4212 static void s_stop(struct seq_file *m, void *p)
4214 struct trace_iterator *iter = m->private;
4216 #ifdef CONFIG_TRACER_MAX_TRACE
4217 if (iter->snapshot && iter->trace->use_max_tr)
4221 trace_access_unlock(iter->cpu_file);
4222 trace_event_read_unlock();
4226 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4227 unsigned long *entries, int cpu)
4229 unsigned long count;
4231 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4233 * If this buffer has skipped entries, then we hold all
4234 * entries for the trace and we need to ignore the
4235 * ones before the time stamp.
4237 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4238 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4239 /* total is the same as the entries */
4243 ring_buffer_overrun_cpu(buf->buffer, cpu);
4248 get_total_entries(struct array_buffer *buf,
4249 unsigned long *total, unsigned long *entries)
4257 for_each_tracing_cpu(cpu) {
4258 get_total_entries_cpu(buf, &t, &e, cpu);
4264 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4266 unsigned long total, entries;
4271 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4276 unsigned long trace_total_entries(struct trace_array *tr)
4278 unsigned long total, entries;
4283 get_total_entries(&tr->array_buffer, &total, &entries);
4288 static void print_lat_help_header(struct seq_file *m)
4290 seq_puts(m, "# _------=> CPU# \n"
4291 "# / _-----=> irqs-off/BH-disabled\n"
4292 "# | / _----=> need-resched \n"
4293 "# || / _---=> hardirq/softirq \n"
4294 "# ||| / _--=> preempt-depth \n"
4295 "# |||| / _-=> migrate-disable \n"
4296 "# ||||| / delay \n"
4297 "# cmd pid |||||| time | caller \n"
4298 "# \\ / |||||| \\ | / \n");
4301 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4303 unsigned long total;
4304 unsigned long entries;
4306 get_total_entries(buf, &total, &entries);
4307 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4308 entries, total, num_online_cpus());
4312 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4315 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4317 print_event_info(buf, m);
4319 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4320 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4323 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4326 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4327 static const char space[] = " ";
4328 int prec = tgid ? 12 : 2;
4330 print_event_info(buf, m);
4332 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4333 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4334 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4335 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4336 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4337 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4338 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4339 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4343 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4345 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4346 struct array_buffer *buf = iter->array_buffer;
4347 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4348 struct tracer *type = iter->trace;
4349 unsigned long entries;
4350 unsigned long total;
4351 const char *name = type->name;
4353 get_total_entries(buf, &total, &entries);
4355 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4357 seq_puts(m, "# -----------------------------------"
4358 "---------------------------------\n");
4359 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4360 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4361 nsecs_to_usecs(data->saved_latency),
4365 preempt_model_none() ? "server" :
4366 preempt_model_voluntary() ? "desktop" :
4367 preempt_model_full() ? "preempt" :
4368 preempt_model_rt() ? "preempt_rt" :
4370 /* These are reserved for later use */
4373 seq_printf(m, " #P:%d)\n", num_online_cpus());
4377 seq_puts(m, "# -----------------\n");
4378 seq_printf(m, "# | task: %.16s-%d "
4379 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4380 data->comm, data->pid,
4381 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4382 data->policy, data->rt_priority);
4383 seq_puts(m, "# -----------------\n");
4385 if (data->critical_start) {
4386 seq_puts(m, "# => started at: ");
4387 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4388 trace_print_seq(m, &iter->seq);
4389 seq_puts(m, "\n# => ended at: ");
4390 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4391 trace_print_seq(m, &iter->seq);
4392 seq_puts(m, "\n#\n");
4398 static void test_cpu_buff_start(struct trace_iterator *iter)
4400 struct trace_seq *s = &iter->seq;
4401 struct trace_array *tr = iter->tr;
4403 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4406 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4409 if (cpumask_available(iter->started) &&
4410 cpumask_test_cpu(iter->cpu, iter->started))
4413 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4416 if (cpumask_available(iter->started))
4417 cpumask_set_cpu(iter->cpu, iter->started);
4419 /* Don't print started cpu buffer for the first entry of the trace */
4421 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4425 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4427 struct trace_array *tr = iter->tr;
4428 struct trace_seq *s = &iter->seq;
4429 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4430 struct trace_entry *entry;
4431 struct trace_event *event;
4435 test_cpu_buff_start(iter);
4437 event = ftrace_find_event(entry->type);
4439 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4440 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4441 trace_print_lat_context(iter);
4443 trace_print_context(iter);
4446 if (trace_seq_has_overflowed(s))
4447 return TRACE_TYPE_PARTIAL_LINE;
4450 if (tr->trace_flags & TRACE_ITER_FIELDS)
4451 return print_event_fields(iter, event);
4452 return event->funcs->trace(iter, sym_flags, event);
4455 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4457 return trace_handle_return(s);
4460 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4462 struct trace_array *tr = iter->tr;
4463 struct trace_seq *s = &iter->seq;
4464 struct trace_entry *entry;
4465 struct trace_event *event;
4469 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4470 trace_seq_printf(s, "%d %d %llu ",
4471 entry->pid, iter->cpu, iter->ts);
4473 if (trace_seq_has_overflowed(s))
4474 return TRACE_TYPE_PARTIAL_LINE;
4476 event = ftrace_find_event(entry->type);
4478 return event->funcs->raw(iter, 0, event);
4480 trace_seq_printf(s, "%d ?\n", entry->type);
4482 return trace_handle_return(s);
4485 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4487 struct trace_array *tr = iter->tr;
4488 struct trace_seq *s = &iter->seq;
4489 unsigned char newline = '\n';
4490 struct trace_entry *entry;
4491 struct trace_event *event;
4495 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4496 SEQ_PUT_HEX_FIELD(s, entry->pid);
4497 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4498 SEQ_PUT_HEX_FIELD(s, iter->ts);
4499 if (trace_seq_has_overflowed(s))
4500 return TRACE_TYPE_PARTIAL_LINE;
4503 event = ftrace_find_event(entry->type);
4505 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4506 if (ret != TRACE_TYPE_HANDLED)
4510 SEQ_PUT_FIELD(s, newline);
4512 return trace_handle_return(s);
4515 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4517 struct trace_array *tr = iter->tr;
4518 struct trace_seq *s = &iter->seq;
4519 struct trace_entry *entry;
4520 struct trace_event *event;
4524 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4525 SEQ_PUT_FIELD(s, entry->pid);
4526 SEQ_PUT_FIELD(s, iter->cpu);
4527 SEQ_PUT_FIELD(s, iter->ts);
4528 if (trace_seq_has_overflowed(s))
4529 return TRACE_TYPE_PARTIAL_LINE;
4532 event = ftrace_find_event(entry->type);
4533 return event ? event->funcs->binary(iter, 0, event) :
4537 int trace_empty(struct trace_iterator *iter)
4539 struct ring_buffer_iter *buf_iter;
4542 /* If we are looking at one CPU buffer, only check that one */
4543 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4544 cpu = iter->cpu_file;
4545 buf_iter = trace_buffer_iter(iter, cpu);
4547 if (!ring_buffer_iter_empty(buf_iter))
4550 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4556 for_each_tracing_cpu(cpu) {
4557 buf_iter = trace_buffer_iter(iter, cpu);
4559 if (!ring_buffer_iter_empty(buf_iter))
4562 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4570 /* Called with trace_event_read_lock() held. */
4571 enum print_line_t print_trace_line(struct trace_iterator *iter)
4573 struct trace_array *tr = iter->tr;
4574 unsigned long trace_flags = tr->trace_flags;
4575 enum print_line_t ret;
4577 if (iter->lost_events) {
4578 if (iter->lost_events == (unsigned long)-1)
4579 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4582 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4583 iter->cpu, iter->lost_events);
4584 if (trace_seq_has_overflowed(&iter->seq))
4585 return TRACE_TYPE_PARTIAL_LINE;
4588 if (iter->trace && iter->trace->print_line) {
4589 ret = iter->trace->print_line(iter);
4590 if (ret != TRACE_TYPE_UNHANDLED)
4594 if (iter->ent->type == TRACE_BPUTS &&
4595 trace_flags & TRACE_ITER_PRINTK &&
4596 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4597 return trace_print_bputs_msg_only(iter);
4599 if (iter->ent->type == TRACE_BPRINT &&
4600 trace_flags & TRACE_ITER_PRINTK &&
4601 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4602 return trace_print_bprintk_msg_only(iter);
4604 if (iter->ent->type == TRACE_PRINT &&
4605 trace_flags & TRACE_ITER_PRINTK &&
4606 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4607 return trace_print_printk_msg_only(iter);
4609 if (trace_flags & TRACE_ITER_BIN)
4610 return print_bin_fmt(iter);
4612 if (trace_flags & TRACE_ITER_HEX)
4613 return print_hex_fmt(iter);
4615 if (trace_flags & TRACE_ITER_RAW)
4616 return print_raw_fmt(iter);
4618 return print_trace_fmt(iter);
4621 void trace_latency_header(struct seq_file *m)
4623 struct trace_iterator *iter = m->private;
4624 struct trace_array *tr = iter->tr;
4626 /* print nothing if the buffers are empty */
4627 if (trace_empty(iter))
4630 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4631 print_trace_header(m, iter);
4633 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4634 print_lat_help_header(m);
4637 void trace_default_header(struct seq_file *m)
4639 struct trace_iterator *iter = m->private;
4640 struct trace_array *tr = iter->tr;
4641 unsigned long trace_flags = tr->trace_flags;
4643 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4646 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4647 /* print nothing if the buffers are empty */
4648 if (trace_empty(iter))
4650 print_trace_header(m, iter);
4651 if (!(trace_flags & TRACE_ITER_VERBOSE))
4652 print_lat_help_header(m);
4654 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4655 if (trace_flags & TRACE_ITER_IRQ_INFO)
4656 print_func_help_header_irq(iter->array_buffer,
4659 print_func_help_header(iter->array_buffer, m,
4665 static void test_ftrace_alive(struct seq_file *m)
4667 if (!ftrace_is_dead())
4669 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4670 "# MAY BE MISSING FUNCTION EVENTS\n");
4673 #ifdef CONFIG_TRACER_MAX_TRACE
4674 static void show_snapshot_main_help(struct seq_file *m)
4676 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4677 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4678 "# Takes a snapshot of the main buffer.\n"
4679 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4680 "# (Doesn't have to be '2' works with any number that\n"
4681 "# is not a '0' or '1')\n");
4684 static void show_snapshot_percpu_help(struct seq_file *m)
4686 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4687 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4688 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4689 "# Takes a snapshot of the main buffer for this cpu.\n");
4691 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4692 "# Must use main snapshot file to allocate.\n");
4694 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4695 "# (Doesn't have to be '2' works with any number that\n"
4696 "# is not a '0' or '1')\n");
4699 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4701 if (iter->tr->allocated_snapshot)
4702 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4704 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4706 seq_puts(m, "# Snapshot commands:\n");
4707 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4708 show_snapshot_main_help(m);
4710 show_snapshot_percpu_help(m);
4713 /* Should never be called */
4714 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4717 static int s_show(struct seq_file *m, void *v)
4719 struct trace_iterator *iter = v;
4722 if (iter->ent == NULL) {
4724 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4726 test_ftrace_alive(m);
4728 if (iter->snapshot && trace_empty(iter))
4729 print_snapshot_help(m, iter);
4730 else if (iter->trace && iter->trace->print_header)
4731 iter->trace->print_header(m);
4733 trace_default_header(m);
4735 } else if (iter->leftover) {
4737 * If we filled the seq_file buffer earlier, we
4738 * want to just show it now.
4740 ret = trace_print_seq(m, &iter->seq);
4742 /* ret should this time be zero, but you never know */
4743 iter->leftover = ret;
4746 print_trace_line(iter);
4747 ret = trace_print_seq(m, &iter->seq);
4749 * If we overflow the seq_file buffer, then it will
4750 * ask us for this data again at start up.
4752 * ret is 0 if seq_file write succeeded.
4755 iter->leftover = ret;
4762 * Should be used after trace_array_get(), trace_types_lock
4763 * ensures that i_cdev was already initialized.
4765 static inline int tracing_get_cpu(struct inode *inode)
4767 if (inode->i_cdev) /* See trace_create_cpu_file() */
4768 return (long)inode->i_cdev - 1;
4769 return RING_BUFFER_ALL_CPUS;
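/*
 * Illustrative note (an assumption based on the decode above and the
 * trace_create_cpu_file() reference, since that helper is not shown here):
 * the per-CPU file creation side stores the CPU biased by one, e.g.
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);
 *
 * so that a NULL i_cdev can keep meaning "all CPUs" for the top level files.
 */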
4772 static const struct seq_operations tracer_seq_ops = {
4779 static struct trace_iterator *
4780 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4782 struct trace_array *tr = inode->i_private;
4783 struct trace_iterator *iter;
4786 if (tracing_disabled)
4787 return ERR_PTR(-ENODEV);
4789 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4791 return ERR_PTR(-ENOMEM);
4793 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4795 if (!iter->buffer_iter)
4799 * trace_find_next_entry() may need to save off iter->ent.
4800 * It will place it into the iter->temp buffer. As most
4801 * events are less than 128 bytes, allocate a buffer of that size.
4802 * If one is greater, then trace_find_next_entry() will
4803 * allocate a new buffer to adjust for the bigger iter->ent.
4804 * It's not critical if it fails to get allocated here.
4806 iter->temp = kmalloc(128, GFP_KERNEL);
4808 iter->temp_size = 128;
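/*
 * Sketch (assumed, details may differ) of how trace_find_next_entry()
 * is expected to grow this buffer when an event larger than temp_size
 * shows up:
 *
 *	if (iter->temp_size < iter->ent_size) {
 *		void *temp = kmalloc(iter->ent_size, GFP_KERNEL);
 *
 *		if (!temp)
 *			return NULL;
 *		kfree(iter->temp);
 *		iter->temp = temp;
 *		iter->temp_size = iter->ent_size;
 *	}
 */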
4811 * trace_event_printf() may need to modify the given format
4812 * string to replace %p with %px so that it shows the real address
4813 * instead of a hash value. However, that is only needed for event
4814 * tracing; other tracers may not need it. Defer the allocation
4815 * until it is needed.
4821 * We make a copy of the current tracer to avoid concurrent
4822 * changes on it while we are reading.
4824 mutex_lock(&trace_types_lock);
4825 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4829 *iter->trace = *tr->current_trace;
4831 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4836 #ifdef CONFIG_TRACER_MAX_TRACE
4837 /* Currently only the top directory has a snapshot */
4838 if (tr->current_trace->print_max || snapshot)
4839 iter->array_buffer = &tr->max_buffer;
4842 iter->array_buffer = &tr->array_buffer;
4843 iter->snapshot = snapshot;
4845 iter->cpu_file = tracing_get_cpu(inode);
4846 mutex_init(&iter->mutex);
4848 /* Notify the tracer early; before we stop tracing. */
4849 if (iter->trace->open)
4850 iter->trace->open(iter);
4852 /* Annotate start of buffers if we had overruns */
4853 if (ring_buffer_overruns(iter->array_buffer->buffer))
4854 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4856 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4857 if (trace_clocks[tr->clock_id].in_ns)
4858 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4861 * If pause-on-trace is enabled, then stop the trace while
4862 * dumping, unless this is the "snapshot" file
4864 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4865 tracing_stop_tr(tr);
4867 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4868 for_each_tracing_cpu(cpu) {
4869 iter->buffer_iter[cpu] =
4870 ring_buffer_read_prepare(iter->array_buffer->buffer,
4873 ring_buffer_read_prepare_sync();
4874 for_each_tracing_cpu(cpu) {
4875 ring_buffer_read_start(iter->buffer_iter[cpu]);
4876 tracing_iter_reset(iter, cpu);
4879 cpu = iter->cpu_file;
4880 iter->buffer_iter[cpu] =
4881 ring_buffer_read_prepare(iter->array_buffer->buffer,
4883 ring_buffer_read_prepare_sync();
4884 ring_buffer_read_start(iter->buffer_iter[cpu]);
4885 tracing_iter_reset(iter, cpu);
4888 mutex_unlock(&trace_types_lock);
4893 mutex_unlock(&trace_types_lock);
4896 kfree(iter->buffer_iter);
4898 seq_release_private(inode, file);
4899 return ERR_PTR(-ENOMEM);
4902 int tracing_open_generic(struct inode *inode, struct file *filp)
4906 ret = tracing_check_open_get_tr(NULL);
4910 filp->private_data = inode->i_private;
4914 bool tracing_is_disabled(void)
4916 return (tracing_disabled) ? true : false;
4920 * Open and update trace_array ref count.
4921 * Must have the current trace_array passed to it.
4923 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4925 struct trace_array *tr = inode->i_private;
4928 ret = tracing_check_open_get_tr(tr);
4932 filp->private_data = inode->i_private;
4937 static int tracing_mark_open(struct inode *inode, struct file *filp)
4939 stream_open(inode, filp);
4940 return tracing_open_generic_tr(inode, filp);
4943 static int tracing_release(struct inode *inode, struct file *file)
4945 struct trace_array *tr = inode->i_private;
4946 struct seq_file *m = file->private_data;
4947 struct trace_iterator *iter;
4950 if (!(file->f_mode & FMODE_READ)) {
4951 trace_array_put(tr);
4955 /* Writes do not use seq_file */
4957 mutex_lock(&trace_types_lock);
4959 for_each_tracing_cpu(cpu) {
4960 if (iter->buffer_iter[cpu])
4961 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4964 if (iter->trace && iter->trace->close)
4965 iter->trace->close(iter);
4967 if (!iter->snapshot && tr->stop_count)
4968 /* reenable tracing if it was previously enabled */
4969 tracing_start_tr(tr);
4971 __trace_array_put(tr);
4973 mutex_unlock(&trace_types_lock);
4975 mutex_destroy(&iter->mutex);
4976 free_cpumask_var(iter->started);
4980 kfree(iter->buffer_iter);
4981 seq_release_private(inode, file);
4986 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4988 struct trace_array *tr = inode->i_private;
4990 trace_array_put(tr);
4994 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4996 struct trace_array *tr = inode->i_private;
4998 trace_array_put(tr);
5000 return single_release(inode, file);
5003 static int tracing_open(struct inode *inode, struct file *file)
5005 struct trace_array *tr = inode->i_private;
5006 struct trace_iterator *iter;
5009 ret = tracing_check_open_get_tr(tr);
5013 /* If this file was open for write, then erase contents */
5014 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5015 int cpu = tracing_get_cpu(inode);
5016 struct array_buffer *trace_buf = &tr->array_buffer;
5018 #ifdef CONFIG_TRACER_MAX_TRACE
5019 if (tr->current_trace->print_max)
5020 trace_buf = &tr->max_buffer;
5023 if (cpu == RING_BUFFER_ALL_CPUS)
5024 tracing_reset_online_cpus(trace_buf);
5026 tracing_reset_cpu(trace_buf, cpu);
5029 if (file->f_mode & FMODE_READ) {
5030 iter = __tracing_open(inode, file, false);
5032 ret = PTR_ERR(iter);
5033 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5034 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5038 trace_array_put(tr);
5044 * Some tracers are not suitable for instance buffers.
5045 * A tracer is always available for the global array (toplevel)
5046 * or if it explicitly states that it is.
5049 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5051 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5054 /* Find the next tracer that this trace array may use */
5055 static struct tracer *
5056 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5058 while (t && !trace_ok_for_array(t, tr))
5065 t_next(struct seq_file *m, void *v, loff_t *pos)
5067 struct trace_array *tr = m->private;
5068 struct tracer *t = v;
5073 t = get_tracer_for_array(tr, t->next);
5078 static void *t_start(struct seq_file *m, loff_t *pos)
5080 struct trace_array *tr = m->private;
5084 mutex_lock(&trace_types_lock);
5086 t = get_tracer_for_array(tr, trace_types);
5087 for (; t && l < *pos; t = t_next(m, t, &l))
5093 static void t_stop(struct seq_file *m, void *p)
5095 mutex_unlock(&trace_types_lock);
5098 static int t_show(struct seq_file *m, void *v)
5100 struct tracer *t = v;
5105 seq_puts(m, t->name);
5114 static const struct seq_operations show_traces_seq_ops = {
5121 static int show_traces_open(struct inode *inode, struct file *file)
5123 struct trace_array *tr = inode->i_private;
5127 ret = tracing_check_open_get_tr(tr);
5131 ret = seq_open(file, &show_traces_seq_ops);
5133 trace_array_put(tr);
5137 m = file->private_data;
5143 static int show_traces_release(struct inode *inode, struct file *file)
5145 struct trace_array *tr = inode->i_private;
5147 trace_array_put(tr);
5148 return seq_release(inode, file);
5152 tracing_write_stub(struct file *filp, const char __user *ubuf,
5153 size_t count, loff_t *ppos)
5158 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5162 if (file->f_mode & FMODE_READ)
5163 ret = seq_lseek(file, offset, whence);
5165 file->f_pos = ret = 0;
5170 static const struct file_operations tracing_fops = {
5171 .open = tracing_open,
5173 .read_iter = seq_read_iter,
5174 .splice_read = generic_file_splice_read,
5175 .write = tracing_write_stub,
5176 .llseek = tracing_lseek,
5177 .release = tracing_release,
5180 static const struct file_operations show_traces_fops = {
5181 .open = show_traces_open,
5183 .llseek = seq_lseek,
5184 .release = show_traces_release,
5188 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5189 size_t count, loff_t *ppos)
5191 struct trace_array *tr = file_inode(filp)->i_private;
5195 len = snprintf(NULL, 0, "%*pb\n",
5196 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5197 mask_str = kmalloc(len, GFP_KERNEL);
5201 len = snprintf(mask_str, len, "%*pb\n",
5202 cpumask_pr_args(tr->tracing_cpumask));
5207 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5215 int tracing_set_cpumask(struct trace_array *tr,
5216 cpumask_var_t tracing_cpumask_new)
5223 local_irq_disable();
5224 arch_spin_lock(&tr->max_lock);
5225 for_each_tracing_cpu(cpu) {
5227 * Increase/decrease the disabled counter if we are
5228 * about to flip a bit in the cpumask:
5230 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5231 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5232 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5233 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5235 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5236 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5237 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5238 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5241 arch_spin_unlock(&tr->max_lock);
5244 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
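/*
 * Illustrative use of the tracing_cpumask file that ends up here
 * (cpumask_parse_user() takes the usual hex bitmask format):
 *
 *	# echo 3 > tracing_cpumask	(trace only CPUs 0 and 1)
 */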
5250 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5251 size_t count, loff_t *ppos)
5253 struct trace_array *tr = file_inode(filp)->i_private;
5254 cpumask_var_t tracing_cpumask_new;
5257 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5260 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5264 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5268 free_cpumask_var(tracing_cpumask_new);
5273 free_cpumask_var(tracing_cpumask_new);
5278 static const struct file_operations tracing_cpumask_fops = {
5279 .open = tracing_open_generic_tr,
5280 .read = tracing_cpumask_read,
5281 .write = tracing_cpumask_write,
5282 .release = tracing_release_generic_tr,
5283 .llseek = generic_file_llseek,
5286 static int tracing_trace_options_show(struct seq_file *m, void *v)
5288 struct tracer_opt *trace_opts;
5289 struct trace_array *tr = m->private;
5293 mutex_lock(&trace_types_lock);
5294 tracer_flags = tr->current_trace->flags->val;
5295 trace_opts = tr->current_trace->flags->opts;
5297 for (i = 0; trace_options[i]; i++) {
5298 if (tr->trace_flags & (1 << i))
5299 seq_printf(m, "%s\n", trace_options[i]);
5301 seq_printf(m, "no%s\n", trace_options[i]);
5304 for (i = 0; trace_opts[i].name; i++) {
5305 if (tracer_flags & trace_opts[i].bit)
5306 seq_printf(m, "%s\n", trace_opts[i].name);
5308 seq_printf(m, "no%s\n", trace_opts[i].name);
5310 mutex_unlock(&trace_types_lock);
5315 static int __set_tracer_option(struct trace_array *tr,
5316 struct tracer_flags *tracer_flags,
5317 struct tracer_opt *opts, int neg)
5319 struct tracer *trace = tracer_flags->trace;
5322 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5327 tracer_flags->val &= ~opts->bit;
5329 tracer_flags->val |= opts->bit;
5333 /* Try to assign a tracer specific option */
5334 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5336 struct tracer *trace = tr->current_trace;
5337 struct tracer_flags *tracer_flags = trace->flags;
5338 struct tracer_opt *opts = NULL;
5341 for (i = 0; tracer_flags->opts[i].name; i++) {
5342 opts = &tracer_flags->opts[i];
5344 if (strcmp(cmp, opts->name) == 0)
5345 return __set_tracer_option(tr, trace->flags, opts, neg);
5351 /* Some tracers require overwrite to stay enabled */
5352 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5354 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5360 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5364 if ((mask == TRACE_ITER_RECORD_TGID) ||
5365 (mask == TRACE_ITER_RECORD_CMD))
5366 lockdep_assert_held(&event_mutex);
5368 /* do nothing if flag is already set */
5369 if (!!(tr->trace_flags & mask) == !!enabled)
5372 /* Give the tracer a chance to approve the change */
5373 if (tr->current_trace->flag_changed)
5374 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5378 tr->trace_flags |= mask;
5380 tr->trace_flags &= ~mask;
5382 if (mask == TRACE_ITER_RECORD_CMD)
5383 trace_event_enable_cmd_record(enabled);
5385 if (mask == TRACE_ITER_RECORD_TGID) {
5387 tgid_map_max = pid_max;
5388 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5392 * Pairs with smp_load_acquire() in
5393 * trace_find_tgid_ptr() to ensure that if it observes
5394 * the tgid_map we just allocated then it also observes
5395 * the corresponding tgid_map_max value.
5397 smp_store_release(&tgid_map, map);
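/*
 * Sketch of the matching reader (trace_find_tgid_ptr() elsewhere in
 * this file), shown only to illustrate the acquire/release pairing;
 * exact details may differ:
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (unlikely(!map || pid > tgid_map_max))
 *		return NULL;
 *	return &map[pid];
 */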
5400 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5404 trace_event_enable_tgid_record(enabled);
5407 if (mask == TRACE_ITER_EVENT_FORK)
5408 trace_event_follow_fork(tr, enabled);
5410 if (mask == TRACE_ITER_FUNC_FORK)
5411 ftrace_pid_follow_fork(tr, enabled);
5413 if (mask == TRACE_ITER_OVERWRITE) {
5414 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5415 #ifdef CONFIG_TRACER_MAX_TRACE
5416 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5420 if (mask == TRACE_ITER_PRINTK) {
5421 trace_printk_start_stop_comm(enabled);
5422 trace_printk_control(enabled);
5428 int trace_set_options(struct trace_array *tr, char *option)
5433 size_t orig_len = strlen(option);
5436 cmp = strstrip(option);
5438 len = str_has_prefix(cmp, "no");
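/*
 * Illustrative inputs (not exhaustive): writing "overwrite" sets
 * TRACE_ITER_OVERWRITE, "nooverwrite" clears it, and a name that is
 * not a core option falls through to set_tracer_option() below.
 */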
5444 mutex_lock(&event_mutex);
5445 mutex_lock(&trace_types_lock);
5447 ret = match_string(trace_options, -1, cmp);
5448 /* If no option could be set, test the specific tracer options */
5450 ret = set_tracer_option(tr, cmp, neg);
5452 ret = set_tracer_flag(tr, 1 << ret, !neg);
5454 mutex_unlock(&trace_types_lock);
5455 mutex_unlock(&event_mutex);
5458 * If the first trailing whitespace is replaced with '\0' by strstrip,
5459 * turn it back into a space.
5461 if (orig_len > strlen(option))
5462 option[strlen(option)] = ' ';
5467 static void __init apply_trace_boot_options(void)
5469 char *buf = trace_boot_options_buf;
5473 option = strsep(&buf, ",");
5479 trace_set_options(&global_trace, option);
5481 /* Put back the comma to allow this to be called again */
5488 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5489 size_t cnt, loff_t *ppos)
5491 struct seq_file *m = filp->private_data;
5492 struct trace_array *tr = m->private;
5496 if (cnt >= sizeof(buf))
5499 if (copy_from_user(buf, ubuf, cnt))
5504 ret = trace_set_options(tr, buf);
5513 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5515 struct trace_array *tr = inode->i_private;
5518 ret = tracing_check_open_get_tr(tr);
5522 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5524 trace_array_put(tr);
5529 static const struct file_operations tracing_iter_fops = {
5530 .open = tracing_trace_options_open,
5532 .llseek = seq_lseek,
5533 .release = tracing_single_release_tr,
5534 .write = tracing_trace_options_write,
5537 static const char readme_msg[] =
5538 "tracing mini-HOWTO:\n\n"
5539 "# echo 0 > tracing_on : quick way to disable tracing\n"
5540 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5541 " Important files:\n"
5542 " trace\t\t\t- The static contents of the buffer\n"
5543 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5544 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5545 " current_tracer\t- function and latency tracers\n"
5546 " available_tracers\t- list of configured tracers for current_tracer\n"
5547 " error_log\t- error log for failed commands (that support it)\n"
5548 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5549 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5550 " trace_clock\t\t- change the clock used to order events\n"
5551 " local: Per cpu clock but may not be synced across CPUs\n"
5552 " global: Synced across CPUs but slows tracing down.\n"
5553 " counter: Not a clock, but just an increment\n"
5554 " uptime: Jiffy counter from time of boot\n"
5555 " perf: Same clock that perf events use\n"
5556 #ifdef CONFIG_X86_64
5557 " x86-tsc: TSC cycle counter\n"
5559 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5560 " delta: Delta difference against a buffer-wide timestamp\n"
5561 " absolute: Absolute (standalone) timestamp\n"
5562 "\n trace_marker\t\t- Writing into this file writes into the kernel buffer\n"
5563 "\n trace_marker_raw\t\t- Writing into this file writes binary data into the kernel buffer\n"
5564 " tracing_cpumask\t- Limit which CPUs to trace\n"
5565 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5566 "\t\t\t Remove sub-buffer with rmdir\n"
5567 " trace_options\t\t- Set format or modify how tracing happens\n"
5568 "\t\t\t Disable an option by prefixing 'no' to the\n"
5569 "\t\t\t option name\n"
5570 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5571 #ifdef CONFIG_DYNAMIC_FTRACE
5572 "\n available_filter_functions - list of functions that can be filtered on\n"
5573 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5574 "\t\t\t functions\n"
5575 "\t accepts: func_full_name or glob-matching-pattern\n"
5576 "\t modules: Can select a group via module\n"
5577 "\t Format: :mod:<module-name>\n"
5578 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5579 "\t triggers: a command to perform when function is hit\n"
5580 "\t Format: <function>:<trigger>[:count]\n"
5581 "\t trigger: traceon, traceoff\n"
5582 "\t\t enable_event:<system>:<event>\n"
5583 "\t\t disable_event:<system>:<event>\n"
5584 #ifdef CONFIG_STACKTRACE
5587 #ifdef CONFIG_TRACER_SNAPSHOT
5592 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5593 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5594 "\t The first one will disable tracing every time do_fault is hit\n"
5595 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5596 "\t The first time do_trap is hit and it disables tracing, the\n"
5597 "\t counter will decrement to 2. If tracing is already disabled,\n"
5598 "\t the counter will not decrement. It only decrements when the\n"
5599 "\t trigger did work\n"
5600 "\t To remove trigger without count:\n"
5601 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
5602 "\t To remove trigger with a count:\n"
5603 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
5604 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5605 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5606 "\t modules: Can select a group via module command :mod:\n"
5607 "\t Does not accept triggers\n"
5608 #endif /* CONFIG_DYNAMIC_FTRACE */
5609 #ifdef CONFIG_FUNCTION_TRACER
5610 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5612 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5615 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5616 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5617 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5618 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5620 #ifdef CONFIG_TRACER_SNAPSHOT
5621 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5622 "\t\t\t snapshot buffer. Read the contents for more\n"
5623 "\t\t\t information\n"
5625 #ifdef CONFIG_STACK_TRACER
5626 " stack_trace\t\t- Shows the max stack trace when active\n"
5627 " stack_max_size\t- Shows current max stack size that was traced\n"
5628 "\t\t\t Write into this file to reset the max size (trigger a\n"
5629 "\t\t\t new trace)\n"
5630 #ifdef CONFIG_DYNAMIC_FTRACE
5631 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5634 #endif /* CONFIG_STACK_TRACER */
5635 #ifdef CONFIG_DYNAMIC_EVENTS
5636 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5637 "\t\t\t Write into this file to define/undefine new trace events.\n"
5639 #ifdef CONFIG_KPROBE_EVENTS
5640 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5641 "\t\t\t Write into this file to define/undefine new trace events.\n"
5643 #ifdef CONFIG_UPROBE_EVENTS
5644 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5645 "\t\t\t Write into this file to define/undefine new trace events.\n"
5647 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5648 "\t accepts: event-definitions (one definition per line)\n"
5649 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5650 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5651 #ifdef CONFIG_HIST_TRIGGERS
5652 "\t s:[synthetic/]<event> <field> [<field>]\n"
5654 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5655 "\t -:[<group>/][<event>]\n"
5656 #ifdef CONFIG_KPROBE_EVENTS
5657 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5658 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5660 #ifdef CONFIG_UPROBE_EVENTS
5661 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5663 "\t args: <name>=fetcharg[:type]\n"
5664 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5665 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5666 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5668 "\t $stack<index>, $stack, $retval, $comm,\n"
5670 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5671 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5672 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5673 "\t symstr, <type>\\[<array-size>\\]\n"
5674 #ifdef CONFIG_HIST_TRIGGERS
5675 "\t field: <stype> <name>;\n"
5676 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5677 "\t [unsigned] char/int/long\n"
5679 "\t efield: For event probes ('e' types), the field is one of the fields\n"
5680 "\t of the <attached-group>/<attached-event>.\n"
5682 " events/\t\t- Directory containing all trace event subsystems:\n"
5683 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5684 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5685 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5687 " filter\t\t- If set, only events passing filter are traced\n"
5688 " events/<system>/<event>/\t- Directory containing control files for\n"
5690 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5691 " filter\t\t- If set, only events passing filter are traced\n"
5692 " trigger\t\t- If set, a command to perform when event is hit\n"
5693 "\t Format: <trigger>[:count][if <filter>]\n"
5694 "\t trigger: traceon, traceoff\n"
5695 "\t enable_event:<system>:<event>\n"
5696 "\t disable_event:<system>:<event>\n"
5697 #ifdef CONFIG_HIST_TRIGGERS
5698 "\t enable_hist:<system>:<event>\n"
5699 "\t disable_hist:<system>:<event>\n"
5701 #ifdef CONFIG_STACKTRACE
5704 #ifdef CONFIG_TRACER_SNAPSHOT
5707 #ifdef CONFIG_HIST_TRIGGERS
5708 "\t\t hist (see below)\n"
5710 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5711 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5712 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5713 "\t events/block/block_unplug/trigger\n"
5714 "\t The first disables tracing every time block_unplug is hit.\n"
5715 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5716 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5717 "\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
5718 "\t Like function triggers, the counter is only decremented if it\n"
5719 "\t enabled or disabled tracing.\n"
5720 "\t To remove a trigger without a count:\n"
5721 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
5722 "\t To remove a trigger with a count:\n"
5723 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
5724 "\t Filters can be ignored when removing a trigger.\n"
5725 #ifdef CONFIG_HIST_TRIGGERS
5726 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5727 "\t Format: hist:keys=<field1[,field2,...]>\n"
5728 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5729 "\t [:values=<field1[,field2,...]>]\n"
5730 "\t [:sort=<field1[,field2,...]>]\n"
5731 "\t [:size=#entries]\n"
5732 "\t [:pause][:continue][:clear]\n"
5733 "\t [:name=histname1]\n"
5734 "\t [:nohitcount]\n"
5735 "\t [:<handler>.<action>]\n"
5736 "\t [if <filter>]\n\n"
5737 "\t Note, special fields can be used as well:\n"
5738 "\t common_timestamp - to record current timestamp\n"
5739 "\t common_cpu - to record the CPU the event happened on\n"
5741 "\t A hist trigger variable can be:\n"
5742 "\t - a reference to a field e.g. x=current_timestamp,\n"
5743 "\t - a reference to another variable e.g. y=$x,\n"
5744 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5745 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5747 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5748 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5749 "\t variable reference, field or numeric literal.\n"
5751 "\t When a matching event is hit, an entry is added to a hash\n"
5752 "\t table using the key(s) and value(s) named, and the value of a\n"
5753 "\t sum called 'hitcount' is incremented. Keys and values\n"
5754 "\t correspond to fields in the event's format description. Keys\n"
5755 "\t can be any field, or the special string 'stacktrace'.\n"
5756 "\t Compound keys consisting of up to two fields can be specified\n"
5757 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5758 "\t fields. Sort keys consisting of up to two fields can be\n"
5759 "\t specified using the 'sort' keyword. The sort direction can\n"
5760 "\t be modified by appending '.descending' or '.ascending' to a\n"
5761 "\t sort field. The 'size' parameter can be used to specify more\n"
5762 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5763 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5764 "\t its histogram data will be shared with other triggers of the\n"
5765 "\t same name, and trigger hits will update this common data.\n\n"
5766 "\t Reading the 'hist' file for the event will dump the hash\n"
5767 "\t table in its entirety to stdout. If there are multiple hist\n"
5768 "\t triggers attached to an event, there will be a table for each\n"
5769 "\t trigger in the output. The table displayed for a named\n"
5770 "\t trigger will be the same as any other instance having the\n"
5771 "\t same name. The default format used to display a given field\n"
5772 "\t can be modified by appending any of the following modifiers\n"
5773 "\t to the field name, as applicable:\n\n"
5774 "\t .hex display a number as a hex value\n"
5775 "\t .sym display an address as a symbol\n"
5776 "\t .sym-offset display an address as a symbol and offset\n"
5777 "\t .execname display a common_pid as a program name\n"
5778 "\t .syscall display a syscall id as a syscall name\n"
5779 "\t .log2 display log2 value rather than raw number\n"
5780 "\t .buckets=size display values in groups of size rather than raw number\n"
5781 "\t .usecs display a common_timestamp in microseconds\n"
5782 "\t .percent display a number as a percentage value\n"
5783 "\t .graph display a bar-graph of a value\n\n"
5784 "\t The 'pause' parameter can be used to pause an existing hist\n"
5785 "\t trigger or to start a hist trigger but not log any events\n"
5786 "\t until told to do so. 'continue' can be used to start or\n"
5787 "\t restart a paused hist trigger.\n\n"
5788 "\t The 'clear' parameter will clear the contents of a running\n"
5789 "\t hist trigger and leave its current paused/active state\n"
5791 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5792 "\t raw hitcount in the histogram.\n\n"
5793 "\t The enable_hist and disable_hist triggers can be used to\n"
5794 "\t have one event conditionally start and stop another event's\n"
5795 "\t already-attached hist trigger. The syntax is analogous to\n"
5796 "\t the enable_event and disable_event triggers.\n\n"
5797 "\t Hist trigger handlers and actions are executed whenever a\n"
5798 "\t histogram entry is added or updated. They take the form:\n\n"
5799 "\t <handler>.<action>\n\n"
5800 "\t The available handlers are:\n\n"
5801 "\t onmatch(matching.event) - invoke on addition or update\n"
5802 "\t onmax(var) - invoke if var exceeds current max\n"
5803 "\t onchange(var) - invoke action if var changes\n\n"
5804 "\t The available actions are:\n\n"
5805 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5806 "\t save(field,...) - save current event fields\n"
5807 #ifdef CONFIG_TRACER_SNAPSHOT
5808 "\t snapshot() - snapshot the trace buffer\n\n"
5810 #ifdef CONFIG_SYNTH_EVENTS
5811 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5812 "\t Write into this file to define/undefine new synthetic events.\n"
5813 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5819 tracing_readme_read(struct file *filp, char __user *ubuf,
5820 size_t cnt, loff_t *ppos)
5822 return simple_read_from_buffer(ubuf, cnt, ppos,
5823 readme_msg, strlen(readme_msg));
5826 static const struct file_operations tracing_readme_fops = {
5827 .open = tracing_open_generic,
5828 .read = tracing_readme_read,
5829 .llseek = generic_file_llseek,
5832 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5836 return trace_find_tgid_ptr(pid);
5839 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5843 return trace_find_tgid_ptr(pid);
5846 static void saved_tgids_stop(struct seq_file *m, void *v)
5850 static int saved_tgids_show(struct seq_file *m, void *v)
5852 int *entry = (int *)v;
5853 int pid = entry - tgid_map;
5859 seq_printf(m, "%d %d\n", pid, tgid);
5863 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5864 .start = saved_tgids_start,
5865 .stop = saved_tgids_stop,
5866 .next = saved_tgids_next,
5867 .show = saved_tgids_show,
5870 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5874 ret = tracing_check_open_get_tr(NULL);
5878 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5882 static const struct file_operations tracing_saved_tgids_fops = {
5883 .open = tracing_saved_tgids_open,
5885 .llseek = seq_lseek,
5886 .release = seq_release,
5889 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5891 unsigned int *ptr = v;
5893 if (*pos || m->count)
5898 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5900 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5909 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5915 arch_spin_lock(&trace_cmdline_lock);
5917 v = &savedcmd->map_cmdline_to_pid[0];
5919 v = saved_cmdlines_next(m, v, &l);
5927 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5929 arch_spin_unlock(&trace_cmdline_lock);
5933 static int saved_cmdlines_show(struct seq_file *m, void *v)
5935 char buf[TASK_COMM_LEN];
5936 unsigned int *pid = v;
5938 __trace_find_cmdline(*pid, buf);
5939 seq_printf(m, "%d %s\n", *pid, buf);
5943 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5944 .start = saved_cmdlines_start,
5945 .next = saved_cmdlines_next,
5946 .stop = saved_cmdlines_stop,
5947 .show = saved_cmdlines_show,
5950 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5954 ret = tracing_check_open_get_tr(NULL);
5958 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5961 static const struct file_operations tracing_saved_cmdlines_fops = {
5962 .open = tracing_saved_cmdlines_open,
5964 .llseek = seq_lseek,
5965 .release = seq_release,
5969 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5970 size_t cnt, loff_t *ppos)
5976 arch_spin_lock(&trace_cmdline_lock);
5977 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5978 arch_spin_unlock(&trace_cmdline_lock);
5981 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5984 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5986 kfree(s->saved_cmdlines);
5987 kfree(s->map_cmdline_to_pid);
5991 static int tracing_resize_saved_cmdlines(unsigned int val)
5993 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5995 s = kmalloc(sizeof(*s), GFP_KERNEL);
5999 if (allocate_cmdlines_buffer(val, s) < 0) {
6005 arch_spin_lock(&trace_cmdline_lock);
6006 savedcmd_temp = savedcmd;
6008 arch_spin_unlock(&trace_cmdline_lock);
6010 free_saved_cmdlines_buffer(savedcmd_temp);
6016 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6017 size_t cnt, loff_t *ppos)
6022 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6026 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
6027 if (!val || val > PID_MAX_DEFAULT)
6030 ret = tracing_resize_saved_cmdlines((unsigned int)val);
6039 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6040 .open = tracing_open_generic,
6041 .read = tracing_saved_cmdlines_size_read,
6042 .write = tracing_saved_cmdlines_size_write,
6045 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6046 static union trace_eval_map_item *
6047 update_eval_map(union trace_eval_map_item *ptr)
6049 if (!ptr->map.eval_string) {
6050 if (ptr->tail.next) {
6051 ptr = ptr->tail.next;
6052 /* Set ptr to the next real item (skip head) */
6060 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6062 union trace_eval_map_item *ptr = v;
6065 * Paranoid! If ptr points to end, we don't want to increment past it.
6066 * This really should never happen.
6069 ptr = update_eval_map(ptr);
6070 if (WARN_ON_ONCE(!ptr))
6074 ptr = update_eval_map(ptr);
6079 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6081 union trace_eval_map_item *v;
6084 mutex_lock(&trace_eval_mutex);
6086 v = trace_eval_maps;
6090 while (v && l < *pos) {
6091 v = eval_map_next(m, v, &l);
6097 static void eval_map_stop(struct seq_file *m, void *v)
6099 mutex_unlock(&trace_eval_mutex);
6102 static int eval_map_show(struct seq_file *m, void *v)
6104 union trace_eval_map_item *ptr = v;
6106 seq_printf(m, "%s %ld (%s)\n",
6107 ptr->map.eval_string, ptr->map.eval_value,
6113 static const struct seq_operations tracing_eval_map_seq_ops = {
6114 .start = eval_map_start,
6115 .next = eval_map_next,
6116 .stop = eval_map_stop,
6117 .show = eval_map_show,
6120 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6124 ret = tracing_check_open_get_tr(NULL);
6128 return seq_open(filp, &tracing_eval_map_seq_ops);
6131 static const struct file_operations tracing_eval_map_fops = {
6132 .open = tracing_eval_map_open,
6134 .llseek = seq_lseek,
6135 .release = seq_release,
6138 static inline union trace_eval_map_item *
6139 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6141 /* Return tail of array given the head */
6142 return ptr + ptr->head.length + 1;
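/*
 * Illustrative layout of one trace_eval_map_item array, as built by
 * trace_insert_eval_map_file() below (a head and a tail item bracket
 * the maps):
 *
 *	[ head: mod, length ][ map[0] ] ... [ map[length - 1] ][ tail: next ]
 *
 * which is why the tail lives at ptr + ptr->head.length + 1.
 */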
6146 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6149 struct trace_eval_map **stop;
6150 struct trace_eval_map **map;
6151 union trace_eval_map_item *map_array;
6152 union trace_eval_map_item *ptr;
6157 * The trace_eval_maps contains the map plus a head and tail item,
6158 * where the head holds the module and length of array, and the
6159 * tail holds a pointer to the next list.
6161 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6163 pr_warn("Unable to allocate trace eval mapping\n");
6167 mutex_lock(&trace_eval_mutex);
6169 if (!trace_eval_maps)
6170 trace_eval_maps = map_array;
6172 ptr = trace_eval_maps;
6174 ptr = trace_eval_jmp_to_tail(ptr);
6175 if (!ptr->tail.next)
6177 ptr = ptr->tail.next;
6180 ptr->tail.next = map_array;
6182 map_array->head.mod = mod;
6183 map_array->head.length = len;
6186 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6187 map_array->map = **map;
6190 memset(map_array, 0, sizeof(*map_array));
6192 mutex_unlock(&trace_eval_mutex);
6195 static void trace_create_eval_file(struct dentry *d_tracer)
6197 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6198 NULL, &tracing_eval_map_fops);
6201 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6202 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6203 static inline void trace_insert_eval_map_file(struct module *mod,
6204 struct trace_eval_map **start, int len) { }
6205 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6207 static void trace_insert_eval_map(struct module *mod,
6208 struct trace_eval_map **start, int len)
6210 struct trace_eval_map **map;
6217 trace_event_eval_update(map, len);
6219 trace_insert_eval_map_file(mod, start, len);
6223 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6224 size_t cnt, loff_t *ppos)
6226 struct trace_array *tr = filp->private_data;
6227 char buf[MAX_TRACER_SIZE+2];
6230 mutex_lock(&trace_types_lock);
6231 r = sprintf(buf, "%s\n", tr->current_trace->name);
6232 mutex_unlock(&trace_types_lock);
6234 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6237 int tracer_init(struct tracer *t, struct trace_array *tr)
6239 tracing_reset_online_cpus(&tr->array_buffer);
6243 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6247 for_each_tracing_cpu(cpu)
6248 per_cpu_ptr(buf->data, cpu)->entries = val;
6251 #ifdef CONFIG_TRACER_MAX_TRACE
6252 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6253 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6254 struct array_buffer *size_buf, int cpu_id)
6258 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6259 for_each_tracing_cpu(cpu) {
6260 ret = ring_buffer_resize(trace_buf->buffer,
6261 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6264 per_cpu_ptr(trace_buf->data, cpu)->entries =
6265 per_cpu_ptr(size_buf->data, cpu)->entries;
6268 ret = ring_buffer_resize(trace_buf->buffer,
6269 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6271 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6272 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6277 #endif /* CONFIG_TRACER_MAX_TRACE */
6279 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6280 unsigned long size, int cpu)
6285 * If the kernel or user changes the size of the ring buffer,
6286 * we use the size that was given, and we can forget about
6287 * expanding it later.
6289 ring_buffer_expanded = true;
6291 /* May be called before buffers are initialized */
6292 if (!tr->array_buffer.buffer)
6295 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6299 #ifdef CONFIG_TRACER_MAX_TRACE
6300 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6301 !tr->current_trace->use_max_tr)
6304 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6306 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6307 &tr->array_buffer, cpu);
6310 * AARGH! We are left with different
6311 * size max buffer!!!!
6312 * The max buffer is our "snapshot" buffer.
6313 * When a tracer needs a snapshot (one of the
6314 * latency tracers), it swaps the max buffer
6315 * with the saved snapshot. We succeeded in
6316 * updating the size of the main buffer, but failed to
6317 * update the size of the max buffer. But when we tried
6318 * to reset the main buffer to the original size, we
6319 * failed there too. This is very unlikely to
6320 * happen, but if it does, warn and kill all
6324 tracing_disabled = 1;
6329 if (cpu == RING_BUFFER_ALL_CPUS)
6330 set_buffer_entries(&tr->max_buffer, size);
6332 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6335 #endif /* CONFIG_TRACER_MAX_TRACE */
6337 if (cpu == RING_BUFFER_ALL_CPUS)
6338 set_buffer_entries(&tr->array_buffer, size);
6340 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6345 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6346 unsigned long size, int cpu_id)
6350 mutex_lock(&trace_types_lock);
6352 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6353 /* make sure, this cpu is enabled in the mask */
6354 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6360 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6365 mutex_unlock(&trace_types_lock);
6372 * tracing_update_buffers - used by tracing facility to expand ring buffers
6374 * To save on memory when tracing is never used on a system with it
6375 * configured in, the ring buffers are set to a minimum size. But once
6376 * a user starts to use the tracing facility, then they need to grow
6377 * to their default size.
6379 * This function is to be called when a tracer is about to be used.
6381 int tracing_update_buffers(void)
6385 mutex_lock(&trace_types_lock);
6386 if (!ring_buffer_expanded)
6387 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6388 RING_BUFFER_ALL_CPUS);
6389 mutex_unlock(&trace_types_lock);
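/*
 * Minimal sketch of a typical caller (assumed; e.g. an event-enable
 * path that is about to start using tracing):
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */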
6394 struct trace_option_dentry;
6397 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6400 * Used to clear out the tracer before deletion of an instance.
6401 * Must have trace_types_lock held.
6403 static void tracing_set_nop(struct trace_array *tr)
6405 if (tr->current_trace == &nop_trace)
6408 tr->current_trace->enabled--;
6410 if (tr->current_trace->reset)
6411 tr->current_trace->reset(tr);
6413 tr->current_trace = &nop_trace;
6416 static bool tracer_options_updated;
6418 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6420 /* Only enable if the directory has been created already. */
6424 /* Only create trace option files after update_tracer_options finish */
6425 if (!tracer_options_updated)
6428 create_trace_option_files(tr, t);
6431 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6434 #ifdef CONFIG_TRACER_MAX_TRACE
6439 mutex_lock(&trace_types_lock);
6441 if (!ring_buffer_expanded) {
6442 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6443 RING_BUFFER_ALL_CPUS);
6449 for (t = trace_types; t; t = t->next) {
6450 if (strcmp(t->name, buf) == 0)
6457 if (t == tr->current_trace)
6460 #ifdef CONFIG_TRACER_SNAPSHOT
6461 if (t->use_max_tr) {
6462 local_irq_disable();
6463 arch_spin_lock(&tr->max_lock);
6464 if (tr->cond_snapshot)
6466 arch_spin_unlock(&tr->max_lock);
6472 /* Some tracers won't work on kernel command line */
6473 if (system_state < SYSTEM_RUNNING && t->noboot) {
6474 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6479 /* Some tracers are only allowed for the top level buffer */
6480 if (!trace_ok_for_array(t, tr)) {
6485 /* If trace pipe files are being read, we can't change the tracer */
6486 if (tr->trace_ref) {
6491 trace_branch_disable();
6493 tr->current_trace->enabled--;
6495 if (tr->current_trace->reset)
6496 tr->current_trace->reset(tr);
6498 #ifdef CONFIG_TRACER_MAX_TRACE
6499 had_max_tr = tr->current_trace->use_max_tr;
6501 /* Current trace needs to be nop_trace before synchronize_rcu */
6502 tr->current_trace = &nop_trace;
6504 if (had_max_tr && !t->use_max_tr) {
6506 * We need to make sure that the update_max_tr sees that
6507 * current_trace changed to nop_trace to keep it from
6508 * swapping the buffers after we resize it.
6509 * The update_max_tr is called with interrupts disabled,
6510 * so a synchronize_rcu() is sufficient.
6516 if (t->use_max_tr && !tr->allocated_snapshot) {
6517 ret = tracing_alloc_snapshot_instance(tr);
6522 tr->current_trace = &nop_trace;
6526 ret = tracer_init(t, tr);
6531 tr->current_trace = t;
6532 tr->current_trace->enabled++;
6533 trace_branch_enable(tr);
6535 mutex_unlock(&trace_types_lock);
6541 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6542 size_t cnt, loff_t *ppos)
6544 struct trace_array *tr = filp->private_data;
6545 char buf[MAX_TRACER_SIZE+1];
6552 if (cnt > MAX_TRACER_SIZE)
6553 cnt = MAX_TRACER_SIZE;
6555 if (copy_from_user(buf, ubuf, cnt))
6562 err = tracing_set_tracer(tr, name);
6572 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6573 size_t cnt, loff_t *ppos)
6578 r = snprintf(buf, sizeof(buf), "%ld\n",
6579 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6580 if (r > sizeof(buf))
6582 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6586 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6587 size_t cnt, loff_t *ppos)
6592 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6602 tracing_thresh_read(struct file *filp, char __user *ubuf,
6603 size_t cnt, loff_t *ppos)
6605 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6609 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6610 size_t cnt, loff_t *ppos)
6612 struct trace_array *tr = filp->private_data;
6615 mutex_lock(&trace_types_lock);
6616 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6620 if (tr->current_trace->update_thresh) {
6621 ret = tr->current_trace->update_thresh(tr);
6628 mutex_unlock(&trace_types_lock);
6633 #ifdef CONFIG_TRACER_MAX_TRACE
6636 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6637 size_t cnt, loff_t *ppos)
6639 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6643 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6644 size_t cnt, loff_t *ppos)
6646 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6651 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6653 struct trace_array *tr = inode->i_private;
6654 struct trace_iterator *iter;
6657 ret = tracing_check_open_get_tr(tr);
6661 mutex_lock(&trace_types_lock);
6663 /* create a buffer to store the information to pass to userspace */
6664 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6667 __trace_array_put(tr);
6671 trace_seq_init(&iter->seq);
6672 iter->trace = tr->current_trace;
6674 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6679 /* trace pipe does not show start of buffer */
6680 cpumask_setall(iter->started);
6682 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6683 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6685 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6686 if (trace_clocks[tr->clock_id].in_ns)
6687 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6690 iter->array_buffer = &tr->array_buffer;
6691 iter->cpu_file = tracing_get_cpu(inode);
6692 mutex_init(&iter->mutex);
6693 filp->private_data = iter;
6695 if (iter->trace->pipe_open)
6696 iter->trace->pipe_open(iter);
6698 nonseekable_open(inode, filp);
6702 mutex_unlock(&trace_types_lock);
6707 __trace_array_put(tr);
6708 mutex_unlock(&trace_types_lock);
6712 static int tracing_release_pipe(struct inode *inode, struct file *file)
6714 struct trace_iterator *iter = file->private_data;
6715 struct trace_array *tr = inode->i_private;
6717 mutex_lock(&trace_types_lock);
6721 if (iter->trace->pipe_close)
6722 iter->trace->pipe_close(iter);
6724 mutex_unlock(&trace_types_lock);
6726 free_cpumask_var(iter->started);
6728 mutex_destroy(&iter->mutex);
6731 trace_array_put(tr);
6737 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6739 struct trace_array *tr = iter->tr;
6741 /* Iterators are static, they should be filled or empty */
6742 if (trace_buffer_iter(iter, iter->cpu_file))
6743 return EPOLLIN | EPOLLRDNORM;
6745 if (tr->trace_flags & TRACE_ITER_BLOCK)
6747 * Always select as readable when in blocking mode
6749 return EPOLLIN | EPOLLRDNORM;
6751 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6752 filp, poll_table, iter->tr->buffer_percent);
6756 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6758 struct trace_iterator *iter = filp->private_data;
6760 return trace_poll(iter, filp, poll_table);
6763 /* Must be called with iter->mutex held. */
6764 static int tracing_wait_pipe(struct file *filp)
6766 struct trace_iterator *iter = filp->private_data;
6769 while (trace_empty(iter)) {
6771 if ((filp->f_flags & O_NONBLOCK)) {
6776 * We block until we read something and tracing is disabled.
6777 * We still block if tracing is disabled, but we have never
6778 * read anything. This allows a user to cat this file, and
6779 * then enable tracing. But after we have read something,
6780 * we give an EOF when tracing is again disabled.
6782 * iter->pos will be 0 if we haven't read anything.
6784 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6787 mutex_unlock(&iter->mutex);
6789 ret = wait_on_pipe(iter, 0);
6791 mutex_lock(&iter->mutex);
6804 tracing_read_pipe(struct file *filp, char __user *ubuf,
6805 size_t cnt, loff_t *ppos)
6807 struct trace_iterator *iter = filp->private_data;
6811 * Avoid more than one consumer on a single file descriptor.
6812 * This is just a matter of trace coherency, the ring buffer itself
6815 mutex_lock(&iter->mutex);
6817 /* return any leftover data */
6818 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6822 trace_seq_init(&iter->seq);
6824 if (iter->trace->read) {
6825 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6831 sret = tracing_wait_pipe(filp);
6835 /* stop when tracing is finished */
6836 if (trace_empty(iter)) {
6841 if (cnt >= PAGE_SIZE)
6842 cnt = PAGE_SIZE - 1;
6844 /* reset all but tr, trace, and overruns */
6845 trace_iterator_reset(iter);
6846 cpumask_clear(iter->started);
6847 trace_seq_init(&iter->seq);
6849 trace_event_read_lock();
6850 trace_access_lock(iter->cpu_file);
6851 while (trace_find_next_entry_inc(iter) != NULL) {
6852 enum print_line_t ret;
6853 int save_len = iter->seq.seq.len;
6855 ret = print_trace_line(iter);
6856 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6858 * If one print_trace_line() fills the entire trace_seq in one shot,
6859 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6860 * In this case, we need to consume it, otherwise the loop will peek
6861 * this event again next time, resulting in an infinite loop.
6863 if (save_len == 0) {
6865 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6866 trace_consume(iter);
6870 /* In other cases, don't print partial lines */
6871 iter->seq.seq.len = save_len;
6874 if (ret != TRACE_TYPE_NO_CONSUME)
6875 trace_consume(iter);
6877 if (trace_seq_used(&iter->seq) >= cnt)
6881 * Setting the full flag means we reached the trace_seq buffer
6882 * size and should have left via the partial output condition above.
6883 * If we get here, one of the trace_seq_* functions is not being used properly.
6885 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6888 trace_access_unlock(iter->cpu_file);
6889 trace_event_read_unlock();
6891 /* Now copy what we have to the user */
6892 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6893 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6894 trace_seq_init(&iter->seq);
6897 * If there was nothing to send to user, in spite of consuming trace
6898 * entries, go back to wait for more entries.
6904 mutex_unlock(&iter->mutex);
6909 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6912 __free_page(spd->pages[idx]);
6916 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6922 /* Seq buffer is page-sized, exactly what we need. */
6924 save_len = iter->seq.seq.len;
6925 ret = print_trace_line(iter);
6927 if (trace_seq_has_overflowed(&iter->seq)) {
6928 iter->seq.seq.len = save_len;
6933 * This should not be hit, because it should only
6934 * be set if the iter->seq overflowed. But check it
6935 * anyway to be safe.
6937 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6938 iter->seq.seq.len = save_len;
6942 count = trace_seq_used(&iter->seq) - save_len;
6945 iter->seq.seq.len = save_len;
6949 if (ret != TRACE_TYPE_NO_CONSUME)
6950 trace_consume(iter);
6952 if (!trace_find_next_entry_inc(iter)) {
6962 static ssize_t tracing_splice_read_pipe(struct file *filp,
6964 struct pipe_inode_info *pipe,
6968 struct page *pages_def[PIPE_DEF_BUFFERS];
6969 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6970 struct trace_iterator *iter = filp->private_data;
6971 struct splice_pipe_desc spd = {
6973 .partial = partial_def,
6974 .nr_pages = 0, /* This gets updated below. */
6975 .nr_pages_max = PIPE_DEF_BUFFERS,
6976 .ops = &default_pipe_buf_ops,
6977 .spd_release = tracing_spd_release_pipe,
6983 if (splice_grow_spd(pipe, &spd))
6986 mutex_lock(&iter->mutex);
6988 if (iter->trace->splice_read) {
6989 ret = iter->trace->splice_read(iter, filp,
6990 ppos, pipe, len, flags);
6995 ret = tracing_wait_pipe(filp);
6999 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7004 trace_event_read_lock();
7005 trace_access_lock(iter->cpu_file);
7007 /* Fill as many pages as possible. */
7008 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7009 spd.pages[i] = alloc_page(GFP_KERNEL);
7013 rem = tracing_fill_pipe_page(rem, iter);
7015 /* Copy the data into the page, so we can start over. */
7016 ret = trace_seq_to_buffer(&iter->seq,
7017 page_address(spd.pages[i]),
7018 trace_seq_used(&iter->seq));
7020 __free_page(spd.pages[i]);
7023 spd.partial[i].offset = 0;
7024 spd.partial[i].len = trace_seq_used(&iter->seq);
7026 trace_seq_init(&iter->seq);
7029 trace_access_unlock(iter->cpu_file);
7030 trace_event_read_unlock();
7031 mutex_unlock(&iter->mutex);
7036 ret = splice_to_pipe(pipe, &spd);
7040 splice_shrink_spd(&spd);
7044 mutex_unlock(&iter->mutex);
7049 tracing_entries_read(struct file *filp, char __user *ubuf,
7050 size_t cnt, loff_t *ppos)
7052 struct inode *inode = file_inode(filp);
7053 struct trace_array *tr = inode->i_private;
7054 int cpu = tracing_get_cpu(inode);
7059 mutex_lock(&trace_types_lock);
7061 if (cpu == RING_BUFFER_ALL_CPUS) {
7062 int cpu, buf_size_same;
7067 /* check if all cpu sizes are same */
7068 for_each_tracing_cpu(cpu) {
7069 /* fill in the size from first enabled cpu */
7071 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7072 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7078 if (buf_size_same) {
7079 if (!ring_buffer_expanded)
7080 r = sprintf(buf, "%lu (expanded: %lu)\n",
7082 trace_buf_size >> 10);
7084 r = sprintf(buf, "%lu\n", size >> 10);
7086 r = sprintf(buf, "X\n");
7088 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7090 mutex_unlock(&trace_types_lock);
7092 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7097 tracing_entries_write(struct file *filp, const char __user *ubuf,
7098 size_t cnt, loff_t *ppos)
7100 struct inode *inode = file_inode(filp);
7101 struct trace_array *tr = inode->i_private;
7105 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7109 /* must have at least 1 entry */
7113 /* value is in KB */
7115 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7125 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7126 size_t cnt, loff_t *ppos)
7128 struct trace_array *tr = filp->private_data;
7131 unsigned long size = 0, expanded_size = 0;
7133 mutex_lock(&trace_types_lock);
7134 for_each_tracing_cpu(cpu) {
7135 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7136 if (!ring_buffer_expanded)
7137 expanded_size += trace_buf_size >> 10;
7139 if (ring_buffer_expanded)
7140 r = sprintf(buf, "%lu\n", size);
7142 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7143 mutex_unlock(&trace_types_lock);
7145 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7149 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7150 size_t cnt, loff_t *ppos)
7153 * There is no need to read what the user has written, this function
7154 * is just to make sure that there is no error when "echo" is used
7163 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7165 struct trace_array *tr = inode->i_private;
7167 /* disable tracing ? */
7168 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7169 tracer_tracing_off(tr);
7170 /* resize the ring buffer to 0 */
7171 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7173 trace_array_put(tr);
7179 tracing_mark_write(struct file *filp, const char __user *ubuf,
7180 size_t cnt, loff_t *fpos)
7182 struct trace_array *tr = filp->private_data;
7183 struct ring_buffer_event *event;
7184 enum event_trigger_type tt = ETT_NONE;
7185 struct trace_buffer *buffer;
7186 struct print_entry *entry;
7191 /* Used in tracing_mark_raw_write() as well */
7192 #define FAULTED_STR "<faulted>"
7193 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7195 if (tracing_disabled)
7198 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7201 if (cnt > TRACE_BUF_SIZE)
7202 cnt = TRACE_BUF_SIZE;
7204 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7206 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7208 /* If less than "<faulted>", then make sure we can still add that */
7209 if (cnt < FAULTED_SIZE)
7210 size += FAULTED_SIZE - cnt;
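/*
 * Worked example: with cnt == 3 and FAULTED_SIZE == 9, size becomes
 * sizeof(*entry) + 3 + 2 + (9 - 3) = sizeof(*entry) + 11, i.e. big
 * enough to hold either the user string or "<faulted>" plus the
 * trailing '\n' and '\0'.
 */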
7212 buffer = tr->array_buffer.buffer;
7213 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7215 if (unlikely(!event))
7216 /* Ring buffer disabled, return as if not open for write */
7219 entry = ring_buffer_event_data(event);
7220 entry->ip = _THIS_IP_;
7222 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7224 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7230 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7231 /* do not add \n before testing triggers, but add \0 */
7232 entry->buf[cnt] = '\0';
7233 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7236 if (entry->buf[cnt - 1] != '\n') {
7237 entry->buf[cnt] = '\n';
7238 entry->buf[cnt + 1] = '\0';
7240 entry->buf[cnt] = '\0';
7242 if (static_branch_unlikely(&trace_marker_exports_enabled))
7243 ftrace_exports(event, TRACE_EXPORT_MARKER);
7244 __buffer_unlock_commit(buffer, event);
7247 event_triggers_post_call(tr->trace_marker_file, tt);
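/*
 * Usage sketch (not from this file): user space logs a message by writing
 * plain text to trace_marker; the handler above turns it into a TRACE_PRINT
 * event, appends a newline if one is missing, and fires any triggers
 * attached to the ftrace/print event:
 *
 *	echo "hello from user space" > /sys/kernel/tracing/trace_marker
 */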
7252 /* Limit it for now to 3K (including tag) */
7253 #define RAW_DATA_MAX_SIZE (1024*3)
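/*
 * Usage sketch (not from this file; fd is assumed to be open on
 * trace_marker_raw): the file expects a binary payload that starts with an
 * (unsigned int) tag id followed by raw data, at least sizeof(unsigned int)
 * and at most RAW_DATA_MAX_SIZE bytes long, e.g.:
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *
 *	write(fd, &rec, sizeof(rec));
 */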
7256 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7257 size_t cnt, loff_t *fpos)
7259 struct trace_array *tr = filp->private_data;
7260 struct ring_buffer_event *event;
7261 struct trace_buffer *buffer;
7262 struct raw_data_entry *entry;
7267 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7269 if (tracing_disabled)
7272 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7275 /* The marker must at least have a tag id */
7276 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7279 if (cnt > TRACE_BUF_SIZE)
7280 cnt = TRACE_BUF_SIZE;
7282 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7284 size = sizeof(*entry) + cnt;
7285 if (cnt < FAULT_SIZE_ID)
7286 size += FAULT_SIZE_ID - cnt;
7288 buffer = tr->array_buffer.buffer;
7289 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7292 /* Ring buffer disabled, return as if not open for write */
7295 entry = ring_buffer_event_data(event);
7297 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7300 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7305 __buffer_unlock_commit(buffer, event);
7310 static int tracing_clock_show(struct seq_file *m, void *v)
7312 struct trace_array *tr = m->private;
7315 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7317 "%s%s%s%s", i ? " " : "",
7318 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7319 i == tr->clock_id ? "]" : "");
7325 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7329 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7330 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7333 if (i == ARRAY_SIZE(trace_clocks))
7336 mutex_lock(&trace_types_lock);
7340 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7343 * New clock may not be consistent with the previous clock.
7344 * Reset the buffer so that it doesn't have incomparable timestamps.
7346 tracing_reset_online_cpus(&tr->array_buffer);
7348 #ifdef CONFIG_TRACER_MAX_TRACE
7349 if (tr->max_buffer.buffer)
7350 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7351 tracing_reset_online_cpus(&tr->max_buffer);
7354 mutex_unlock(&trace_types_lock);
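/*
 * Usage sketch (not from this file): reading trace_clock lists the
 * available clocks with the current one in brackets, e.g.
 * "[local] global counter uptime perf mono mono_raw boot"; writing one of
 * the names switches the clock and resets the buffers, since timestamps
 * taken with different clocks are not comparable:
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 */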
7359 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7360 size_t cnt, loff_t *fpos)
7362 struct seq_file *m = filp->private_data;
7363 struct trace_array *tr = m->private;
7365 const char *clockstr;
7368 if (cnt >= sizeof(buf))
7371 if (copy_from_user(buf, ubuf, cnt))
7376 clockstr = strstrip(buf);
7378 ret = tracing_set_clock(tr, clockstr);
7387 static int tracing_clock_open(struct inode *inode, struct file *file)
7389 struct trace_array *tr = inode->i_private;
7392 ret = tracing_check_open_get_tr(tr);
7396 ret = single_open(file, tracing_clock_show, inode->i_private);
7398 trace_array_put(tr);
7403 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7405 struct trace_array *tr = m->private;
7407 mutex_lock(&trace_types_lock);
7409 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7410 seq_puts(m, "delta [absolute]\n");
7412 seq_puts(m, "[delta] absolute\n");
7414 mutex_unlock(&trace_types_lock);
7419 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7421 struct trace_array *tr = inode->i_private;
7424 ret = tracing_check_open_get_tr(tr);
7428 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7430 trace_array_put(tr);
7435 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7437 if (rbe == this_cpu_read(trace_buffered_event))
7438 return ring_buffer_time_stamp(buffer);
7440 return ring_buffer_event_time_stamp(buffer, rbe);
7444 * Set or disable using the per CPU trace_buffered_event when possible.
7446 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7450 mutex_lock(&trace_types_lock);
7452 if (set && tr->no_filter_buffering_ref++)
7456 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7461 --tr->no_filter_buffering_ref;
7464 mutex_unlock(&trace_types_lock);
7469 struct ftrace_buffer_info {
7470 struct trace_iterator iter;
7472 unsigned int spare_cpu;
7476 #ifdef CONFIG_TRACER_SNAPSHOT
7477 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7479 struct trace_array *tr = inode->i_private;
7480 struct trace_iterator *iter;
7484 ret = tracing_check_open_get_tr(tr);
7488 if (file->f_mode & FMODE_READ) {
7489 iter = __tracing_open(inode, file, true);
7491 ret = PTR_ERR(iter);
7493 /* Writes still need the seq_file to hold the private data */
7495 m = kzalloc(sizeof(*m), GFP_KERNEL);
7498 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7506 iter->array_buffer = &tr->max_buffer;
7507 iter->cpu_file = tracing_get_cpu(inode);
7509 file->private_data = m;
7513 trace_array_put(tr);
7519 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7522 struct seq_file *m = filp->private_data;
7523 struct trace_iterator *iter = m->private;
7524 struct trace_array *tr = iter->tr;
7528 ret = tracing_update_buffers();
7532 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7536 mutex_lock(&trace_types_lock);
7538 if (tr->current_trace->use_max_tr) {
7543 local_irq_disable();
7544 arch_spin_lock(&tr->max_lock);
7545 if (tr->cond_snapshot)
7547 arch_spin_unlock(&tr->max_lock);
7554 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7558 if (tr->allocated_snapshot)
7562 /* Only allow per-cpu swap if the ring buffer supports it */
7563 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7564 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7569 if (tr->allocated_snapshot)
7570 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7571 &tr->array_buffer, iter->cpu_file);
7573 ret = tracing_alloc_snapshot_instance(tr);
7576 local_irq_disable();
7577 /* Now, we're going to swap */
7578 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7579 update_max_tr(tr, current, smp_processor_id(), NULL);
7581 update_max_tr_single(tr, current, iter->cpu_file);
7585 if (tr->allocated_snapshot) {
7586 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7587 tracing_reset_online_cpus(&tr->max_buffer);
7589 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7599 mutex_unlock(&trace_types_lock);
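/*
 * Write semantics of the snapshot file implemented above (see also
 * Documentation/trace/ftrace.rst): writing 0 clears the snapshot and frees
 * its buffer, writing 1 allocates the snapshot buffer if needed and swaps
 * it with the live buffer, and any other value clears the snapshot
 * contents without freeing them.
 */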
7603 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7605 struct seq_file *m = file->private_data;
7608 ret = tracing_release(inode, file);
7610 if (file->f_mode & FMODE_READ)
7613 /* If write only, the seq_file is just a stub */
7621 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7622 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7623 size_t count, loff_t *ppos);
7624 static int tracing_buffers_release(struct inode *inode, struct file *file);
7625 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7626 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7628 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7630 struct ftrace_buffer_info *info;
7633 /* The following checks for tracefs lockdown */
7634 ret = tracing_buffers_open(inode, filp);
7638 info = filp->private_data;
7640 if (info->iter.trace->use_max_tr) {
7641 tracing_buffers_release(inode, filp);
7645 info->iter.snapshot = true;
7646 info->iter.array_buffer = &info->iter.tr->max_buffer;
7651 #endif /* CONFIG_TRACER_SNAPSHOT */
7654 static const struct file_operations tracing_thresh_fops = {
7655 .open = tracing_open_generic,
7656 .read = tracing_thresh_read,
7657 .write = tracing_thresh_write,
7658 .llseek = generic_file_llseek,
7661 #ifdef CONFIG_TRACER_MAX_TRACE
7662 static const struct file_operations tracing_max_lat_fops = {
7663 .open = tracing_open_generic,
7664 .read = tracing_max_lat_read,
7665 .write = tracing_max_lat_write,
7666 .llseek = generic_file_llseek,
7670 static const struct file_operations set_tracer_fops = {
7671 .open = tracing_open_generic,
7672 .read = tracing_set_trace_read,
7673 .write = tracing_set_trace_write,
7674 .llseek = generic_file_llseek,
7677 static const struct file_operations tracing_pipe_fops = {
7678 .open = tracing_open_pipe,
7679 .poll = tracing_poll_pipe,
7680 .read = tracing_read_pipe,
7681 .splice_read = tracing_splice_read_pipe,
7682 .release = tracing_release_pipe,
7683 .llseek = no_llseek,
7686 static const struct file_operations tracing_entries_fops = {
7687 .open = tracing_open_generic_tr,
7688 .read = tracing_entries_read,
7689 .write = tracing_entries_write,
7690 .llseek = generic_file_llseek,
7691 .release = tracing_release_generic_tr,
7694 static const struct file_operations tracing_total_entries_fops = {
7695 .open = tracing_open_generic_tr,
7696 .read = tracing_total_entries_read,
7697 .llseek = generic_file_llseek,
7698 .release = tracing_release_generic_tr,
7701 static const struct file_operations tracing_free_buffer_fops = {
7702 .open = tracing_open_generic_tr,
7703 .write = tracing_free_buffer_write,
7704 .release = tracing_free_buffer_release,
7707 static const struct file_operations tracing_mark_fops = {
7708 .open = tracing_mark_open,
7709 .write = tracing_mark_write,
7710 .release = tracing_release_generic_tr,
7713 static const struct file_operations tracing_mark_raw_fops = {
7714 .open = tracing_mark_open,
7715 .write = tracing_mark_raw_write,
7716 .release = tracing_release_generic_tr,
7719 static const struct file_operations trace_clock_fops = {
7720 .open = tracing_clock_open,
7722 .llseek = seq_lseek,
7723 .release = tracing_single_release_tr,
7724 .write = tracing_clock_write,
7727 static const struct file_operations trace_time_stamp_mode_fops = {
7728 .open = tracing_time_stamp_mode_open,
7730 .llseek = seq_lseek,
7731 .release = tracing_single_release_tr,
7734 #ifdef CONFIG_TRACER_SNAPSHOT
7735 static const struct file_operations snapshot_fops = {
7736 .open = tracing_snapshot_open,
7738 .write = tracing_snapshot_write,
7739 .llseek = tracing_lseek,
7740 .release = tracing_snapshot_release,
7743 static const struct file_operations snapshot_raw_fops = {
7744 .open = snapshot_raw_open,
7745 .read = tracing_buffers_read,
7746 .release = tracing_buffers_release,
7747 .splice_read = tracing_buffers_splice_read,
7748 .llseek = no_llseek,
7751 #endif /* CONFIG_TRACER_SNAPSHOT */
7754 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7755 * @filp: The active open file structure
7756 * @ubuf: The userspace provided buffer to read the value from
7757 * @cnt: The maximum number of bytes to read
7758 * @ppos: The current "file" position
7760 * This function implements the write interface for a struct trace_min_max_param.
7761 * The filp->private_data must point to a trace_min_max_param structure that
7762 * defines where to write the value, the min and the max acceptable values,
7763 * and a lock to protect the write.
7766 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7768 struct trace_min_max_param *param = filp->private_data;
7775 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7780 mutex_lock(param->lock);
7782 if (param->min && val < *param->min)
7785 if (param->max && val > *param->max)
7792 mutex_unlock(param->lock);
7801 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7802 * @filp: The active open file structure
7803 * @ubuf: The userspace provided buffer to read value into
7804 * @cnt: The maximum number of bytes to read
7805 * @ppos: The current "file" position
7807 * This function implements the read interface for a struct trace_min_max_param.
7808 * The filp->private_data must point to a trace_min_max_param struct with valid
7812 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7814 struct trace_min_max_param *param = filp->private_data;
7815 char buf[U64_STR_SIZE];
7824 if (cnt > sizeof(buf))
7827 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7829 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7832 const struct file_operations trace_min_max_fops = {
7833 .open = tracing_open_generic,
7834 .read = trace_min_max_read,
7835 .write = trace_min_max_write,
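/*
 * Illustrative sketch (not from this file; all names below are
 * hypothetical) of how a tracer can expose a bounded u64 with these fops:
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */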
7838 #define TRACING_LOG_ERRS_MAX 8
7839 #define TRACING_LOG_LOC_MAX 128
7841 #define CMD_PREFIX " Command: "
7844 const char **errs; /* ptr to loc-specific array of err strings */
7845 u8 type; /* index into errs -> specific err string */
7846 u16 pos; /* caret position */
7850 struct tracing_log_err {
7851 struct list_head list;
7852 struct err_info info;
7853 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7854 char *cmd; /* what caused err */
7857 static DEFINE_MUTEX(tracing_err_log_lock);
7859 static struct tracing_log_err *alloc_tracing_log_err(int len)
7861 struct tracing_log_err *err;
7863 err = kzalloc(sizeof(*err), GFP_KERNEL);
7865 return ERR_PTR(-ENOMEM);
7867 err->cmd = kzalloc(len, GFP_KERNEL);
7870 return ERR_PTR(-ENOMEM);
7876 static void free_tracing_log_err(struct tracing_log_err *err)
7882 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7885 struct tracing_log_err *err;
7888 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7889 err = alloc_tracing_log_err(len);
7890 if (PTR_ERR(err) != -ENOMEM)
7891 tr->n_err_log_entries++;
7895 cmd = kzalloc(len, GFP_KERNEL);
7897 return ERR_PTR(-ENOMEM);
7898 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7901 list_del(&err->list);
7907 * err_pos - find the position of a string within a command for error careting
7908 * @cmd: The tracing command that caused the error
7909 * @str: The string to position the caret at within @cmd
7911 * Finds the position of the first occurrence of @str within @cmd. The
7912 * return value can be passed to tracing_log_err() for caret placement
7915 * Returns the index within @cmd of the first occurrence of @str or 0
7916 * if @str was not found.
7918 unsigned int err_pos(char *cmd, const char *str)
7922 if (WARN_ON(!strlen(cmd)))
7925 found = strstr(cmd, str);
7933 * tracing_log_err - write an error to the tracing error log
7934 * @tr: The associated trace array for the error (NULL for top level array)
7935 * @loc: A string describing where the error occurred
7936 * @cmd: The tracing command that caused the error
7937 * @errs: The array of loc-specific static error strings
7938 * @type: The index into errs[], which produces the specific static err string
7939 * @pos: The position the caret should be placed in the cmd
7941 * Writes an error into tracing/error_log of the form:
7943 * <loc>: error: <text>
7947 * tracing/error_log is a small log file containing the last
7948 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7949 * unless there has been a tracing error, and the error log can be
7950 * cleared and have its memory freed by writing the empty string in
7951 * truncation mode to it, i.e. echo > tracing/error_log.
7953 * NOTE: the @errs array along with the @type param are used to
7954 * produce a static error string - this string is not copied and saved
7955 * when the error is logged - only a pointer to it is saved. See
7956 * existing callers for examples of how static strings are typically
7957 * defined for use with tracing_log_err().
7959 void tracing_log_err(struct trace_array *tr,
7960 const char *loc, const char *cmd,
7961 const char **errs, u8 type, u16 pos)
7963 struct tracing_log_err *err;
7969 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7971 mutex_lock(&tracing_err_log_lock);
7972 err = get_tracing_log_err(tr, len);
7973 if (PTR_ERR(err) == -ENOMEM) {
7974 mutex_unlock(&tracing_err_log_lock);
7978 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7979 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7981 err->info.errs = errs;
7982 err->info.type = type;
7983 err->info.pos = pos;
7984 err->info.ts = local_clock();
7986 list_add_tail(&err->list, &tr->err_log);
7987 mutex_unlock(&tracing_err_log_lock);
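/*
 * Illustrative call (not from this file; the error array, command string
 * and index are hypothetical):
 *
 *	static const char *my_errs[] = { "Field not found", "Bad operator" };
 *
 *	tracing_log_err(tr, "hist:sched:sched_switch", cmd,
 *			my_errs, 0, err_pos(cmd, "bogus_field"));
 *
 * appends an entry of the form
 *
 *	[  125.312071] hist:sched:sched_switch: error: Field not found
 *	  Command: keys=bogus_field
 *	                ^
 *
 * to tracing/error_log, with the caret placed under the offending string.
 */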
7990 static void clear_tracing_err_log(struct trace_array *tr)
7992 struct tracing_log_err *err, *next;
7994 mutex_lock(&tracing_err_log_lock);
7995 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7996 list_del(&err->list);
7997 free_tracing_log_err(err);
8000 tr->n_err_log_entries = 0;
8001 mutex_unlock(&tracing_err_log_lock);
8004 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8006 struct trace_array *tr = m->private;
8008 mutex_lock(&tracing_err_log_lock);
8010 return seq_list_start(&tr->err_log, *pos);
8013 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8015 struct trace_array *tr = m->private;
8017 return seq_list_next(v, &tr->err_log, pos);
8020 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8022 mutex_unlock(&tracing_err_log_lock);
8025 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8029 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8031 for (i = 0; i < pos; i++)
8036 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8038 struct tracing_log_err *err = v;
8041 const char *err_text = err->info.errs[err->info.type];
8042 u64 sec = err->info.ts;
8045 nsec = do_div(sec, NSEC_PER_SEC);
8046 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8047 err->loc, err_text);
8048 seq_printf(m, "%s", err->cmd);
8049 tracing_err_log_show_pos(m, err->info.pos);
8055 static const struct seq_operations tracing_err_log_seq_ops = {
8056 .start = tracing_err_log_seq_start,
8057 .next = tracing_err_log_seq_next,
8058 .stop = tracing_err_log_seq_stop,
8059 .show = tracing_err_log_seq_show
8062 static int tracing_err_log_open(struct inode *inode, struct file *file)
8064 struct trace_array *tr = inode->i_private;
8067 ret = tracing_check_open_get_tr(tr);
8071 /* If this file was opened for write, then erase contents */
8072 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8073 clear_tracing_err_log(tr);
8075 if (file->f_mode & FMODE_READ) {
8076 ret = seq_open(file, &tracing_err_log_seq_ops);
8078 struct seq_file *m = file->private_data;
8081 trace_array_put(tr);
8087 static ssize_t tracing_err_log_write(struct file *file,
8088 const char __user *buffer,
8089 size_t count, loff_t *ppos)
8094 static int tracing_err_log_release(struct inode *inode, struct file *file)
8096 struct trace_array *tr = inode->i_private;
8098 trace_array_put(tr);
8100 if (file->f_mode & FMODE_READ)
8101 seq_release(inode, file);
8106 static const struct file_operations tracing_err_log_fops = {
8107 .open = tracing_err_log_open,
8108 .write = tracing_err_log_write,
8110 .llseek = seq_lseek,
8111 .release = tracing_err_log_release,
8114 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8116 struct trace_array *tr = inode->i_private;
8117 struct ftrace_buffer_info *info;
8120 ret = tracing_check_open_get_tr(tr);
8124 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8126 trace_array_put(tr);
8130 mutex_lock(&trace_types_lock);
8133 info->iter.cpu_file = tracing_get_cpu(inode);
8134 info->iter.trace = tr->current_trace;
8135 info->iter.array_buffer = &tr->array_buffer;
8137 /* Force reading ring buffer for first read */
8138 info->read = (unsigned int)-1;
8140 filp->private_data = info;
8144 mutex_unlock(&trace_types_lock);
8146 ret = nonseekable_open(inode, filp);
8148 trace_array_put(tr);
8154 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8156 struct ftrace_buffer_info *info = filp->private_data;
8157 struct trace_iterator *iter = &info->iter;
8159 return trace_poll(iter, filp, poll_table);
8163 tracing_buffers_read(struct file *filp, char __user *ubuf,
8164 size_t count, loff_t *ppos)
8166 struct ftrace_buffer_info *info = filp->private_data;
8167 struct trace_iterator *iter = &info->iter;
8174 #ifdef CONFIG_TRACER_MAX_TRACE
8175 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8180 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8182 if (IS_ERR(info->spare)) {
8183 ret = PTR_ERR(info->spare);
8186 info->spare_cpu = iter->cpu_file;
8192 /* Do we have previous read data to read? */
8193 if (info->read < PAGE_SIZE)
8197 trace_access_lock(iter->cpu_file);
8198 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8202 trace_access_unlock(iter->cpu_file);
8205 if (trace_empty(iter)) {
8206 if ((filp->f_flags & O_NONBLOCK))
8209 ret = wait_on_pipe(iter, 0);
8220 size = PAGE_SIZE - info->read;
8224 ret = copy_to_user(ubuf, info->spare + info->read, size);
8236 static int tracing_buffers_release(struct inode *inode, struct file *file)
8238 struct ftrace_buffer_info *info = file->private_data;
8239 struct trace_iterator *iter = &info->iter;
8241 mutex_lock(&trace_types_lock);
8243 iter->tr->trace_ref--;
8245 __trace_array_put(iter->tr);
8248 /* Make sure the waiters see the new wait_index */
8251 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8254 ring_buffer_free_read_page(iter->array_buffer->buffer,
8255 info->spare_cpu, info->spare);
8258 mutex_unlock(&trace_types_lock);
8264 struct trace_buffer *buffer;
8267 refcount_t refcount;
8270 static void buffer_ref_release(struct buffer_ref *ref)
8272 if (!refcount_dec_and_test(&ref->refcount))
8274 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8278 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8279 struct pipe_buffer *buf)
8281 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8283 buffer_ref_release(ref);
8287 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8288 struct pipe_buffer *buf)
8290 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8292 if (refcount_read(&ref->refcount) > INT_MAX/2)
8295 refcount_inc(&ref->refcount);
8299 /* Pipe buffer operations for a buffer. */
8300 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8301 .release = buffer_pipe_buf_release,
8302 .get = buffer_pipe_buf_get,
8306 * Callback from splice_to_pipe(); used to release any remaining pages
8307 * at the end of the spd in case we errored out while filling the pipe.
8309 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8311 struct buffer_ref *ref =
8312 (struct buffer_ref *)spd->partial[i].private;
8314 buffer_ref_release(ref);
8315 spd->partial[i].private = 0;
8319 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8320 struct pipe_inode_info *pipe, size_t len,
8323 struct ftrace_buffer_info *info = file->private_data;
8324 struct trace_iterator *iter = &info->iter;
8325 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8326 struct page *pages_def[PIPE_DEF_BUFFERS];
8327 struct splice_pipe_desc spd = {
8329 .partial = partial_def,
8330 .nr_pages_max = PIPE_DEF_BUFFERS,
8331 .ops = &buffer_pipe_buf_ops,
8332 .spd_release = buffer_spd_release,
8334 struct buffer_ref *ref;
8338 #ifdef CONFIG_TRACER_MAX_TRACE
8339 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8343 if (*ppos & (PAGE_SIZE - 1))
8346 if (len & (PAGE_SIZE - 1)) {
8347 if (len < PAGE_SIZE)
8352 if (splice_grow_spd(pipe, &spd))
8356 trace_access_lock(iter->cpu_file);
8357 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8359 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8363 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8369 refcount_set(&ref->refcount, 1);
8370 ref->buffer = iter->array_buffer->buffer;
8371 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8372 if (IS_ERR(ref->page)) {
8373 ret = PTR_ERR(ref->page);
8378 ref->cpu = iter->cpu_file;
8380 r = ring_buffer_read_page(ref->buffer, &ref->page,
8381 len, iter->cpu_file, 1);
8383 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8389 page = virt_to_page(ref->page);
8391 spd.pages[i] = page;
8392 spd.partial[i].len = PAGE_SIZE;
8393 spd.partial[i].offset = 0;
8394 spd.partial[i].private = (unsigned long)ref;
8398 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8401 trace_access_unlock(iter->cpu_file);
8404 /* did we read anything? */
8405 if (!spd.nr_pages) {
8412 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8415 wait_index = READ_ONCE(iter->wait_index);
8417 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8421 /* No need to wait after waking up when tracing is off */
8422 if (!tracer_tracing_is_on(iter->tr))
8425 /* Make sure we see the new wait_index */
8427 if (wait_index != iter->wait_index)
8433 ret = splice_to_pipe(pipe, &spd);
8435 splice_shrink_spd(&spd);
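/*
 * Usage sketch (not from this file; raw_fd and pipe_fd are assumed to be
 * open on per_cpu/cpuN/trace_pipe_raw and the write end of a pipe): data is
 * handed out in whole pages, so the offset must be page aligned and the
 * length is rounded down to a multiple of PAGE_SIZE (anything shorter is
 * rejected):
 *
 *	splice(raw_fd, NULL, pipe_fd, NULL, PAGE_SIZE, SPLICE_F_NONBLOCK);
 */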
8440 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8441 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8443 struct ftrace_buffer_info *info = file->private_data;
8444 struct trace_iterator *iter = &info->iter;
8447 return -ENOIOCTLCMD;
8449 mutex_lock(&trace_types_lock);
8452 /* Make sure the waiters see the new wait_index */
8455 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8457 mutex_unlock(&trace_types_lock);
8461 static const struct file_operations tracing_buffers_fops = {
8462 .open = tracing_buffers_open,
8463 .read = tracing_buffers_read,
8464 .poll = tracing_buffers_poll,
8465 .release = tracing_buffers_release,
8466 .splice_read = tracing_buffers_splice_read,
8467 .unlocked_ioctl = tracing_buffers_ioctl,
8468 .llseek = no_llseek,
8472 tracing_stats_read(struct file *filp, char __user *ubuf,
8473 size_t count, loff_t *ppos)
8475 struct inode *inode = file_inode(filp);
8476 struct trace_array *tr = inode->i_private;
8477 struct array_buffer *trace_buf = &tr->array_buffer;
8478 int cpu = tracing_get_cpu(inode);
8479 struct trace_seq *s;
8481 unsigned long long t;
8482 unsigned long usec_rem;
8484 s = kmalloc(sizeof(*s), GFP_KERNEL);
8490 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8491 trace_seq_printf(s, "entries: %ld\n", cnt);
8493 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8494 trace_seq_printf(s, "overrun: %ld\n", cnt);
8496 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8497 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8499 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8500 trace_seq_printf(s, "bytes: %ld\n", cnt);
8502 if (trace_clocks[tr->clock_id].in_ns) {
8503 /* local or global for trace_clock */
8504 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8505 usec_rem = do_div(t, USEC_PER_SEC);
8506 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8509 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8510 usec_rem = do_div(t, USEC_PER_SEC);
8511 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8513 /* counter or tsc mode for trace_clock */
8514 trace_seq_printf(s, "oldest event ts: %llu\n",
8515 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8517 trace_seq_printf(s, "now ts: %llu\n",
8518 ring_buffer_time_stamp(trace_buf->buffer));
8521 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8522 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8524 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8525 trace_seq_printf(s, "read events: %ld\n", cnt);
8527 count = simple_read_from_buffer(ubuf, count, ppos,
8528 s->buffer, trace_seq_used(s));
8535 static const struct file_operations tracing_stats_fops = {
8536 .open = tracing_open_generic_tr,
8537 .read = tracing_stats_read,
8538 .llseek = generic_file_llseek,
8539 .release = tracing_release_generic_tr,
8542 #ifdef CONFIG_DYNAMIC_FTRACE
8545 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8546 size_t cnt, loff_t *ppos)
8552 /* 256 should be plenty to hold the amount needed */
8553 buf = kmalloc(256, GFP_KERNEL);
8557 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8558 ftrace_update_tot_cnt,
8559 ftrace_number_of_pages,
8560 ftrace_number_of_groups);
8562 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8567 static const struct file_operations tracing_dyn_info_fops = {
8568 .open = tracing_open_generic,
8569 .read = tracing_read_dyn_info,
8570 .llseek = generic_file_llseek,
8572 #endif /* CONFIG_DYNAMIC_FTRACE */
8574 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8576 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8577 struct trace_array *tr, struct ftrace_probe_ops *ops,
8580 tracing_snapshot_instance(tr);
8584 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8585 struct trace_array *tr, struct ftrace_probe_ops *ops,
8588 struct ftrace_func_mapper *mapper = data;
8592 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8602 tracing_snapshot_instance(tr);
8606 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8607 struct ftrace_probe_ops *ops, void *data)
8609 struct ftrace_func_mapper *mapper = data;
8612 seq_printf(m, "%ps:", (void *)ip);
8614 seq_puts(m, "snapshot");
8617 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8620 seq_printf(m, ":count=%ld\n", *count);
8622 seq_puts(m, ":unlimited\n");
8628 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8629 unsigned long ip, void *init_data, void **data)
8631 struct ftrace_func_mapper *mapper = *data;
8634 mapper = allocate_ftrace_func_mapper();
8640 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8644 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8645 unsigned long ip, void *data)
8647 struct ftrace_func_mapper *mapper = data;
8652 free_ftrace_func_mapper(mapper, NULL);
8656 ftrace_func_mapper_remove_ip(mapper, ip);
8659 static struct ftrace_probe_ops snapshot_probe_ops = {
8660 .func = ftrace_snapshot,
8661 .print = ftrace_snapshot_print,
8664 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8665 .func = ftrace_count_snapshot,
8666 .print = ftrace_snapshot_print,
8667 .init = ftrace_snapshot_init,
8668 .free = ftrace_snapshot_free,
8672 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8673 char *glob, char *cmd, char *param, int enable)
8675 struct ftrace_probe_ops *ops;
8676 void *count = (void *)-1;
8683 /* hash funcs only work with set_ftrace_filter */
8687 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8690 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8695 number = strsep(¶m, ":");
8697 if (!strlen(number))
8701 * We use the callback data field (which is a pointer)
8704 ret = kstrtoul(number, 0, (unsigned long *)&count);
8709 ret = tracing_alloc_snapshot_instance(tr);
8713 ret = register_ftrace_function_probe(glob, tr, ops, count);
8716 return ret < 0 ? ret : 0;
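/*
 * Usage sketch (not from this file): once register_snapshot_cmd() below has
 * run, a snapshot can be armed from the function filter file, e.g.
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter
 *
 * takes a snapshot every time schedule() is hit; an optional ':count'
 * suffix limits the number of snapshots, and prefixing the string with '!'
 * removes the probe again.
 */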
8719 static struct ftrace_func_command ftrace_snapshot_cmd = {
8721 .func = ftrace_trace_snapshot_callback,
8724 static __init int register_snapshot_cmd(void)
8726 return register_ftrace_command(&ftrace_snapshot_cmd);
8729 static inline __init int register_snapshot_cmd(void) { return 0; }
8730 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8732 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8734 if (WARN_ON(!tr->dir))
8735 return ERR_PTR(-ENODEV);
8737 /* Top directory uses NULL as the parent */
8738 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8741 /* All sub buffers have a descriptor */
8745 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8747 struct dentry *d_tracer;
8750 return tr->percpu_dir;
8752 d_tracer = tracing_get_dentry(tr);
8753 if (IS_ERR(d_tracer))
8756 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8758 MEM_FAIL(!tr->percpu_dir,
8759 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8761 return tr->percpu_dir;
8764 static struct dentry *
8765 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8766 void *data, long cpu, const struct file_operations *fops)
8768 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8770 if (ret) /* See tracing_get_cpu() */
8771 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8776 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8778 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8779 struct dentry *d_cpu;
8780 char cpu_dir[30]; /* 30 characters should be more than enough */
8785 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8786 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8788 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8792 /* per cpu trace_pipe */
8793 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8794 tr, cpu, &tracing_pipe_fops);
8797 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8798 tr, cpu, &tracing_fops);
8800 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8801 tr, cpu, &tracing_buffers_fops);
8803 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8804 tr, cpu, &tracing_stats_fops);
8806 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8807 tr, cpu, &tracing_entries_fops);
8809 #ifdef CONFIG_TRACER_SNAPSHOT
8810 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8811 tr, cpu, &snapshot_fops);
8813 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8814 tr, cpu, &snapshot_raw_fops);
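/*
 * The calls above populate per_cpu/cpuN/ with trace, trace_pipe,
 * trace_pipe_raw, stats and buffer_size_kb, plus snapshot and snapshot_raw
 * when CONFIG_TRACER_SNAPSHOT is set, mirroring the top level files but
 * restricted to a single CPU.
 */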
8818 #ifdef CONFIG_FTRACE_SELFTEST
8819 /* Let selftest have access to static functions in this file */
8820 #include "trace_selftest.c"
8824 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8827 struct trace_option_dentry *topt = filp->private_data;
8830 if (topt->flags->val & topt->opt->bit)
8835 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8839 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8842 struct trace_option_dentry *topt = filp->private_data;
8846 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8850 if (val != 0 && val != 1)
8853 if (!!(topt->flags->val & topt->opt->bit) != val) {
8854 mutex_lock(&trace_types_lock);
8855 ret = __set_tracer_option(topt->tr, topt->flags,
8857 mutex_unlock(&trace_types_lock);
8868 static const struct file_operations trace_options_fops = {
8869 .open = tracing_open_generic,
8870 .read = trace_options_read,
8871 .write = trace_options_write,
8872 .llseek = generic_file_llseek,
8876 * In order to pass in both the trace_array descriptor as well as the index
8877 * to the flag that the trace option file represents, the trace_array
8878 * has a character array of trace_flags_index[], which holds the index
8879 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8880 * The address of this character array is passed to the flag option file
8881 * read/write callbacks.
8883 * In order to extract both the index and the trace_array descriptor,
8884 * get_tr_index() uses the following algorithm.
8888 * As the pointer itself contains the address of the index (remember index[1] == 1).
8891 * Then, to get the trace_array descriptor, subtract that index
8892 * from the ptr to get to the start of the index array itself.
8894 * ptr - idx == &index[0]
8896 * Then a simple container_of() from that pointer gets us to the
8897 * trace_array descriptor.
8899 static void get_tr_index(void *data, struct trace_array **ptr,
8900 unsigned int *pindex)
8902 *pindex = *(unsigned char *)data;
8904 *ptr = container_of(data - *pindex, struct trace_array,
8909 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8912 void *tr_index = filp->private_data;
8913 struct trace_array *tr;
8917 get_tr_index(tr_index, &tr, &index);
8919 if (tr->trace_flags & (1 << index))
8924 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8928 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8931 void *tr_index = filp->private_data;
8932 struct trace_array *tr;
8937 get_tr_index(tr_index, &tr, &index);
8939 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8943 if (val != 0 && val != 1)
8946 mutex_lock(&event_mutex);
8947 mutex_lock(&trace_types_lock);
8948 ret = set_tracer_flag(tr, 1 << index, val);
8949 mutex_unlock(&trace_types_lock);
8950 mutex_unlock(&event_mutex);
8960 static const struct file_operations trace_options_core_fops = {
8961 .open = tracing_open_generic,
8962 .read = trace_options_core_read,
8963 .write = trace_options_core_write,
8964 .llseek = generic_file_llseek,
8967 struct dentry *trace_create_file(const char *name,
8969 struct dentry *parent,
8971 const struct file_operations *fops)
8975 ret = tracefs_create_file(name, mode, parent, data, fops);
8977 pr_warn("Could not create tracefs '%s' entry\n", name);
8983 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8985 struct dentry *d_tracer;
8990 d_tracer = tracing_get_dentry(tr);
8991 if (IS_ERR(d_tracer))
8994 tr->options = tracefs_create_dir("options", d_tracer);
8996 pr_warn("Could not create tracefs directory 'options'\n");
9004 create_trace_option_file(struct trace_array *tr,
9005 struct trace_option_dentry *topt,
9006 struct tracer_flags *flags,
9007 struct tracer_opt *opt)
9009 struct dentry *t_options;
9011 t_options = trace_options_init_dentry(tr);
9015 topt->flags = flags;
9019 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9020 t_options, topt, &trace_options_fops);
9025 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9027 struct trace_option_dentry *topts;
9028 struct trace_options *tr_topts;
9029 struct tracer_flags *flags;
9030 struct tracer_opt *opts;
9037 flags = tracer->flags;
9039 if (!flags || !flags->opts)
9043 * If this is an instance, only create flags for tracers
9044 * the instance may have.
9046 if (!trace_ok_for_array(tracer, tr))
9049 for (i = 0; i < tr->nr_topts; i++) {
9050 /* Make sure there's no duplicate flags. */
9051 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9057 for (cnt = 0; opts[cnt].name; cnt++)
9060 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9064 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9071 tr->topts = tr_topts;
9072 tr->topts[tr->nr_topts].tracer = tracer;
9073 tr->topts[tr->nr_topts].topts = topts;
9076 for (cnt = 0; opts[cnt].name; cnt++) {
9077 create_trace_option_file(tr, &topts[cnt], flags,
9079 MEM_FAIL(topts[cnt].entry == NULL,
9080 "Failed to create trace option: %s",
9085 static struct dentry *
9086 create_trace_option_core_file(struct trace_array *tr,
9087 const char *option, long index)
9089 struct dentry *t_options;
9091 t_options = trace_options_init_dentry(tr);
9095 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9096 (void *)&tr->trace_flags_index[index],
9097 &trace_options_core_fops);
9100 static void create_trace_options_dir(struct trace_array *tr)
9102 struct dentry *t_options;
9103 bool top_level = tr == &global_trace;
9106 t_options = trace_options_init_dentry(tr);
9110 for (i = 0; trace_options[i]; i++) {
9112 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9113 create_trace_option_core_file(tr, trace_options[i], i);
9118 rb_simple_read(struct file *filp, char __user *ubuf,
9119 size_t cnt, loff_t *ppos)
9121 struct trace_array *tr = filp->private_data;
9125 r = tracer_tracing_is_on(tr);
9126 r = sprintf(buf, "%d\n", r);
9128 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9132 rb_simple_write(struct file *filp, const char __user *ubuf,
9133 size_t cnt, loff_t *ppos)
9135 struct trace_array *tr = filp->private_data;
9136 struct trace_buffer *buffer = tr->array_buffer.buffer;
9140 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9145 mutex_lock(&trace_types_lock);
9146 if (!!val == tracer_tracing_is_on(tr)) {
9147 val = 0; /* do nothing */
9149 tracer_tracing_on(tr);
9150 if (tr->current_trace->start)
9151 tr->current_trace->start(tr);
9153 tracer_tracing_off(tr);
9154 if (tr->current_trace->stop)
9155 tr->current_trace->stop(tr);
9156 /* Wake up any waiters */
9157 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9159 mutex_unlock(&trace_types_lock);
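/*
 * Summary of the tracing_on file backed by the handlers above: reading
 * returns "0" or "1"; writing 1 (re)starts recording and calls the current
 * tracer's ->start() hook, while writing 0 stops recording, calls ->stop()
 * and wakes any blocked readers so they can drain what is left.
 */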
9167 static const struct file_operations rb_simple_fops = {
9168 .open = tracing_open_generic_tr,
9169 .read = rb_simple_read,
9170 .write = rb_simple_write,
9171 .release = tracing_release_generic_tr,
9172 .llseek = default_llseek,
9176 buffer_percent_read(struct file *filp, char __user *ubuf,
9177 size_t cnt, loff_t *ppos)
9179 struct trace_array *tr = filp->private_data;
9183 r = tr->buffer_percent;
9184 r = sprintf(buf, "%d\n", r);
9186 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9190 buffer_percent_write(struct file *filp, const char __user *ubuf,
9191 size_t cnt, loff_t *ppos)
9193 struct trace_array *tr = filp->private_data;
9197 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9204 tr->buffer_percent = val;
9211 static const struct file_operations buffer_percent_fops = {
9212 .open = tracing_open_generic_tr,
9213 .read = buffer_percent_read,
9214 .write = buffer_percent_write,
9215 .release = tracing_release_generic_tr,
9216 .llseek = default_llseek,
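/*
 * buffer_percent (see also Documentation/trace/ftrace.rst) is the watermark
 * for how full the ring buffer must be before blocked readers of
 * trace_pipe_raw are woken: 0 wakes them as soon as any data is available,
 * 100 only once the buffer is full.  The default of 50 is set in
 * init_tracer_tracefs() below.
 */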
9219 static struct dentry *trace_instance_dir;
9222 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9225 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9227 enum ring_buffer_flags rb_flags;
9229 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9233 buf->buffer = ring_buffer_alloc(size, rb_flags);
9237 buf->data = alloc_percpu(struct trace_array_cpu);
9239 ring_buffer_free(buf->buffer);
9244 /* Allocate the first page for all buffers */
9245 set_buffer_entries(&tr->array_buffer,
9246 ring_buffer_size(tr->array_buffer.buffer, 0));
9251 static void free_trace_buffer(struct array_buffer *buf)
9254 ring_buffer_free(buf->buffer);
9256 free_percpu(buf->data);
9261 static int allocate_trace_buffers(struct trace_array *tr, int size)
9265 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9269 #ifdef CONFIG_TRACER_MAX_TRACE
9270 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9271 allocate_snapshot ? size : 1);
9272 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9273 free_trace_buffer(&tr->array_buffer);
9276 tr->allocated_snapshot = allocate_snapshot;
9278 allocate_snapshot = false;
9284 static void free_trace_buffers(struct trace_array *tr)
9289 free_trace_buffer(&tr->array_buffer);
9291 #ifdef CONFIG_TRACER_MAX_TRACE
9292 free_trace_buffer(&tr->max_buffer);
9296 static void init_trace_flags_index(struct trace_array *tr)
9300 /* Used by the trace options files */
9301 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9302 tr->trace_flags_index[i] = i;
9305 static void __update_tracer_options(struct trace_array *tr)
9309 for (t = trace_types; t; t = t->next)
9310 add_tracer_options(tr, t);
9313 static void update_tracer_options(struct trace_array *tr)
9315 mutex_lock(&trace_types_lock);
9316 tracer_options_updated = true;
9317 __update_tracer_options(tr);
9318 mutex_unlock(&trace_types_lock);
9321 /* Must have trace_types_lock held */
9322 struct trace_array *trace_array_find(const char *instance)
9324 struct trace_array *tr, *found = NULL;
9326 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9327 if (tr->name && strcmp(tr->name, instance) == 0) {
9336 struct trace_array *trace_array_find_get(const char *instance)
9338 struct trace_array *tr;
9340 mutex_lock(&trace_types_lock);
9341 tr = trace_array_find(instance);
9344 mutex_unlock(&trace_types_lock);
9349 static int trace_array_create_dir(struct trace_array *tr)
9353 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9357 ret = event_trace_add_tracer(tr->dir, tr);
9359 tracefs_remove(tr->dir);
9363 init_tracer_tracefs(tr, tr->dir);
9364 __update_tracer_options(tr);
9369 static struct trace_array *trace_array_create(const char *name)
9371 struct trace_array *tr;
9375 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9377 return ERR_PTR(ret);
9379 tr->name = kstrdup(name, GFP_KERNEL);
9383 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9386 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9388 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9390 raw_spin_lock_init(&tr->start_lock);
9392 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9394 tr->current_trace = &nop_trace;
9396 INIT_LIST_HEAD(&tr->systems);
9397 INIT_LIST_HEAD(&tr->events);
9398 INIT_LIST_HEAD(&tr->hist_vars);
9399 INIT_LIST_HEAD(&tr->err_log);
9401 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9404 if (ftrace_allocate_ftrace_ops(tr) < 0)
9407 ftrace_init_trace_array(tr);
9409 init_trace_flags_index(tr);
9411 if (trace_instance_dir) {
9412 ret = trace_array_create_dir(tr);
9416 __trace_early_add_events(tr);
9418 list_add(&tr->list, &ftrace_trace_arrays);
9425 ftrace_free_ftrace_ops(tr);
9426 free_trace_buffers(tr);
9427 free_cpumask_var(tr->tracing_cpumask);
9431 return ERR_PTR(ret);
9434 static int instance_mkdir(const char *name)
9436 struct trace_array *tr;
9439 mutex_lock(&event_mutex);
9440 mutex_lock(&trace_types_lock);
9443 if (trace_array_find(name))
9446 tr = trace_array_create(name);
9448 ret = PTR_ERR_OR_ZERO(tr);
9451 mutex_unlock(&trace_types_lock);
9452 mutex_unlock(&event_mutex);
9457 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9458 * @name: The name of the trace array to be looked up/created.
9460 * Returns pointer to trace array with given name.
9461 * NULL, if it cannot be created.
9463 * NOTE: This function increments the reference counter associated with the
9464 * trace array returned. This makes sure it cannot be freed while in use.
9465 * Use trace_array_put() once the trace array is no longer needed.
9466 * If the trace_array is to be freed, trace_array_destroy() needs to
9467 * be called after the trace_array_put(), or simply let user space delete
9468 * it from the tracefs instances directory. But until the
9469 * trace_array_put() is called, user space can not delete it.
9472 struct trace_array *trace_array_get_by_name(const char *name)
9474 struct trace_array *tr;
9476 mutex_lock(&event_mutex);
9477 mutex_lock(&trace_types_lock);
9479 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9480 if (tr->name && strcmp(tr->name, name) == 0)
9484 tr = trace_array_create(name);
9492 mutex_unlock(&trace_types_lock);
9493 mutex_unlock(&event_mutex);
9496 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
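/*
 * Illustrative in-kernel usage (not from this file; the instance name and
 * event are examples only, error handling abbreviated):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENODEV;
 *	trace_array_set_clr_event(tr, "sched", "sched_switch", true);
 *	...
 *	trace_array_put(tr);
 *
 * A user that also wants the instance removed calls trace_array_destroy()
 * once it is done with it.
 */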
9498 static int __remove_instance(struct trace_array *tr)
9502 /* Reference counter for a newly created trace array = 1. */
9503 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9506 list_del(&tr->list);
9508 /* Disable all the flags that were enabled coming in */
9509 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9510 if ((1 << i) & ZEROED_TRACE_FLAGS)
9511 set_tracer_flag(tr, 1 << i, 0);
9514 tracing_set_nop(tr);
9515 clear_ftrace_function_probes(tr);
9516 event_trace_del_tracer(tr);
9517 ftrace_clear_pids(tr);
9518 ftrace_destroy_function_files(tr);
9519 tracefs_remove(tr->dir);
9520 free_percpu(tr->last_func_repeats);
9521 free_trace_buffers(tr);
9522 clear_tracing_err_log(tr);
9524 for (i = 0; i < tr->nr_topts; i++) {
9525 kfree(tr->topts[i].topts);
9529 free_cpumask_var(tr->tracing_cpumask);
9536 int trace_array_destroy(struct trace_array *this_tr)
9538 struct trace_array *tr;
9544 mutex_lock(&event_mutex);
9545 mutex_lock(&trace_types_lock);
9549 /* Making sure trace array exists before destroying it. */
9550 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9551 if (tr == this_tr) {
9552 ret = __remove_instance(tr);
9557 mutex_unlock(&trace_types_lock);
9558 mutex_unlock(&event_mutex);
9562 EXPORT_SYMBOL_GPL(trace_array_destroy);
9564 static int instance_rmdir(const char *name)
9566 struct trace_array *tr;
9569 mutex_lock(&event_mutex);
9570 mutex_lock(&trace_types_lock);
9573 tr = trace_array_find(name);
9575 ret = __remove_instance(tr);
9577 mutex_unlock(&trace_types_lock);
9578 mutex_unlock(&event_mutex);
9583 static __init void create_trace_instances(struct dentry *d_tracer)
9585 struct trace_array *tr;
9587 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9590 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9593 mutex_lock(&event_mutex);
9594 mutex_lock(&trace_types_lock);
9596 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9599 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9600 "Failed to create instance directory\n"))
9604 mutex_unlock(&trace_types_lock);
9605 mutex_unlock(&event_mutex);
9609 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9611 struct trace_event_file *file;
9614 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9615 tr, &show_traces_fops);
9617 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9618 tr, &set_tracer_fops);
9620 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9621 tr, &tracing_cpumask_fops);
9623 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9624 tr, &tracing_iter_fops);
9626 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9629 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9630 tr, &tracing_pipe_fops);
9632 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9633 tr, &tracing_entries_fops);
9635 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9636 tr, &tracing_total_entries_fops);
9638 trace_create_file("free_buffer", 0200, d_tracer,
9639 tr, &tracing_free_buffer_fops);
9641 trace_create_file("trace_marker", 0220, d_tracer,
9642 tr, &tracing_mark_fops);
9644 file = __find_event_file(tr, "ftrace", "print");
9645 if (file && file->dir)
9646 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9647 file, &event_trigger_fops);
9648 tr->trace_marker_file = file;
9650 trace_create_file("trace_marker_raw", 0220, d_tracer,
9651 tr, &tracing_mark_raw_fops);
9653 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9656 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9657 tr, &rb_simple_fops);
9659 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9660 &trace_time_stamp_mode_fops);
9662 tr->buffer_percent = 50;
9664 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9665 tr, &buffer_percent_fops);
9667 create_trace_options_dir(tr);
9669 #ifdef CONFIG_TRACER_MAX_TRACE
9670 trace_create_maxlat_file(tr, d_tracer);
9673 if (ftrace_create_function_files(tr, d_tracer))
9674 MEM_FAIL(1, "Could not allocate function filter files");
9676 #ifdef CONFIG_TRACER_SNAPSHOT
9677 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9678 tr, &snapshot_fops);
9681 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9682 tr, &tracing_err_log_fops);
9684 for_each_tracing_cpu(cpu)
9685 tracing_init_tracefs_percpu(tr, cpu);
9687 ftrace_init_tracefs(tr, d_tracer);
9690 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9692 struct vfsmount *mnt;
9693 struct file_system_type *type;
9696 * To maintain backward compatibility for tools that mount
9697 * debugfs to get to the tracing facility, tracefs is automatically
9698 * mounted to the debugfs/tracing directory.
9700 type = get_fs_type("tracefs");
9703 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9704 put_filesystem(type);
9713 * tracing_init_dentry - initialize top level trace array
9715 * This is called when creating files or directories in the tracing
9716 * directory. It is called via fs_initcall() by any of the boot up code
9717 * and returns 0 once the top level tracing directory has been initialized.
9719 int tracing_init_dentry(void)
9721 struct trace_array *tr = &global_trace;
9723 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9724 pr_warn("Tracing disabled due to lockdown\n");
9728 /* The top level trace array uses NULL as parent */
9732 if (WARN_ON(!tracefs_initialized()))
9736 * As there may still be users that expect the tracing
9737 * files to exist in debugfs/tracing, we must automount
9738 * the tracefs file system there, so older tools still
9739 * work with the newer kernel.
9741 tr->dir = debugfs_create_automount("tracing", NULL,
9742 trace_automount, NULL);
9747 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9748 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9750 static struct workqueue_struct *eval_map_wq __initdata;
9751 static struct work_struct eval_map_work __initdata;
9752 static struct work_struct tracerfs_init_work __initdata;
9754 static void __init eval_map_work_func(struct work_struct *work)
9758 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9759 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9762 static int __init trace_eval_init(void)
9764 INIT_WORK(&eval_map_work, eval_map_work_func);
9766 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9768 pr_err("Unable to allocate eval_map_wq\n");
9770 eval_map_work_func(&eval_map_work);
9774 queue_work(eval_map_wq, &eval_map_work);
9778 subsys_initcall(trace_eval_init);
9780 static int __init trace_eval_sync(void)
9782 /* Make sure the eval map updates are finished */
9784 destroy_workqueue(eval_map_wq);
9788 late_initcall_sync(trace_eval_sync);
9791 #ifdef CONFIG_MODULES
9792 static void trace_module_add_evals(struct module *mod)
9794 if (!mod->num_trace_evals)
9798 * Modules with bad taint do not have events created, so do
9799 * not bother with their enums either.
9801 if (trace_module_has_bad_taint(mod))
9804 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9807 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9808 static void trace_module_remove_evals(struct module *mod)
9810 union trace_eval_map_item *map;
9811 union trace_eval_map_item **last = &trace_eval_maps;
9813 if (!mod->num_trace_evals)
9816 mutex_lock(&trace_eval_mutex);
9818 map = trace_eval_maps;
9821 if (map->head.mod == mod)
9823 map = trace_eval_jmp_to_tail(map);
9824 last = &map->tail.next;
9825 map = map->tail.next;
9830 *last = trace_eval_jmp_to_tail(map)->tail.next;
9833 mutex_unlock(&trace_eval_mutex);
9836 static inline void trace_module_remove_evals(struct module *mod) { }
9837 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9839 static int trace_module_notify(struct notifier_block *self,
9840 unsigned long val, void *data)
9842 struct module *mod = data;
9845 case MODULE_STATE_COMING:
9846 trace_module_add_evals(mod);
9848 case MODULE_STATE_GOING:
9849 trace_module_remove_evals(mod);
9856 static struct notifier_block trace_module_nb = {
9857 .notifier_call = trace_module_notify,
9860 #endif /* CONFIG_MODULES */
9862 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9867 init_tracer_tracefs(&global_trace, NULL);
9868 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9870 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9871 &global_trace, &tracing_thresh_fops);
9873 trace_create_file("README", TRACE_MODE_READ, NULL,
9874 NULL, &tracing_readme_fops);
9876 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9877 NULL, &tracing_saved_cmdlines_fops);
9879 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9880 NULL, &tracing_saved_cmdlines_size_fops);
9882 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9883 NULL, &tracing_saved_tgids_fops);
9885 trace_create_eval_file(NULL);
9887 #ifdef CONFIG_MODULES
9888 register_module_notifier(&trace_module_nb);
9891 #ifdef CONFIG_DYNAMIC_FTRACE
9892 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9893 NULL, &tracing_dyn_info_fops);
9896 create_trace_instances(NULL);
9898 update_tracer_options(&global_trace);
9901 static __init int tracer_init_tracefs(void)
9905 trace_access_lock_init();
9907 ret = tracing_init_dentry();
9912 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
9913 queue_work(eval_map_wq, &tracerfs_init_work);
9915 tracer_init_tracefs_work_func(NULL);
9918 rv_init_interface();
9923 fs_initcall(tracer_init_tracefs);
9925 static int trace_die_panic_handler(struct notifier_block *self,
9926 unsigned long ev, void *unused);
9928 static struct notifier_block trace_panic_notifier = {
9929 .notifier_call = trace_die_panic_handler,
9930 .priority = INT_MAX - 1,
9933 static struct notifier_block trace_die_notifier = {
9934 .notifier_call = trace_die_panic_handler,
9935 .priority = INT_MAX - 1,
9939 * The idea is to execute the following die/panic callback early, in order
9940 * to avoid showing irrelevant information in the trace (like other panic
9941 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
9942 * warnings get disabled (to prevent potential log flooding).
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops)
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_DONE;
}
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000
/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}
	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
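
/*
 * Illustrative only (not an in-tree caller): a debugging hack that wants
 * the trace buffers spilled to the console at a checkpoint could do
 *
 *	if (WARN_ON(broken_state))
 *		ftrace_dump(DUMP_ALL);
 *
 * DUMP_ALL dumps every per-CPU buffer, while DUMP_ORIG dumps only the
 * buffer of the CPU that called ftrace_dump().
 */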
#define WRITE_BUFSIZE  4096
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
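
/*
 * The buffer parsed above is treated as a newline-separated list of
 * commands: everything from the first '#' on a line is stripped before the
 * line is handed to createfn(), and a single command must fit within
 * WRITE_BUFSIZE - 2 bytes ('\n' + '\0').
 */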
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) != NULL;
	kfree(test);
	return ret;
}
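
/*
 * boot_snapshot_info is a tab-terminated list of the instance names that
 * were asked to get a snapshot buffer on the kernel command line
 * (typically via the ftrace_boot_snapshot= option); the string matching
 * above relies only on that tab-separated layout.
 */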
__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif
__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
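
/*
 * Illustrative only: a command line containing
 *
 *	trace_instance=foo,sched:sched_switch
 *
 * leaves "foo,sched:sched_switch\t" in boot_instance_info, so the loop
 * above creates the "foo" instance and then enables each of the remaining
 * comma-separated events within it.
 */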
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;


	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();
	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;

	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;
	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();
	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
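
/*
 * Both early_trace_init() above and trace_init() below are expected to be
 * called from start_kernel(): the former runs early enough for boot-time
 * tracers to start, while the latter finishes the event initialization
 * once more of the kernel is up.
 */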
void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The name of the default boot-up tracer points into an __initdata
	 * buffer. This function is called at late_initcall time: if the
	 * boot tracer was never registered, clear the pointer out, to
	 * prevent a later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);