1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
54 #include "trace_output.h"
57 * On boot up, the ring buffer is set to the minimum size, so that
58 * we do not waste memory on systems that are not using tracing.
60 bool ring_buffer_expanded;
63 * We need to change this state when a selftest is running.
64 * A selftest will look into the ring-buffer to count the
65 * entries inserted during the selftest, although concurrent
66 * insertions into the ring-buffer, such as trace_printk, could occur
67 * at the same time, giving false positive or negative results.
69 static bool __read_mostly tracing_selftest_running;
72 * If boot-time tracing including tracers/events via kernel cmdline
73 * is running, we do not want to run SELFTEST.
75 bool __read_mostly tracing_selftest_disabled;
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
80 if (!tracing_selftest_disabled) {
81 tracing_selftest_disabled = true;
82 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
105 * To prevent the comm cache from being overwritten when no
106 * tracing is active, only save the comm when a trace event
109 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
112 * Kill all tracing for good (never come back).
113 * It is initialized to 1 but will turn to zero if the initialization
114 * of the tracer is successful. But that is the only place that sets
117 static int tracing_disabled = 1;
119 cpumask_var_t __read_mostly tracing_buffer_mask;
122 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
124 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
125 * is set, then ftrace_dump is called. This will output the contents
126 * of the ftrace buffers to the console. This is very useful for
127 * capturing traces that lead to crashes and outputting them to a
130 * It is off by default, but you can enable it either by specifying
131 * "ftrace_dump_on_oops" on the kernel command line, or by setting
132 * /proc/sys/kernel/ftrace_dump_on_oops
133 * Set 1 if you want to dump buffers of all CPUs
134 * Set 2 if you want to dump the buffer of the CPU that triggered oops
137 enum ftrace_dump_mode ftrace_dump_on_oops;
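/*
 * Illustrative usage (not part of this file), based on the description
 * above:
 *
 *	# dump all CPU buffers on an oops, set at run time:
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 *	# or on the kernel command line, dumping only the oopsing CPU:
 *	ftrace_dump_on_oops=orig_cpu
 */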
139 /* When set, tracing will stop when a WARN*() is hit */
140 int __disable_trace_on_warning;
142 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
143 /* Map of enums to their values, for "eval_map" file */
144 struct trace_eval_map_head {
146 unsigned long length;
149 union trace_eval_map_item;
151 struct trace_eval_map_tail {
153 * "end" is first and points to NULL as it must be different
154 * from "mod" or "eval_string"
156 union trace_eval_map_item *next;
157 const char *end; /* points to NULL */
160 static DEFINE_MUTEX(trace_eval_mutex);
163 * The trace_eval_maps are saved in an array with two extra elements,
164 * one at the beginning, and one at the end. The beginning item contains
165 * the count of the saved maps (head.length), and the module they
166 * belong to if not built in (head.mod). The ending item contains a
167 * pointer to the next array of saved eval_map items.
169 union trace_eval_map_item {
170 struct trace_eval_map map;
171 struct trace_eval_map_head head;
172 struct trace_eval_map_tail tail;
175 static union trace_eval_map_item *trace_eval_maps;
176 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
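/*
 * Illustrative layout (derived from the comment above): for three saved
 * eval maps belonging to one module, trace_eval_maps points at
 *
 *	[0] head  (head.length = 3, head.mod = owning module or NULL)
 *	[1] map   (first trace_eval_map)
 *	[2] map
 *	[3] map
 *	[4] tail  (tail.next = next saved array, tail.end = NULL)
 *
 * Walking the maps therefore means skipping the head, visiting
 * head.length map entries, and following tail.next to the next chunk.
 */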
178 int tracing_set_tracer(struct trace_array *tr, const char *buf);
179 static void ftrace_trace_userstack(struct trace_array *tr,
180 struct trace_buffer *buffer,
181 unsigned int trace_ctx);
183 #define MAX_TRACER_SIZE 100
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
187 static bool allocate_snapshot;
189 static int __init set_cmdline_ftrace(char *str)
191 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
192 default_bootup_tracer = bootup_tracer_buf;
193 /* We are using ftrace early, expand it */
194 ring_buffer_expanded = true;
197 __setup("ftrace=", set_cmdline_ftrace);
199 static int __init set_ftrace_dump_on_oops(char *str)
201 if (*str++ != '=' || !*str || !strcmp("1", str)) {
202 ftrace_dump_on_oops = DUMP_ALL;
206 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
207 ftrace_dump_on_oops = DUMP_ORIG;
213 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
215 static int __init stop_trace_on_warning(char *str)
217 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
218 __disable_trace_on_warning = 1;
221 __setup("traceoff_on_warning", stop_trace_on_warning);
223 static int __init boot_alloc_snapshot(char *str)
225 allocate_snapshot = true;
226 /* We also need the main ring buffer expanded */
227 ring_buffer_expanded = true;
230 __setup("alloc_snapshot", boot_alloc_snapshot);
233 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
235 static int __init set_trace_boot_options(char *str)
237 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
240 __setup("trace_options=", set_trace_boot_options);
242 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
243 static char *trace_boot_clock __initdata;
245 static int __init set_trace_boot_clock(char *str)
247 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
248 trace_boot_clock = trace_boot_clock_buf;
251 __setup("trace_clock=", set_trace_boot_clock);
253 static int __init set_tracepoint_printk(char *str)
255 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
256 tracepoint_printk = 1;
259 __setup("tp_printk", set_tracepoint_printk);
261 static int __init set_tracepoint_printk_stop(char *str)
263 tracepoint_printk_stop_on_boot = true;
266 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
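/*
 * Illustrative kernel command line combining the parameters handled above
 * (assuming the named tracer, clock and option are available on the
 * running kernel):
 *
 *	ftrace=function trace_buf_size=4096k alloc_snapshot
 *	trace_options=sym-offset trace_clock=global tp_printk
 */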
268 unsigned long long ns2usecs(u64 nsec)
276 trace_process_export(struct trace_export *export,
277 struct ring_buffer_event *event, int flag)
279 struct trace_entry *entry;
280 unsigned int size = 0;
282 if (export->flags & flag) {
283 entry = ring_buffer_event_data(event);
284 size = ring_buffer_event_length(event);
285 export->write(export, entry, size);
289 static DEFINE_MUTEX(ftrace_export_lock);
291 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
293 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
294 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
295 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
297 static inline void ftrace_exports_enable(struct trace_export *export)
299 if (export->flags & TRACE_EXPORT_FUNCTION)
300 static_branch_inc(&trace_function_exports_enabled);
302 if (export->flags & TRACE_EXPORT_EVENT)
303 static_branch_inc(&trace_event_exports_enabled);
305 if (export->flags & TRACE_EXPORT_MARKER)
306 static_branch_inc(&trace_marker_exports_enabled);
309 static inline void ftrace_exports_disable(struct trace_export *export)
311 if (export->flags & TRACE_EXPORT_FUNCTION)
312 static_branch_dec(&trace_function_exports_enabled);
314 if (export->flags & TRACE_EXPORT_EVENT)
315 static_branch_dec(&trace_event_exports_enabled);
317 if (export->flags & TRACE_EXPORT_MARKER)
318 static_branch_dec(&trace_marker_exports_enabled);
321 static void ftrace_exports(struct ring_buffer_event *event, int flag)
323 struct trace_export *export;
325 preempt_disable_notrace();
327 export = rcu_dereference_raw_check(ftrace_exports_list);
329 trace_process_export(export, event, flag);
330 export = rcu_dereference_raw_check(export->next);
333 preempt_enable_notrace();
337 add_trace_export(struct trace_export **list, struct trace_export *export)
339 rcu_assign_pointer(export->next, *list);
341 * We are adding export to the list, but another
342 * CPU might be walking that list. We need to make sure
343 * the export->next pointer is valid before another CPU sees
344 * the export pointer included in the list.
346 rcu_assign_pointer(*list, export);
350 rm_trace_export(struct trace_export **list, struct trace_export *export)
352 struct trace_export **p;
354 for (p = list; *p != NULL; p = &(*p)->next)
361 rcu_assign_pointer(*p, (*p)->next);
367 add_ftrace_export(struct trace_export **list, struct trace_export *export)
369 ftrace_exports_enable(export);
371 add_trace_export(list, export);
375 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
379 ret = rm_trace_export(list, export);
380 ftrace_exports_disable(export);
385 int register_ftrace_export(struct trace_export *export)
387 if (WARN_ON_ONCE(!export->write))
390 mutex_lock(&ftrace_export_lock);
392 add_ftrace_export(&ftrace_exports_list, export);
394 mutex_unlock(&ftrace_export_lock);
398 EXPORT_SYMBOL_GPL(register_ftrace_export);
400 int unregister_ftrace_export(struct trace_export *export)
404 mutex_lock(&ftrace_export_lock);
406 ret = rm_ftrace_export(&ftrace_exports_list, export);
408 mutex_unlock(&ftrace_export_lock);
412 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
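/*
 * Minimal sketch of a trace_export user (illustrative only; the names
 * my_export_write/my_export are made up and the exact ->write prototype
 * lives in <linux/trace.h>):
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		(forward the binary trace entry, e.g. to a device or socket)
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */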
414 /* trace_flags holds trace_options default values */
415 #define TRACE_DEFAULT_FLAGS \
416 (FUNCTION_DEFAULT_FLAGS | \
417 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
418 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
419 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
420 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
423 /* trace_options that are only supported by global_trace */
424 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
425 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
427 /* trace_flags that are default zero for instances */
428 #define ZEROED_TRACE_FLAGS \
429 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
432 * The global_trace is the descriptor that holds the top-level tracing
433 * buffers for the live tracing.
435 static struct trace_array global_trace = {
436 .trace_flags = TRACE_DEFAULT_FLAGS,
439 LIST_HEAD(ftrace_trace_arrays);
441 int trace_array_get(struct trace_array *this_tr)
443 struct trace_array *tr;
446 mutex_lock(&trace_types_lock);
447 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
454 mutex_unlock(&trace_types_lock);
459 static void __trace_array_put(struct trace_array *this_tr)
461 WARN_ON(!this_tr->ref);
466 * trace_array_put - Decrement the reference counter for this trace array.
467 * @this_tr : pointer to the trace array
469 * NOTE: Use this when we no longer need the trace array returned by
470 * trace_array_get_by_name(). This ensures the trace array can be later
474 void trace_array_put(struct trace_array *this_tr)
479 mutex_lock(&trace_types_lock);
480 __trace_array_put(this_tr);
481 mutex_unlock(&trace_types_lock);
483 EXPORT_SYMBOL_GPL(trace_array_put);
485 int tracing_check_open_get_tr(struct trace_array *tr)
489 ret = security_locked_down(LOCKDOWN_TRACEFS);
493 if (tracing_disabled)
496 if (tr && trace_array_get(tr) < 0)
502 int call_filter_check_discard(struct trace_event_call *call, void *rec,
503 struct trace_buffer *buffer,
504 struct ring_buffer_event *event)
506 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
507 !filter_match_preds(call->filter, rec)) {
508 __trace_event_discard_commit(buffer, event);
515 void trace_free_pid_list(struct trace_pid_list *pid_list)
517 vfree(pid_list->pids);
522 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
523 * @filtered_pids: The list of pids to check
524 * @search_pid: The PID to find in @filtered_pids
526 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
529 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
532 * If pid_max changed after filtered_pids was created, we
533 * by default ignore all pids greater than the previous pid_max.
535 if (search_pid >= filtered_pids->pid_max)
538 return test_bit(search_pid, filtered_pids->pids);
542 * trace_ignore_this_task - should a task be ignored for tracing
543 * @filtered_pids: The list of pids to check
544 * @filtered_no_pids: The list of pids not to be traced
545 * @task: The task that should be ignored if not filtered
547 * Checks whether @task should be traced, based on @filtered_pids and @filtered_no_pids.
548 * Returns true if @task should *NOT* be traced.
549 * Returns false if @task should be traced.
552 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
553 struct trace_pid_list *filtered_no_pids,
554 struct task_struct *task)
557 * If filtered_no_pids is not empty, and the task's pid is listed
558 * in filtered_no_pids, then return true.
559 * Otherwise, if filtered_pids is empty, that means we can
560 * trace all tasks. If it has content, then only trace pids
561 * within filtered_pids.
564 return (filtered_pids &&
565 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
567 trace_find_filtered_pid(filtered_no_pids, task->pid));
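/*
 * Summary of the filter combination above (illustrative):
 *
 *	task in filtered_no_pids          -> ignored (returns true)
 *	filtered_pids set, task missing   -> ignored (returns true)
 *	filtered_pids set, task present   -> traced  (returns false)
 *	both lists NULL/empty             -> traced  (returns false)
 */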
571 * trace_filter_add_remove_task - Add or remove a task from a pid_list
572 * @pid_list: The list to modify
573 * @self: The current task for fork or NULL for exit
574 * @task: The task to add or remove
576 * If adding a task, if @self is defined, the task is only added if @self
577 * is also included in @pid_list. This happens on fork and tasks should
578 * only be added when the parent is listed. If @self is NULL, then the
579 * @task pid will be removed from the list, which would happen on exit
582 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
583 struct task_struct *self,
584 struct task_struct *task)
589 /* For forks, we only add if the forking task is listed */
591 if (!trace_find_filtered_pid(pid_list, self->pid))
595 /* Sorry, but we don't support pid_max changing after setting */
596 if (task->pid >= pid_list->pid_max)
599 /* "self" is set for forks, and NULL for exits */
601 set_bit(task->pid, pid_list->pids);
603 clear_bit(task->pid, pid_list->pids);
607 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
608 * @pid_list: The pid list to show
609 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
610 * @pos: The position of the file
612 * This is used by the seq_file "next" operation to iterate the pids
613 * listed in a trace_pid_list structure.
615 * Returns the pid+1 as we want to display pid of zero, but NULL would
616 * stop the iteration.
618 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
620 unsigned long pid = (unsigned long)v;
624 /* pid already is +1 of the actual previous bit */
625 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
627 /* Return pid + 1 to allow zero to be represented */
628 if (pid < pid_list->pid_max)
629 return (void *)(pid + 1);
635 * trace_pid_start - Used for seq_file to start reading pid lists
636 * @pid_list: The pid list to show
637 * @pos: The position of the file
639 * This is used by seq_file "start" operation to start the iteration
642 * Returns the pid+1 as we want to display pid of zero, but NULL would
643 * stop the iteration.
645 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
650 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
651 if (pid >= pid_list->pid_max)
654 /* Return pid + 1 so that zero can be the exit value */
655 for (pid++; pid && l < *pos;
656 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
662 * trace_pid_show - show the current pid in seq_file processing
663 * @m: The seq_file structure to write into
664 * @v: A void pointer of the pid (+1) value to display
666 * Can be directly used by seq_file operations to display the current
669 int trace_pid_show(struct seq_file *m, void *v)
671 unsigned long pid = (unsigned long)v - 1;
673 seq_printf(m, "%lu\n", pid);
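/*
 * Sketch of how a tracefs pid file typically plugs these helpers into its
 * seq_file operations (illustrative; p_start/p_next/p_stop are made-up
 * names and the pid_list would come from the file's private data):
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations pid_sops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,	(release any locking taken in p_start)
 *		.show	= trace_pid_show,
 *	};
 */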
677 /* 128 should be much more than enough */
678 #define PID_BUF_SIZE 127
680 int trace_pid_write(struct trace_pid_list *filtered_pids,
681 struct trace_pid_list **new_pid_list,
682 const char __user *ubuf, size_t cnt)
684 struct trace_pid_list *pid_list;
685 struct trace_parser parser;
693 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
697 * Always recreate a new array. The write is an all or nothing
698 * operation. Always create a new array when adding new pids by
699 * the user. If the operation fails, then the current list is
702 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
704 trace_parser_put(&parser);
708 pid_list->pid_max = READ_ONCE(pid_max);
710 /* Only truncating will shrink pid_max */
711 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
712 pid_list->pid_max = filtered_pids->pid_max;
714 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
715 if (!pid_list->pids) {
716 trace_parser_put(&parser);
722 /* copy the current bits to the new max */
723 for_each_set_bit(pid, filtered_pids->pids,
724 filtered_pids->pid_max) {
725 set_bit(pid, pid_list->pids);
734 ret = trace_get_user(&parser, ubuf, cnt, &pos);
735 if (ret < 0 || !trace_parser_loaded(&parser))
743 if (kstrtoul(parser.buffer, 0, &val))
745 if (val >= pid_list->pid_max)
750 set_bit(pid, pid_list->pids);
753 trace_parser_clear(&parser);
756 trace_parser_put(&parser);
759 trace_free_pid_list(pid_list);
764 /* Cleared the list of pids */
765 trace_free_pid_list(pid_list);
770 *new_pid_list = pid_list;
775 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
779 /* Early boot up does not have a buffer yet */
781 return trace_clock_local();
783 ts = ring_buffer_time_stamp(buf->buffer);
784 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
789 u64 ftrace_now(int cpu)
791 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
795 * tracing_is_enabled - Show if global_trace has been enabled
797 * Shows if the global trace has been enabled or not. It uses the
798 * mirror flag "buffer_disabled", which can be used in fast paths such as for
799 * the irqsoff tracer. But it may be inaccurate due to races. If you
800 * need to know the accurate state, use tracing_is_on() which is a little
801 * slower, but accurate.
803 int tracing_is_enabled(void)
806 * For quick access (irqsoff uses this in fast path), just
807 * return the mirror variable of the state of the ring buffer.
808 * It's a little racy, but we don't really care.
811 return !global_trace.buffer_disabled;
815 * trace_buf_size is the size in bytes that is allocated
816 * for a buffer. Note, the number of bytes is always rounded
819 * This number is purposely set to a low number of 16384.
820 * If a dump on oops happens, it will be much appreciated
821 * not to have to wait for all that output. Anyway, this is
822 * configurable at both boot time and run time.
824 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
826 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
828 /* trace_types holds a link list of available tracers. */
829 static struct tracer *trace_types __read_mostly;
832 * trace_types_lock is used to protect the trace_types list.
834 DEFINE_MUTEX(trace_types_lock);
837 * serialize the access of the ring buffer
839 * The ring buffer serializes readers, but that is only low-level protection.
840 * The validity of the events (returned by ring_buffer_peek() etc.)
841 * is not protected by the ring buffer.
843 * The content of events may become garbage if we allow another process to consume
844 * these events concurrently:
845 * A) the page of the consumed events may become a normal page
846 * (not a reader page) in the ring buffer, and this page will be rewritten
847 * by the event producer.
848 * B) The page of the consumed events may become a page for splice_read,
849 * and this page will be returned to the system.
851 * These primitives allow multi-process access to different per-CPU ring buffers
854 * These primitives don't distinguish read-only and read-consume access.
855 * Multiple read-only accesses are also serialized.
859 static DECLARE_RWSEM(all_cpu_access_lock);
860 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
862 static inline void trace_access_lock(int cpu)
864 if (cpu == RING_BUFFER_ALL_CPUS) {
865 /* gain it for accessing the whole ring buffer. */
866 down_write(&all_cpu_access_lock);
868 /* gain it for accessing a cpu ring buffer. */
870 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
871 down_read(&all_cpu_access_lock);
873 /* Secondly block other access to this @cpu ring buffer. */
874 mutex_lock(&per_cpu(cpu_access_lock, cpu));
878 static inline void trace_access_unlock(int cpu)
880 if (cpu == RING_BUFFER_ALL_CPUS) {
881 up_write(&all_cpu_access_lock);
883 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
884 up_read(&all_cpu_access_lock);
888 static inline void trace_access_lock_init(void)
892 for_each_possible_cpu(cpu)
893 mutex_init(&per_cpu(cpu_access_lock, cpu));
898 static DEFINE_MUTEX(access_lock);
900 static inline void trace_access_lock(int cpu)
903 mutex_lock(&access_lock);
906 static inline void trace_access_unlock(int cpu)
909 mutex_unlock(&access_lock);
912 static inline void trace_access_lock_init(void)
918 #ifdef CONFIG_STACKTRACE
919 static void __ftrace_trace_stack(struct trace_buffer *buffer,
920 unsigned int trace_ctx,
921 int skip, struct pt_regs *regs);
922 static inline void ftrace_trace_stack(struct trace_array *tr,
923 struct trace_buffer *buffer,
924 unsigned int trace_ctx,
925 int skip, struct pt_regs *regs);
928 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
929 unsigned int trace_ctx,
930 int skip, struct pt_regs *regs)
933 static inline void ftrace_trace_stack(struct trace_array *tr,
934 struct trace_buffer *buffer,
935 unsigned long trace_ctx,
936 int skip, struct pt_regs *regs)
942 static __always_inline void
943 trace_event_setup(struct ring_buffer_event *event,
944 int type, unsigned int trace_ctx)
946 struct trace_entry *ent = ring_buffer_event_data(event);
948 tracing_generic_entry_update(ent, type, trace_ctx);
951 static __always_inline struct ring_buffer_event *
952 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
955 unsigned int trace_ctx)
957 struct ring_buffer_event *event;
959 event = ring_buffer_lock_reserve(buffer, len);
961 trace_event_setup(event, type, trace_ctx);
966 void tracer_tracing_on(struct trace_array *tr)
968 if (tr->array_buffer.buffer)
969 ring_buffer_record_on(tr->array_buffer.buffer);
971 * This flag is looked at when buffers haven't been allocated
972 * yet, or by some tracers (like irqsoff) that just want to
973 * know if the ring buffer has been disabled, but can handle
974 * races where it gets disabled while we still do a record.
975 * As the check is in the fast path of the tracers, it is more
976 * important to be fast than accurate.
978 tr->buffer_disabled = 0;
979 /* Make the flag seen by readers */
984 * tracing_on - enable tracing buffers
986 * This function enables tracing buffers that may have been
987 * disabled with tracing_off.
989 void tracing_on(void)
991 tracer_tracing_on(&global_trace);
993 EXPORT_SYMBOL_GPL(tracing_on);
996 static __always_inline void
997 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
999 __this_cpu_write(trace_taskinfo_save, true);
1001 /* If this is the temp buffer, we need to commit fully */
1002 if (this_cpu_read(trace_buffered_event) == event) {
1003 /* Length is in event->array[0] */
1004 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1005 /* Release the temp buffer */
1006 this_cpu_dec(trace_buffered_event_cnt);
1008 ring_buffer_unlock_commit(buffer, event);
1012 * __trace_puts - write a constant string into the trace buffer.
1013 * @ip: The address of the caller
1014 * @str: The constant string to write
1015 * @size: The size of the string.
1017 int __trace_puts(unsigned long ip, const char *str, int size)
1019 struct ring_buffer_event *event;
1020 struct trace_buffer *buffer;
1021 struct print_entry *entry;
1022 unsigned int trace_ctx;
1025 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1028 if (unlikely(tracing_selftest_running || tracing_disabled))
1031 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1033 trace_ctx = tracing_gen_ctx();
1034 buffer = global_trace.array_buffer.buffer;
1035 ring_buffer_nest_start(buffer);
1036 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1043 entry = ring_buffer_event_data(event);
1046 memcpy(&entry->buf, str, size);
1048 /* Add a newline if necessary */
1049 if (entry->buf[size - 1] != '\n') {
1050 entry->buf[size] = '\n';
1051 entry->buf[size + 1] = '\0';
1053 entry->buf[size] = '\0';
1055 __buffer_unlock_commit(buffer, event);
1056 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1058 ring_buffer_nest_end(buffer);
1061 EXPORT_SYMBOL_GPL(__trace_puts);
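/*
 * Callers normally use the trace_puts() macro rather than calling this
 * directly; for example (illustrative):
 *
 *	trace_puts("hit the slow path\n");
 *
 * The macro resolves to __trace_bputs() for string literals and to
 * __trace_puts() otherwise.
 */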
1064 * __trace_bputs - write the pointer to a constant string into trace buffer
1065 * @ip: The address of the caller
1066 * @str: The constant string to write to the buffer to
1068 int __trace_bputs(unsigned long ip, const char *str)
1070 struct ring_buffer_event *event;
1071 struct trace_buffer *buffer;
1072 struct bputs_entry *entry;
1073 unsigned int trace_ctx;
1074 int size = sizeof(struct bputs_entry);
1077 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1080 if (unlikely(tracing_selftest_running || tracing_disabled))
1083 trace_ctx = tracing_gen_ctx();
1084 buffer = global_trace.array_buffer.buffer;
1086 ring_buffer_nest_start(buffer);
1087 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1092 entry = ring_buffer_event_data(event);
1096 __buffer_unlock_commit(buffer, event);
1097 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1101 ring_buffer_nest_end(buffer);
1104 EXPORT_SYMBOL_GPL(__trace_bputs);
1106 #ifdef CONFIG_TRACER_SNAPSHOT
1107 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1110 struct tracer *tracer = tr->current_trace;
1111 unsigned long flags;
1114 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1115 internal_trace_puts("*** snapshot is being ignored ***\n");
1119 if (!tr->allocated_snapshot) {
1120 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1121 internal_trace_puts("*** stopping trace here! ***\n");
1126 /* Note, snapshot can not be used when the tracer uses it */
1127 if (tracer->use_max_tr) {
1128 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1129 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1133 local_irq_save(flags);
1134 update_max_tr(tr, current, smp_processor_id(), cond_data);
1135 local_irq_restore(flags);
1138 void tracing_snapshot_instance(struct trace_array *tr)
1140 tracing_snapshot_instance_cond(tr, NULL);
1144 * tracing_snapshot - take a snapshot of the current buffer.
1146 * This causes a swap between the snapshot buffer and the current live
1147 * tracing buffer. You can use this to take snapshots of the live
1148 * trace when some condition is triggered, but continue to trace.
1150 * Note, make sure to allocate the snapshot either with
1151 * tracing_snapshot_alloc(), or by doing it manually
1152 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1154 * If the snapshot buffer is not allocated, it will stop tracing.
1155 * Basically making a permanent snapshot.
1157 void tracing_snapshot(void)
1159 struct trace_array *tr = &global_trace;
1161 tracing_snapshot_instance(tr);
1163 EXPORT_SYMBOL_GPL(tracing_snapshot);
1166 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1167 * @tr: The tracing instance to snapshot
1168 * @cond_data: The data to be tested conditionally, and possibly saved
1170 * This is the same as tracing_snapshot() except that the snapshot is
1171 * conditional - the snapshot will only happen if the
1172 * cond_snapshot.update() implementation receiving the cond_data
1173 * returns true, which means that the trace array's cond_snapshot
1174 * update() operation used the cond_data to determine whether the
1175 * snapshot should be taken, and if it was, presumably saved it along
1176 * with the snapshot.
1178 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1180 tracing_snapshot_instance_cond(tr, cond_data);
1182 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1185 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1186 * @tr: The tracing instance
1188 * When the user enables a conditional snapshot using
1189 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1190 * with the snapshot. This accessor is used to retrieve it.
1192 * Should not be called from cond_snapshot.update(), since it takes
1193 * the tr->max_lock lock, which the code calling
1194 * cond_snapshot.update() has already done.
1196 * Returns the cond_data associated with the trace array's snapshot.
1198 void *tracing_cond_snapshot_data(struct trace_array *tr)
1200 void *cond_data = NULL;
1202 arch_spin_lock(&tr->max_lock);
1204 if (tr->cond_snapshot)
1205 cond_data = tr->cond_snapshot->cond_data;
1207 arch_spin_unlock(&tr->max_lock);
1211 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1213 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1214 struct array_buffer *size_buf, int cpu_id);
1215 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1217 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1221 if (!tr->allocated_snapshot) {
1223 /* allocate spare buffer */
1224 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1225 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1229 tr->allocated_snapshot = true;
1235 static void free_snapshot(struct trace_array *tr)
1238 * We don't free the ring buffer; instead, we resize it because
1239 * the max_tr ring buffer has some state (e.g. ring->clock) and
1240 * we want to preserve it.
1242 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1243 set_buffer_entries(&tr->max_buffer, 1);
1244 tracing_reset_online_cpus(&tr->max_buffer);
1245 tr->allocated_snapshot = false;
1249 * tracing_alloc_snapshot - allocate snapshot buffer.
1251 * This only allocates the snapshot buffer if it isn't already
1252 * allocated - it doesn't also take a snapshot.
1254 * This is meant to be used in cases where the snapshot buffer needs
1255 * to be set up for events that can't sleep but need to be able to
1256 * trigger a snapshot.
1258 int tracing_alloc_snapshot(void)
1260 struct trace_array *tr = &global_trace;
1263 ret = tracing_alloc_snapshot_instance(tr);
1268 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1271 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1273 * This is similar to tracing_snapshot(), but it will allocate the
1274 * snapshot buffer if it isn't already allocated. Use this only
1275 * where it is safe to sleep, as the allocation may sleep.
1277 * This causes a swap between the snapshot buffer and the current live
1278 * tracing buffer. You can use this to take snapshots of the live
1279 * trace when some condition is triggered, but continue to trace.
1281 void tracing_snapshot_alloc(void)
1285 ret = tracing_alloc_snapshot();
1291 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
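/*
 * Typical in-kernel usage (illustrative), following the constraints in the
 * comments above: allocate the snapshot buffer once from a context that may
 * sleep, then take snapshots from the hot path, even in atomic context:
 *
 *	tracing_alloc_snapshot();		(may sleep, allocate only)
 *	...
 *	if (suspicious_condition)
 *		tracing_snapshot();		(swap buffers, does not sleep)
 */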
1294 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1295 * @tr: The tracing instance
1296 * @cond_data: User data to associate with the snapshot
1297 * @update: Implementation of the cond_snapshot update function
1299 * Check whether the conditional snapshot for the given instance has
1300 * already been enabled, or if the current tracer is already using a
1301 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1302 * save the cond_data and update function inside.
1304 * Returns 0 if successful, error otherwise.
1306 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1307 cond_update_fn_t update)
1309 struct cond_snapshot *cond_snapshot;
1312 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1316 cond_snapshot->cond_data = cond_data;
1317 cond_snapshot->update = update;
1319 mutex_lock(&trace_types_lock);
1321 ret = tracing_alloc_snapshot_instance(tr);
1325 if (tr->current_trace->use_max_tr) {
1331 * The cond_snapshot can only change to NULL without the
1332 * trace_types_lock. We don't care if we race with it going
1333 * to NULL, but we want to make sure that it's not set to
1334 * something other than NULL when we get here, which we can
1335 * do safely with only holding the trace_types_lock and not
1336 * having to take the max_lock.
1338 if (tr->cond_snapshot) {
1343 arch_spin_lock(&tr->max_lock);
1344 tr->cond_snapshot = cond_snapshot;
1345 arch_spin_unlock(&tr->max_lock);
1347 mutex_unlock(&trace_types_lock);
1352 mutex_unlock(&trace_types_lock);
1353 kfree(cond_snapshot);
1356 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1359 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1360 * @tr: The tracing instance
1362 * Check whether the conditional snapshot for the given instance is
1363 * enabled; if so, free the cond_snapshot associated with it,
1364 * otherwise return -EINVAL.
1366 * Returns 0 if successful, error otherwise.
1368 int tracing_snapshot_cond_disable(struct trace_array *tr)
1372 arch_spin_lock(&tr->max_lock);
1374 if (!tr->cond_snapshot)
1377 kfree(tr->cond_snapshot);
1378 tr->cond_snapshot = NULL;
1381 arch_spin_unlock(&tr->max_lock);
1385 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
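/*
 * Sketch of a conditional-snapshot user (illustrative; my_update() and
 * my_data are made-up names, and the exact cond_update_fn_t prototype is
 * declared in trace.h):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return ...;	(snapshot is taken only when this returns true)
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_data);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */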
1387 void tracing_snapshot(void)
1389 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1391 EXPORT_SYMBOL_GPL(tracing_snapshot);
1392 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1394 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1396 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1397 int tracing_alloc_snapshot(void)
1399 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1402 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1403 void tracing_snapshot_alloc(void)
1408 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1409 void *tracing_cond_snapshot_data(struct trace_array *tr)
1413 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1414 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1418 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1419 int tracing_snapshot_cond_disable(struct trace_array *tr)
1423 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1424 #endif /* CONFIG_TRACER_SNAPSHOT */
1426 void tracer_tracing_off(struct trace_array *tr)
1428 if (tr->array_buffer.buffer)
1429 ring_buffer_record_off(tr->array_buffer.buffer);
1431 * This flag is looked at when buffers haven't been allocated
1432 * yet, or by some tracers (like irqsoff) that just want to
1433 * know if the ring buffer has been disabled, but can handle
1434 * races where it gets disabled while we still do a record.
1435 * As the check is in the fast path of the tracers, it is more
1436 * important to be fast than accurate.
1438 tr->buffer_disabled = 1;
1439 /* Make the flag seen by readers */
1444 * tracing_off - turn off tracing buffers
1446 * This function stops the tracing buffers from recording data.
1447 * It does not disable any overhead the tracers themselves may
1448 * be causing. This function simply causes all recording to
1449 * the ring buffers to fail.
1451 void tracing_off(void)
1453 tracer_tracing_off(&global_trace);
1455 EXPORT_SYMBOL_GPL(tracing_off);
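/*
 * A common debugging pattern (illustrative): stop the ring buffers the
 * moment a bad state is detected, so the trace leading up to it is
 * preserved for inspection:
 *
 *	if (WARN_ON(broken_invariant))
 *		tracing_off();
 *
 * Recording can later be re-enabled with tracing_on() or by writing '1'
 * to the "tracing_on" tracefs file.
 */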
1457 void disable_trace_on_warning(void)
1459 if (__disable_trace_on_warning) {
1460 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1461 "Disabling tracing due to warning\n");
1467 * tracer_tracing_is_on - show the real state of the ring buffer
1468 * @tr : the trace array whose ring buffer state is queried
1470 * Shows the real state of the ring buffer: whether it is enabled or not.
1472 bool tracer_tracing_is_on(struct trace_array *tr)
1474 if (tr->array_buffer.buffer)
1475 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1476 return !tr->buffer_disabled;
1480 * tracing_is_on - show state of ring buffers enabled
1482 int tracing_is_on(void)
1484 return tracer_tracing_is_on(&global_trace);
1486 EXPORT_SYMBOL_GPL(tracing_is_on);
1488 static int __init set_buf_size(char *str)
1490 unsigned long buf_size;
1494 buf_size = memparse(str, &str);
1495 /* nr_entries can not be zero */
1498 trace_buf_size = buf_size;
1501 __setup("trace_buf_size=", set_buf_size);
1503 static int __init set_tracing_thresh(char *str)
1505 unsigned long threshold;
1510 ret = kstrtoul(str, 0, &threshold);
1513 tracing_thresh = threshold * 1000;
1516 __setup("tracing_thresh=", set_tracing_thresh);
1518 unsigned long nsecs_to_usecs(unsigned long nsecs)
1520 return nsecs / 1000;
1524 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1525 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1526 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1527 * of strings in the order that the evals (enum) were defined.
1532 /* These must match the bit positions in trace_iterator_flags */
1533 static const char *trace_options[] = {
1541 int in_ns; /* is this clock in nanoseconds? */
1542 } trace_clocks[] = {
1543 { trace_clock_local, "local", 1 },
1544 { trace_clock_global, "global", 1 },
1545 { trace_clock_counter, "counter", 0 },
1546 { trace_clock_jiffies, "uptime", 0 },
1547 { trace_clock, "perf", 1 },
1548 { ktime_get_mono_fast_ns, "mono", 1 },
1549 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1550 { ktime_get_boot_fast_ns, "boot", 1 },
1554 bool trace_clock_in_ns(struct trace_array *tr)
1556 if (trace_clocks[tr->clock_id].in_ns)
1563 * trace_parser_get_init - gets the buffer for trace parser
1565 int trace_parser_get_init(struct trace_parser *parser, int size)
1567 memset(parser, 0, sizeof(*parser));
1569 parser->buffer = kmalloc(size, GFP_KERNEL);
1570 if (!parser->buffer)
1573 parser->size = size;
1578 * trace_parser_put - frees the buffer for trace parser
1580 void trace_parser_put(struct trace_parser *parser)
1582 kfree(parser->buffer);
1583 parser->buffer = NULL;
1587 * trace_get_user - reads the user input string separated by space
1588 * (matched by isspace(ch))
1590 * For each string found the 'struct trace_parser' is updated,
1591 * and the function returns.
1593 * Returns number of bytes read.
1595 * See kernel/trace/trace.h for 'struct trace_parser' details.
1597 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1598 size_t cnt, loff_t *ppos)
1605 trace_parser_clear(parser);
1607 ret = get_user(ch, ubuf++);
1615 * The parser is not finished with the last write,
1616 * continue reading the user input without skipping spaces.
1618 if (!parser->cont) {
1619 /* skip white space */
1620 while (cnt && isspace(ch)) {
1621 ret = get_user(ch, ubuf++);
1630 /* only spaces were written */
1631 if (isspace(ch) || !ch) {
1638 /* read the non-space input */
1639 while (cnt && !isspace(ch) && ch) {
1640 if (parser->idx < parser->size - 1)
1641 parser->buffer[parser->idx++] = ch;
1646 ret = get_user(ch, ubuf++);
1653 /* We either got finished input or we have to wait for another call. */
1654 if (isspace(ch) || !ch) {
1655 parser->buffer[parser->idx] = 0;
1656 parser->cont = false;
1657 } else if (parser->idx < parser->size - 1) {
1658 parser->cont = true;
1659 parser->buffer[parser->idx++] = ch;
1660 /* Make sure the parsed string always terminates with '\0'. */
1661 parser->buffer[parser->idx] = 0;
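/*
 * Sketch of a typical trace_get_user() caller (illustrative; MAX_LEN is a
 * made-up bound), mirroring the loop used by trace_pid_write() earlier in
 * this file:
 *
 *	struct trace_parser parser;
 *	loff_t pos;
 *
 *	if (trace_parser_get_init(&parser, MAX_LEN))
 *		return -ENOMEM;
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		(parser.buffer now holds one NUL-terminated token)
 *		ubuf += ret;
 *		cnt -= ret;
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */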
1674 /* TODO add a seq_buf_to_buffer() */
1675 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1679 if (trace_seq_used(s) <= s->seq.readpos)
1682 len = trace_seq_used(s) - s->seq.readpos;
1685 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1687 s->seq.readpos += cnt;
1691 unsigned long __read_mostly tracing_thresh;
1692 static const struct file_operations tracing_max_lat_fops;
1694 #ifdef LATENCY_FS_NOTIFY
1696 static struct workqueue_struct *fsnotify_wq;
1698 static void latency_fsnotify_workfn(struct work_struct *work)
1700 struct trace_array *tr = container_of(work, struct trace_array,
1702 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1705 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1707 struct trace_array *tr = container_of(iwork, struct trace_array,
1709 queue_work(fsnotify_wq, &tr->fsnotify_work);
1712 static void trace_create_maxlat_file(struct trace_array *tr,
1713 struct dentry *d_tracer)
1715 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1716 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1717 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1718 d_tracer, &tr->max_latency,
1719 &tracing_max_lat_fops);
1722 __init static int latency_fsnotify_init(void)
1724 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1725 WQ_UNBOUND | WQ_HIGHPRI, 0);
1727 pr_err("Unable to allocate tr_max_lat_wq\n");
1733 late_initcall_sync(latency_fsnotify_init);
1735 void latency_fsnotify(struct trace_array *tr)
1740 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1741 * possible that we are called from __schedule() or do_idle(), which
1742 * could cause a deadlock.
1744 irq_work_queue(&tr->fsnotify_irqwork);
1747 #elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
1748 || defined(CONFIG_OSNOISE_TRACER)
1750 #define trace_create_maxlat_file(tr, d_tracer) \
1751 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1752 &tr->max_latency, &tracing_max_lat_fops)
1755 #define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
1758 #ifdef CONFIG_TRACER_MAX_TRACE
1760 * Copy the new maximum trace into the separate maximum-trace
1761 * structure. (this way the maximum trace is permanently saved,
1762 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1765 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1767 struct array_buffer *trace_buf = &tr->array_buffer;
1768 struct array_buffer *max_buf = &tr->max_buffer;
1769 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1770 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1773 max_buf->time_start = data->preempt_timestamp;
1775 max_data->saved_latency = tr->max_latency;
1776 max_data->critical_start = data->critical_start;
1777 max_data->critical_end = data->critical_end;
1779 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1780 max_data->pid = tsk->pid;
1782 * If tsk == current, then use current_uid(), as that does not use
1783 * RCU. The irq tracer can be called out of RCU scope.
1786 max_data->uid = current_uid();
1788 max_data->uid = task_uid(tsk);
1790 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1791 max_data->policy = tsk->policy;
1792 max_data->rt_priority = tsk->rt_priority;
1794 /* record this tasks comm */
1795 tracing_record_cmdline(tsk);
1796 latency_fsnotify(tr);
1800 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1802 * @tsk: the task with the latency
1803 * @cpu: The cpu that initiated the trace.
1804 * @cond_data: User data associated with a conditional snapshot
1806 * Flip the buffers between the @tr and the max_tr and record information
1807 * about which task was the cause of this latency.
1810 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1816 WARN_ON_ONCE(!irqs_disabled());
1818 if (!tr->allocated_snapshot) {
1819 /* Only the nop tracer should hit this when disabling */
1820 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1824 arch_spin_lock(&tr->max_lock);
1826 /* Inherit the recordable setting from array_buffer */
1827 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1828 ring_buffer_record_on(tr->max_buffer.buffer);
1830 ring_buffer_record_off(tr->max_buffer.buffer);
1832 #ifdef CONFIG_TRACER_SNAPSHOT
1833 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1836 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1838 __update_max_tr(tr, tsk, cpu);
1841 arch_spin_unlock(&tr->max_lock);
1845 * update_max_tr_single - only copy one trace over, and reset the rest
1847 * @tsk: task with the latency
1848 * @cpu: the cpu of the buffer to copy.
1850 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1853 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1860 WARN_ON_ONCE(!irqs_disabled());
1861 if (!tr->allocated_snapshot) {
1862 /* Only the nop tracer should hit this when disabling */
1863 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1867 arch_spin_lock(&tr->max_lock);
1869 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1871 if (ret == -EBUSY) {
1873 * We failed to swap the buffer due to a commit taking
1874 * place on this CPU. We fail to record, but we reset
1875 * the max trace buffer (no one writes directly to it)
1876 * and flag that it failed.
1878 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1879 "Failed to swap buffers due to commit in progress\n");
1882 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1884 __update_max_tr(tr, tsk, cpu);
1885 arch_spin_unlock(&tr->max_lock);
1887 #endif /* CONFIG_TRACER_MAX_TRACE */
1889 static int wait_on_pipe(struct trace_iterator *iter, int full)
1891 /* Iterators are static, they should be filled or empty */
1892 if (trace_buffer_iter(iter, iter->cpu_file))
1895 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1899 #ifdef CONFIG_FTRACE_STARTUP_TEST
1900 static bool selftests_can_run;
1902 struct trace_selftests {
1903 struct list_head list;
1904 struct tracer *type;
1907 static LIST_HEAD(postponed_selftests);
1909 static int save_selftest(struct tracer *type)
1911 struct trace_selftests *selftest;
1913 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1917 selftest->type = type;
1918 list_add(&selftest->list, &postponed_selftests);
1922 static int run_tracer_selftest(struct tracer *type)
1924 struct trace_array *tr = &global_trace;
1925 struct tracer *saved_tracer = tr->current_trace;
1928 if (!type->selftest || tracing_selftest_disabled)
1932 * If a tracer registers early in boot up (before scheduling is
1933 * initialized and such), then do not run its selftests yet.
1934 * Instead, run it a little later in the boot process.
1936 if (!selftests_can_run)
1937 return save_selftest(type);
1939 if (!tracing_is_on()) {
1940 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1946 * Run a selftest on this tracer.
1947 * Here we reset the trace buffer, and set the current
1948 * tracer to be this tracer. The tracer can then run some
1949 * internal tracing to verify that everything is in order.
1950 * If we fail, we do not register this tracer.
1952 tracing_reset_online_cpus(&tr->array_buffer);
1954 tr->current_trace = type;
1956 #ifdef CONFIG_TRACER_MAX_TRACE
1957 if (type->use_max_tr) {
1958 /* If we expanded the buffers, make sure the max is expanded too */
1959 if (ring_buffer_expanded)
1960 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1961 RING_BUFFER_ALL_CPUS);
1962 tr->allocated_snapshot = true;
1966 /* the test is responsible for initializing and enabling */
1967 pr_info("Testing tracer %s: ", type->name);
1968 ret = type->selftest(type, tr);
1969 /* the test is responsible for resetting too */
1970 tr->current_trace = saved_tracer;
1972 printk(KERN_CONT "FAILED!\n");
1973 /* Add the warning after printing 'FAILED' */
1977 /* Only reset on passing, to avoid touching corrupted buffers */
1978 tracing_reset_online_cpus(&tr->array_buffer);
1980 #ifdef CONFIG_TRACER_MAX_TRACE
1981 if (type->use_max_tr) {
1982 tr->allocated_snapshot = false;
1984 /* Shrink the max buffer again */
1985 if (ring_buffer_expanded)
1986 ring_buffer_resize(tr->max_buffer.buffer, 1,
1987 RING_BUFFER_ALL_CPUS);
1991 printk(KERN_CONT "PASSED\n");
1995 static __init int init_trace_selftests(void)
1997 struct trace_selftests *p, *n;
1998 struct tracer *t, **last;
2001 selftests_can_run = true;
2003 mutex_lock(&trace_types_lock);
2005 if (list_empty(&postponed_selftests))
2008 pr_info("Running postponed tracer tests:\n");
2010 tracing_selftest_running = true;
2011 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2012 /* This loop can take minutes when sanitizers are enabled, so
2013 * let's make sure we allow RCU processing.
2016 ret = run_tracer_selftest(p->type);
2017 /* If the test fails, then warn and remove from available_tracers */
2019 WARN(1, "tracer: %s failed selftest, disabling\n",
2021 last = &trace_types;
2022 for (t = trace_types; t; t = t->next) {
2033 tracing_selftest_running = false;
2036 mutex_unlock(&trace_types_lock);
2040 core_initcall(init_trace_selftests);
2042 static inline int run_tracer_selftest(struct tracer *type)
2046 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2048 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2050 static void __init apply_trace_boot_options(void);
2053 * register_tracer - register a tracer with the ftrace system.
2054 * @type: the plugin for the tracer
2056 * Register a new plugin tracer.
2058 int __init register_tracer(struct tracer *type)
2064 pr_info("Tracer must have a name\n");
2068 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2069 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2073 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2074 pr_warn("Can not register tracer %s due to lockdown\n",
2079 mutex_lock(&trace_types_lock);
2081 tracing_selftest_running = true;
2083 for (t = trace_types; t; t = t->next) {
2084 if (strcmp(type->name, t->name) == 0) {
2086 pr_info("Tracer %s already registered\n",
2093 if (!type->set_flag)
2094 type->set_flag = &dummy_set_flag;
2096 /* allocate a dummy tracer_flags */
2097 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2102 type->flags->val = 0;
2103 type->flags->opts = dummy_tracer_opt;
2105 if (!type->flags->opts)
2106 type->flags->opts = dummy_tracer_opt;
2108 /* store the tracer for __set_tracer_option */
2109 type->flags->trace = type;
2111 ret = run_tracer_selftest(type);
2115 type->next = trace_types;
2117 add_tracer_options(&global_trace, type);
2120 tracing_selftest_running = false;
2121 mutex_unlock(&trace_types_lock);
2123 if (ret || !default_bootup_tracer)
2126 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2129 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2130 /* Do we want this tracer to start on bootup? */
2131 tracing_set_tracer(&global_trace, type->name);
2132 default_bootup_tracer = NULL;
2134 apply_trace_boot_options();
2136 /* disable other selftests, since this will break it. */
2137 disable_tracing_selftest("running a tracer");
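/*
 * Minimal sketch of how a tracer plugin registers itself (illustrative;
 * "mytracer" and its callbacks are made up, and struct tracer has more
 * optional fields than shown):
 *
 *	static struct tracer mytracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *		.reset	= mytracer_reset,
 *	};
 *
 *	static __init int init_mytracer(void)
 *	{
 *		return register_tracer(&mytracer);
 *	}
 *	core_initcall(init_mytracer);
 */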
2143 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2145 struct trace_buffer *buffer = buf->buffer;
2150 ring_buffer_record_disable(buffer);
2152 /* Make sure all commits have finished */
2154 ring_buffer_reset_cpu(buffer, cpu);
2156 ring_buffer_record_enable(buffer);
2159 void tracing_reset_online_cpus(struct array_buffer *buf)
2161 struct trace_buffer *buffer = buf->buffer;
2166 ring_buffer_record_disable(buffer);
2168 /* Make sure all commits have finished */
2171 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2173 ring_buffer_reset_online_cpus(buffer);
2175 ring_buffer_record_enable(buffer);
2178 /* Must have trace_types_lock held */
2179 void tracing_reset_all_online_cpus(void)
2181 struct trace_array *tr;
2183 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2184 if (!tr->clear_trace)
2186 tr->clear_trace = false;
2187 tracing_reset_online_cpus(&tr->array_buffer);
2188 #ifdef CONFIG_TRACER_MAX_TRACE
2189 tracing_reset_online_cpus(&tr->max_buffer);
2195 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2196 * is the tgid last observed corresponding to pid=i.
2198 static int *tgid_map;
2200 /* The maximum valid index into tgid_map. */
2201 static size_t tgid_map_max;
2203 #define SAVED_CMDLINES_DEFAULT 128
2204 #define NO_CMDLINE_MAP UINT_MAX
2205 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2206 struct saved_cmdlines_buffer {
2207 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2208 unsigned *map_cmdline_to_pid;
2209 unsigned cmdline_num;
2211 char *saved_cmdlines;
2213 static struct saved_cmdlines_buffer *savedcmd;
2215 static inline char *get_saved_cmdlines(int idx)
2217 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2220 static inline void set_cmdline(int idx, const char *cmdline)
2222 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2225 static int allocate_cmdlines_buffer(unsigned int val,
2226 struct saved_cmdlines_buffer *s)
2228 s->map_cmdline_to_pid = kmalloc_array(val,
2229 sizeof(*s->map_cmdline_to_pid),
2231 if (!s->map_cmdline_to_pid)
2234 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2235 if (!s->saved_cmdlines) {
2236 kfree(s->map_cmdline_to_pid);
2241 s->cmdline_num = val;
2242 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2243 sizeof(s->map_pid_to_cmdline));
2244 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2245 val * sizeof(*s->map_cmdline_to_pid));
2250 static int trace_create_savedcmd(void)
2254 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2258 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2268 int is_tracing_stopped(void)
2270 return global_trace.stop_count;
2274 * tracing_start - quick start of the tracer
2276 * If tracing is enabled but was stopped by tracing_stop,
2277 * this will start the tracer back up.
2279 void tracing_start(void)
2281 struct trace_buffer *buffer;
2282 unsigned long flags;
2284 if (tracing_disabled)
2287 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2288 if (--global_trace.stop_count) {
2289 if (global_trace.stop_count < 0) {
2290 /* Someone screwed up their debugging */
2292 global_trace.stop_count = 0;
2297 /* Prevent the buffers from switching */
2298 arch_spin_lock(&global_trace.max_lock);
2300 buffer = global_trace.array_buffer.buffer;
2302 ring_buffer_record_enable(buffer);
2304 #ifdef CONFIG_TRACER_MAX_TRACE
2305 buffer = global_trace.max_buffer.buffer;
2307 ring_buffer_record_enable(buffer);
2310 arch_spin_unlock(&global_trace.max_lock);
2313 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2316 static void tracing_start_tr(struct trace_array *tr)
2318 struct trace_buffer *buffer;
2319 unsigned long flags;
2321 if (tracing_disabled)
2324 /* If global, we need to also start the max tracer */
2325 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2326 return tracing_start();
2328 raw_spin_lock_irqsave(&tr->start_lock, flags);
2330 if (--tr->stop_count) {
2331 if (tr->stop_count < 0) {
2332 /* Someone screwed up their debugging */
2339 buffer = tr->array_buffer.buffer;
2341 ring_buffer_record_enable(buffer);
2344 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2348 * tracing_stop - quick stop of the tracer
2350 * Lightweight way to stop tracing. Use in conjunction with
2353 void tracing_stop(void)
2355 struct trace_buffer *buffer;
2356 unsigned long flags;
2358 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2359 if (global_trace.stop_count++)
2362 /* Prevent the buffers from switching */
2363 arch_spin_lock(&global_trace.max_lock);
2365 buffer = global_trace.array_buffer.buffer;
2367 ring_buffer_record_disable(buffer);
2369 #ifdef CONFIG_TRACER_MAX_TRACE
2370 buffer = global_trace.max_buffer.buffer;
2372 ring_buffer_record_disable(buffer);
2375 arch_spin_unlock(&global_trace.max_lock);
2378 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2381 static void tracing_stop_tr(struct trace_array *tr)
2383 struct trace_buffer *buffer;
2384 unsigned long flags;
2386 /* If global, we need to also stop the max tracer */
2387 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2388 return tracing_stop();
2390 raw_spin_lock_irqsave(&tr->start_lock, flags);
2391 if (tr->stop_count++)
2394 buffer = tr->array_buffer.buffer;
2396 ring_buffer_record_disable(buffer);
2399 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2402 static int trace_save_cmdline(struct task_struct *tsk)
2406 /* treat recording of idle task as a success */
2410 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2413 * It's not the end of the world if we don't get
2414 * the lock, but we also don't want to spin
2415 * nor do we want to disable interrupts,
2416 * so if we miss here, then better luck next time.
2418 if (!arch_spin_trylock(&trace_cmdline_lock))
2421 idx = savedcmd->map_pid_to_cmdline[tpid];
2422 if (idx == NO_CMDLINE_MAP) {
2423 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2425 savedcmd->map_pid_to_cmdline[tpid] = idx;
2426 savedcmd->cmdline_idx = idx;
2429 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2430 set_cmdline(idx, tsk->comm);
2432 arch_spin_unlock(&trace_cmdline_lock);
2437 static void __trace_find_cmdline(int pid, char comm[])
2443 strcpy(comm, "<idle>");
2447 if (WARN_ON_ONCE(pid < 0)) {
2448 strcpy(comm, "<XXX>");
2452 tpid = pid & (PID_MAX_DEFAULT - 1);
2453 map = savedcmd->map_pid_to_cmdline[tpid];
2454 if (map != NO_CMDLINE_MAP) {
2455 tpid = savedcmd->map_cmdline_to_pid[map];
2457 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2461 strcpy(comm, "<...>");
2464 void trace_find_cmdline(int pid, char comm[])
2467 arch_spin_lock(&trace_cmdline_lock);
2469 __trace_find_cmdline(pid, comm);
2471 arch_spin_unlock(&trace_cmdline_lock);
2475 static int *trace_find_tgid_ptr(int pid)
2478 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2479 * if we observe a non-NULL tgid_map then we also observe the correct tgid_map_max.
2482 int *map = smp_load_acquire(&tgid_map);
2484 if (unlikely(!map || pid > tgid_map_max))
2490 int trace_find_tgid(int pid)
2492 int *ptr = trace_find_tgid_ptr(pid);
2494 return ptr ? *ptr : 0;
2497 static int trace_save_tgid(struct task_struct *tsk)
2501 /* treat recording of idle task as a success */
2505 ptr = trace_find_tgid_ptr(tsk->pid);
2513 static bool tracing_record_taskinfo_skip(int flags)
2515 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2517 if (!__this_cpu_read(trace_taskinfo_save))
2523 * tracing_record_taskinfo - record the task info of a task
2525 * @task: task to record
2526 * @flags: TRACE_RECORD_CMDLINE for recording comm
2527 * TRACE_RECORD_TGID for recording tgid
2529 void tracing_record_taskinfo(struct task_struct *task, int flags)
2533 if (tracing_record_taskinfo_skip(flags))
2537 * Record as much task information as possible. If some fail, continue
2538 * to try to record the others.
2540 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2541 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2543 /* If recording any information failed, retry again soon. */
2547 __this_cpu_write(trace_taskinfo_save, false);
2551 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2553 * @prev: previous task during sched_switch
2554 * @next: next task during sched_switch
2555 * @flags: TRACE_RECORD_CMDLINE for recording comm
2556 * TRACE_RECORD_TGID for recording tgid
2558 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2559 struct task_struct *next, int flags)
2563 if (tracing_record_taskinfo_skip(flags))
2567 * Record as much task information as possible. If some fail, continue
2568 * to try to record the others.
2570 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2571 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2572 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2573 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2575 /* If recording any information failed, retry again soon. */
2579 __this_cpu_write(trace_taskinfo_save, false);
2582 /* Helpers to record a specific task information */
2583 void tracing_record_cmdline(struct task_struct *task)
2585 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2588 void tracing_record_tgid(struct task_struct *task)
2590 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2594 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2595 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2596 * simplifies those functions and keeps them in sync.
2598 enum print_line_t trace_handle_return(struct trace_seq *s)
2600 return trace_seq_has_overflowed(s) ?
2601 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2603 EXPORT_SYMBOL_GPL(trace_handle_return);
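/*
 * Illustrative use in an event's output callback (the names here are
 * hypothetical, only the callback signature is real):
 *
 *	static enum print_line_t foo_trace(struct trace_iterator *iter,
 *					   int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo: %d\n", 0);
 *		return trace_handle_return(&iter->seq);
 *	}
 */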
2605 static unsigned short migration_disable_value(void)
2607 #if defined(CONFIG_SMP)
2608 return current->migration_disabled;
2614 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2616 unsigned int trace_flags = irqs_status;
2619 pc = preempt_count();
2622 trace_flags |= TRACE_FLAG_NMI;
2623 if (pc & HARDIRQ_MASK)
2624 trace_flags |= TRACE_FLAG_HARDIRQ;
2625 if (in_serving_softirq())
2626 trace_flags |= TRACE_FLAG_SOFTIRQ;
2628 if (tif_need_resched())
2629 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2630 if (test_preempt_need_resched())
2631 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2632 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2633 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
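/*
 * Reading the packing above off the code (not a formal ABI statement),
 * the returned trace_ctx word is laid out as:
 *
 *	bits  0- 3: preempt count (clamped to 15)
 *	bits  4- 7: migrate-disable depth (clamped to 15)
 *	bits 16-23: TRACE_FLAG_* bits (irqs-off, NMI, hard/soft irq, resched)
 */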
2636 struct ring_buffer_event *
2637 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2640 unsigned int trace_ctx)
2642 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2645 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2646 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2647 static int trace_buffered_event_ref;
2650 * trace_buffered_event_enable - enable buffering events
2652 * When events are being filtered, it is quicker to use a temporary
2653 * buffer to write the event data into if there's a likely chance
2654 * that it will not be committed. The discard of the ring buffer
2655 * is not as fast as committing, and is much slower than copying to a temporary buffer first.
2658 * When an event is to be filtered, allocate per cpu buffers to
2659 * write the event data into, and if the event is filtered and discarded
2660 * it is simply dropped; otherwise, the entire data is committed to the ring buffer.
2663 void trace_buffered_event_enable(void)
2665 struct ring_buffer_event *event;
2669 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2671 if (trace_buffered_event_ref++)
2674 for_each_tracing_cpu(cpu) {
2675 page = alloc_pages_node(cpu_to_node(cpu),
2676 GFP_KERNEL | __GFP_NORETRY, 0);
2680 event = page_address(page);
2681 memset(event, 0, sizeof(*event));
2683 per_cpu(trace_buffered_event, cpu) = event;
2686 if (cpu == smp_processor_id() &&
2687 __this_cpu_read(trace_buffered_event) !=
2688 per_cpu(trace_buffered_event, cpu))
2695 trace_buffered_event_disable();
2698 static void enable_trace_buffered_event(void *data)
2700 /* Probably not needed, but do it anyway */
2702 this_cpu_dec(trace_buffered_event_cnt);
2705 static void disable_trace_buffered_event(void *data)
2707 this_cpu_inc(trace_buffered_event_cnt);
2711 * trace_buffered_event_disable - disable buffering events
2713 * When a filter is removed, it is faster to not use the buffered
2714 * events, and to commit directly into the ring buffer. Free up
2715 * the temp buffers when there are no more users. This requires
2716 * special synchronization with current events.
2718 void trace_buffered_event_disable(void)
2722 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2724 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2727 if (--trace_buffered_event_ref)
2731 /* For each CPU, set the buffer as used. */
2732 smp_call_function_many(tracing_buffer_mask,
2733 disable_trace_buffered_event, NULL, 1);
2736 /* Wait for all current users to finish */
2739 for_each_tracing_cpu(cpu) {
2740 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2741 per_cpu(trace_buffered_event, cpu) = NULL;
2744 * Make sure trace_buffered_event is NULL before clearing
2745 * trace_buffered_event_cnt.
2750 /* Do the work on each cpu */
2751 smp_call_function_many(tracing_buffer_mask,
2752 enable_trace_buffered_event, NULL, 1);
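/*
 * trace_buffered_event_enable()/disable() are reference counted and must
 * be called under event_mutex; a filter typically takes a reference when
 * it is attached and drops it when it is removed, e.g. (illustrative):
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	... install the filter ...
 *	mutex_unlock(&event_mutex);
 */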
2756 static struct trace_buffer *temp_buffer;
2758 struct ring_buffer_event *
2759 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2760 struct trace_event_file *trace_file,
2761 int type, unsigned long len,
2762 unsigned int trace_ctx)
2764 struct ring_buffer_event *entry;
2765 struct trace_array *tr = trace_file->tr;
2768 *current_rb = tr->array_buffer.buffer;
2770 if (!tr->no_filter_buffering_ref &&
2771 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2772 (entry = this_cpu_read(trace_buffered_event))) {
2774 * Filtering is on, so try to use the per cpu buffer first.
2775 * This buffer will simulate a ring_buffer_event,
2776 * where the type_len is zero and the array[0] will
2777 * hold the full length.
2778 * (see include/linux/ring_buffer.h for details on
2779 * how the ring_buffer_event is structured).
2781 * Using a temp buffer during filtering and copying it
2782 * on a matched filter is quicker than writing directly
2783 * into the ring buffer and then discarding it when
2784 * it doesn't match. That is because the discard
2785 * requires several atomic operations to get right.
2786 * Copying on match and doing nothing on a failed match
2787 * is still quicker than no copy on match, but having
2788 * to discard out of the ring buffer on a failed match.
2790 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2792 val = this_cpu_inc_return(trace_buffered_event_cnt);
2795 * Preemption is disabled, but interrupts and NMIs
2796 * can still come in now. If that happens after
2797 * the above increment, then it will have to go
2798 * back to the old method of allocating the event
2799 * on the ring buffer, and if the filter fails, it
2800 * will have to call ring_buffer_discard_commit() to remove it.
2803 * Need to also check the unlikely case that the
2804 * length is bigger than the temp buffer size.
2805 * If that happens, then the reserve is pretty much
2806 * guaranteed to fail, as the ring buffer currently
2807 * only allows events less than a page. But that may
2808 * change in the future, so let the ring buffer reserve
2809 * handle the failure in that case.
2811 if (val == 1 && likely(len <= max_len)) {
2812 trace_event_setup(entry, type, trace_ctx);
2813 entry->array[0] = len;
2816 this_cpu_dec(trace_buffered_event_cnt);
2819 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2822 * If tracing is off, but we have triggers enabled
2823 * we still need to look at the event data. Use the temp_buffer
2824 * to store the trace event for the trigger to use. It's recursion
2825 * safe and will not be recorded anywhere.
2827 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2828 *current_rb = temp_buffer;
2829 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2834 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
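/*
 * Summary of the paths above: when filtering is active, the event is first
 * staged in the per-cpu trace_buffered_event page; otherwise it is
 * reserved directly in the ring buffer; and if that fails while triggers
 * are armed, the write falls back to temp_buffer so the trigger can still
 * inspect the data even though nothing is recorded.
 */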
2836 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2837 static DEFINE_MUTEX(tracepoint_printk_mutex);
2839 static void output_printk(struct trace_event_buffer *fbuffer)
2841 struct trace_event_call *event_call;
2842 struct trace_event_file *file;
2843 struct trace_event *event;
2844 unsigned long flags;
2845 struct trace_iterator *iter = tracepoint_print_iter;
2847 /* We should never get here if iter is NULL */
2848 if (WARN_ON_ONCE(!iter))
2851 event_call = fbuffer->trace_file->event_call;
2852 if (!event_call || !event_call->event.funcs ||
2853 !event_call->event.funcs->trace)
2856 file = fbuffer->trace_file;
2857 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2858 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2859 !filter_match_preds(file->filter, fbuffer->entry)))
2862 event = &fbuffer->trace_file->event_call->event;
2864 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2865 trace_seq_init(&iter->seq);
2866 iter->ent = fbuffer->entry;
2867 event_call->event.funcs->trace(iter, 0, event);
2868 trace_seq_putc(&iter->seq, 0);
2869 printk("%s", iter->seq.buffer);
2871 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2874 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2875 void *buffer, size_t *lenp,
2878 int save_tracepoint_printk;
2881 mutex_lock(&tracepoint_printk_mutex);
2882 save_tracepoint_printk = tracepoint_printk;
2884 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2887 * This will force exiting early, as tracepoint_printk
2888 * is always zero when tracepoint_print_iter is not allocated
2890 if (!tracepoint_print_iter)
2891 tracepoint_printk = 0;
2893 if (save_tracepoint_printk == tracepoint_printk)
2896 if (tracepoint_printk)
2897 static_key_enable(&tracepoint_printk_key.key);
2899 static_key_disable(&tracepoint_printk_key.key);
2902 mutex_unlock(&tracepoint_printk_mutex);
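/*
 * This handler backs the kernel.tracepoint_printk sysctl.  A rough
 * (illustrative) way to toggle echoing of tracepoints to printk at run
 * time, assuming the print iterator was allocated at boot via the
 * "tp_printk" command line option:
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 *	echo 0 > /proc/sys/kernel/tracepoint_printk
 */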
2907 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2909 enum event_trigger_type tt = ETT_NONE;
2910 struct trace_event_file *file = fbuffer->trace_file;
2912 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2913 fbuffer->entry, &tt))
2916 if (static_key_false(&tracepoint_printk_key.key))
2917 output_printk(fbuffer);
2919 if (static_branch_unlikely(&trace_event_exports_enabled))
2920 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2922 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2923 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2927 event_triggers_post_call(file, tt);
2930 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
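/*
 * Order of operations in the commit above: trigger conditions are tested
 * first (and may discard the event), the event is optionally echoed to
 * printk when tp_printk is active, handed to any registered exporters,
 * committed together with the optional kernel/user stack traces, and only
 * then are post-commit triggers run.
 */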
2935 * trace_buffer_unlock_commit_regs()
2936 * trace_event_buffer_commit()
2937 * trace_event_raw_event_xxx()
2939 # define STACK_SKIP 3
2941 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2942 struct trace_buffer *buffer,
2943 struct ring_buffer_event *event,
2944 unsigned int trace_ctx,
2945 struct pt_regs *regs)
2947 __buffer_unlock_commit(buffer, event);
2950 * If regs is not set, then skip the necessary functions.
2951 * Note, we can still get here via blktrace, wakeup tracer
2952 * and mmiotrace, but that's ok if they lose a function or
2953 * two. They are not that meaningful.
2955 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2956 ftrace_trace_userstack(tr, buffer, trace_ctx);
2960 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2963 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2964 struct ring_buffer_event *event)
2966 __buffer_unlock_commit(buffer, event);
2970 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2971 parent_ip, unsigned int trace_ctx)
2973 struct trace_event_call *call = &event_function;
2974 struct trace_buffer *buffer = tr->array_buffer.buffer;
2975 struct ring_buffer_event *event;
2976 struct ftrace_entry *entry;
2978 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2982 entry = ring_buffer_event_data(event);
2984 entry->parent_ip = parent_ip;
2986 if (!call_filter_check_discard(call, entry, buffer, event)) {
2987 if (static_branch_unlikely(&trace_function_exports_enabled))
2988 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2989 __buffer_unlock_commit(buffer, event);
2993 #ifdef CONFIG_STACKTRACE
2995 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2996 #define FTRACE_KSTACK_NESTING 4
2998 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3000 struct ftrace_stack {
3001 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3005 struct ftrace_stacks {
3006 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3009 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3010 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3012 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3013 unsigned int trace_ctx,
3014 int skip, struct pt_regs *regs)
3016 struct trace_event_call *call = &event_kernel_stack;
3017 struct ring_buffer_event *event;
3018 unsigned int size, nr_entries;
3019 struct ftrace_stack *fstack;
3020 struct stack_entry *entry;
3024 * Add one, for this function and the call to stack_trace_save().
3025 * If regs is set, then these functions will not be in the way.
3027 #ifndef CONFIG_UNWINDER_ORC
3032 preempt_disable_notrace();
3034 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3036 /* This should never happen. If it does, yell once and skip */
3037 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3041 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3042 * interrupt will either see the value pre increment or post
3043 * increment. If the interrupt happens pre increment it will have
3044 * restored the counter when it returns. We just need a barrier to
3045 * keep gcc from moving things around.
3049 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3050 size = ARRAY_SIZE(fstack->calls);
3053 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3056 nr_entries = stack_trace_save(fstack->calls, size, skip);
3059 size = nr_entries * sizeof(unsigned long);
3060 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3061 (sizeof(*entry) - sizeof(entry->caller)) + size,
3065 entry = ring_buffer_event_data(event);
3067 memcpy(&entry->caller, fstack->calls, size);
3068 entry->size = nr_entries;
3070 if (!call_filter_check_discard(call, entry, buffer, event))
3071 __buffer_unlock_commit(buffer, event);
3074 /* Again, don't let gcc optimize things here */
3076 __this_cpu_dec(ftrace_stack_reserve);
3077 preempt_enable_notrace();
3081 static inline void ftrace_trace_stack(struct trace_array *tr,
3082 struct trace_buffer *buffer,
3083 unsigned int trace_ctx,
3084 int skip, struct pt_regs *regs)
3086 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3089 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3092 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3095 struct trace_buffer *buffer = tr->array_buffer.buffer;
3097 if (rcu_is_watching()) {
3098 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3103 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3104 * but if the above rcu_is_watching() failed, then the NMI
3105 * triggered someplace critical, and rcu_irq_enter() should
3106 * not be called from NMI.
3108 if (unlikely(in_nmi()))
3111 rcu_irq_enter_irqson();
3112 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3113 rcu_irq_exit_irqson();
3117 * trace_dump_stack - record a stack back trace in the trace buffer
3118 * @skip: Number of functions to skip (helper handlers)
3120 void trace_dump_stack(int skip)
3122 if (tracing_disabled || tracing_selftest_running)
3125 #ifndef CONFIG_UNWINDER_ORC
3126 /* Skip 1 to skip this function. */
3129 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3130 tracing_gen_ctx(), skip, NULL);
3132 EXPORT_SYMBOL_GPL(trace_dump_stack);
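/*
 * Illustrative use from anywhere in the kernel while debugging, to record
 * how a code path was reached without halting the machine:
 *
 *	trace_dump_stack(0);
 *
 * The stack ends up in the top-level "trace" buffer rather than in dmesg.
 */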
3134 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3135 static DEFINE_PER_CPU(int, user_stack_count);
3138 ftrace_trace_userstack(struct trace_array *tr,
3139 struct trace_buffer *buffer, unsigned int trace_ctx)
3141 struct trace_event_call *call = &event_user_stack;
3142 struct ring_buffer_event *event;
3143 struct userstack_entry *entry;
3145 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3149 * NMIs can not handle page faults, even with fixups.
3150 * Saving the user stack can (and often does) fault.
3152 if (unlikely(in_nmi()))
3156 * prevent recursion, since the user stack tracing may
3157 * trigger other kernel events.
3160 if (__this_cpu_read(user_stack_count))
3163 __this_cpu_inc(user_stack_count);
3165 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3166 sizeof(*entry), trace_ctx);
3168 goto out_drop_count;
3169 entry = ring_buffer_event_data(event);
3171 entry->tgid = current->tgid;
3172 memset(&entry->caller, 0, sizeof(entry->caller));
3174 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3175 if (!call_filter_check_discard(call, entry, buffer, event))
3176 __buffer_unlock_commit(buffer, event);
3179 __this_cpu_dec(user_stack_count);
3183 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3184 static void ftrace_trace_userstack(struct trace_array *tr,
3185 struct trace_buffer *buffer,
3186 unsigned int trace_ctx)
3189 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3191 #endif /* CONFIG_STACKTRACE */
3194 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3195 unsigned long long delta)
3197 entry->bottom_delta_ts = delta & U32_MAX;
3198 entry->top_delta_ts = (delta >> 32);
3201 void trace_last_func_repeats(struct trace_array *tr,
3202 struct trace_func_repeats *last_info,
3203 unsigned int trace_ctx)
3205 struct trace_buffer *buffer = tr->array_buffer.buffer;
3206 struct func_repeats_entry *entry;
3207 struct ring_buffer_event *event;
3210 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3211 sizeof(*entry), trace_ctx);
3215 delta = ring_buffer_event_time_stamp(buffer, event) -
3216 last_info->ts_last_call;
3218 entry = ring_buffer_event_data(event);
3219 entry->ip = last_info->ip;
3220 entry->parent_ip = last_info->parent_ip;
3221 entry->count = last_info->count;
3222 func_repeats_set_delta_ts(entry, delta);
3224 __buffer_unlock_commit(buffer, event);
3227 /* created for use with alloc_percpu */
3228 struct trace_buffer_struct {
3230 char buffer[4][TRACE_BUF_SIZE];
3233 static struct trace_buffer_struct *trace_percpu_buffer;
3236 * This allows for lockless recording. If we're nested too deeply, then
3237 * this returns NULL.
3239 static char *get_trace_buf(void)
3241 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3243 if (!buffer || buffer->nesting >= 4)
3248 /* Interrupts must see nesting incremented before we use the buffer */
3250 return &buffer->buffer[buffer->nesting - 1][0];
3253 static void put_trace_buf(void)
3255 /* Don't let the decrement of nesting leak before this */
3257 this_cpu_dec(trace_percpu_buffer->nesting);
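/*
 * get_trace_buf()/put_trace_buf() must be strictly paired in the same
 * context; the four nesting levels mirror the normal/softirq/irq/NMI
 * split used elsewhere in this file, so trace_printk() from any of them
 * gets its own scratch line buffer without taking a lock.  Rough usage
 * (as done in trace_vbprintk() below):
 *
 *	buf = get_trace_buf();
 *	if (buf) {
 *		... format into buf ...
 *		put_trace_buf();
 *	}
 */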
3260 static int alloc_percpu_trace_buffer(void)
3262 struct trace_buffer_struct *buffers;
3264 if (trace_percpu_buffer)
3267 buffers = alloc_percpu(struct trace_buffer_struct);
3268 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3271 trace_percpu_buffer = buffers;
3275 static int buffers_allocated;
3277 void trace_printk_init_buffers(void)
3279 if (buffers_allocated)
3282 if (alloc_percpu_trace_buffer())
3285 /* trace_printk() is for debug use only. Don't use it in production. */
3288 pr_warn("**********************************************************\n");
3289 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3291 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3293 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3294 pr_warn("** unsafe for production use. **\n");
3296 pr_warn("** If you see this message and you are not debugging **\n");
3297 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3299 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3300 pr_warn("**********************************************************\n");
3302 /* Expand the buffers to set size */
3303 tracing_update_buffers();
3305 buffers_allocated = 1;
3308 * trace_printk_init_buffers() can be called by modules.
3309 * If that happens, then we need to start cmdline recording
3310 * directly here. If the global_trace.buffer is already
3311 * allocated here, then this was called by module code.
3313 if (global_trace.array_buffer.buffer)
3314 tracing_start_cmdline_record();
3316 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3318 void trace_printk_start_comm(void)
3320 /* Start tracing comms if trace printk is set */
3321 if (!buffers_allocated)
3323 tracing_start_cmdline_record();
3326 static void trace_printk_start_stop_comm(int enabled)
3328 if (!buffers_allocated)
3332 tracing_start_cmdline_record();
3334 tracing_stop_cmdline_record();
3338 * trace_vbprintk - write binary msg to tracing buffer
3339 * @ip: The address of the caller
3340 * @fmt: The string format to write to the buffer
3341 * @args: Arguments for @fmt
3343 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3345 struct trace_event_call *call = &event_bprint;
3346 struct ring_buffer_event *event;
3347 struct trace_buffer *buffer;
3348 struct trace_array *tr = &global_trace;
3349 struct bprint_entry *entry;
3350 unsigned int trace_ctx;
3354 if (unlikely(tracing_selftest_running || tracing_disabled))
3357 /* Don't pollute graph traces with trace_vprintk internals */
3358 pause_graph_tracing();
3360 trace_ctx = tracing_gen_ctx();
3361 preempt_disable_notrace();
3363 tbuffer = get_trace_buf();
3369 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3371 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3374 size = sizeof(*entry) + sizeof(u32) * len;
3375 buffer = tr->array_buffer.buffer;
3376 ring_buffer_nest_start(buffer);
3377 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3381 entry = ring_buffer_event_data(event);
3385 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3386 if (!call_filter_check_discard(call, entry, buffer, event)) {
3387 __buffer_unlock_commit(buffer, event);
3388 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3392 ring_buffer_nest_end(buffer);
3397 preempt_enable_notrace();
3398 unpause_graph_tracing();
3402 EXPORT_SYMBOL_GPL(trace_vbprintk);
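/*
 * Note the difference from trace_vprintk()/__trace_array_vprintk() below:
 * trace_vbprintk() stores the format pointer plus the binary arguments
 * packed by vbin_printf(), deferring the actual string formatting until
 * the buffer is read, while the vprintk variants store the fully
 * formatted string produced by vscnprintf().
 */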
3406 __trace_array_vprintk(struct trace_buffer *buffer,
3407 unsigned long ip, const char *fmt, va_list args)
3409 struct trace_event_call *call = &event_print;
3410 struct ring_buffer_event *event;
3412 struct print_entry *entry;
3413 unsigned int trace_ctx;
3416 if (tracing_disabled || tracing_selftest_running)
3419 /* Don't pollute graph traces with trace_vprintk internals */
3420 pause_graph_tracing();
3422 trace_ctx = tracing_gen_ctx();
3423 preempt_disable_notrace();
3426 tbuffer = get_trace_buf();
3432 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3434 size = sizeof(*entry) + len + 1;
3435 ring_buffer_nest_start(buffer);
3436 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3440 entry = ring_buffer_event_data(event);
3443 memcpy(&entry->buf, tbuffer, len + 1);
3444 if (!call_filter_check_discard(call, entry, buffer, event)) {
3445 __buffer_unlock_commit(buffer, event);
3446 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3450 ring_buffer_nest_end(buffer);
3454 preempt_enable_notrace();
3455 unpause_graph_tracing();
3461 int trace_array_vprintk(struct trace_array *tr,
3462 unsigned long ip, const char *fmt, va_list args)
3464 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3468 * trace_array_printk - Print a message to a specific instance
3469 * @tr: The instance trace_array descriptor
3470 * @ip: The instruction pointer that this is called from.
3471 * @fmt: The format to print (printf format)
3473 * If a subsystem sets up its own instance, they have the right to
3474 * printk strings into their tracing instance buffer using this
3475 * function. Note, this function will not write into the top level
3476 * buffer (use trace_printk() for that), as writing into the top level
3477 * buffer should only have events that can be individually disabled.
3478 * trace_printk() is only used for debugging a kernel, and should never
3479 * be incorporated into normal use.
3481 * trace_array_printk() can be used, as it will not add noise to the
3482 * top level tracing buffer.
3484 * Note, trace_array_init_printk() must be called on @tr before this
3488 int trace_array_printk(struct trace_array *tr,
3489 unsigned long ip, const char *fmt, ...)
3497 /* This is only allowed for created instances */
3498 if (tr == &global_trace)
3501 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3505 ret = trace_array_vprintk(tr, ip, fmt, ap);
3509 EXPORT_SYMBOL_GPL(trace_array_printk);
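/*
 * Rough usage sketch for a subsystem-owned instance (error handling
 * omitted; "my_subsys" is just an example name, and trace_array_put()
 * should be called when the reference is no longer needed):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	trace_array_init_printk(tr);
 *	trace_array_printk(tr, _THIS_IP_, "widget %d reset\n", id);
 */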
3512 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3513 * @tr: The trace array to initialize the buffers for
3515 * As trace_array_printk() only writes into instances, they are OK to
3516 * have in the kernel (unlike trace_printk()). This needs to be called
3517 * before trace_array_printk() can be used on a trace_array.
3519 int trace_array_init_printk(struct trace_array *tr)
3524 /* This is only allowed for created instances */
3525 if (tr == &global_trace)
3528 return alloc_percpu_trace_buffer();
3530 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3533 int trace_array_printk_buf(struct trace_buffer *buffer,
3534 unsigned long ip, const char *fmt, ...)
3539 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3543 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3549 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3551 return trace_array_vprintk(&global_trace, ip, fmt, args);
3553 EXPORT_SYMBOL_GPL(trace_vprintk);
3555 static void trace_iterator_increment(struct trace_iterator *iter)
3557 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3561 ring_buffer_iter_advance(buf_iter);
3564 static struct trace_entry *
3565 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3566 unsigned long *lost_events)
3568 struct ring_buffer_event *event;
3569 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3572 event = ring_buffer_iter_peek(buf_iter, ts);
3574 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3575 (unsigned long)-1 : 0;
3577 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3582 iter->ent_size = ring_buffer_event_length(event);
3583 return ring_buffer_event_data(event);
3589 static struct trace_entry *
3590 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3591 unsigned long *missing_events, u64 *ent_ts)
3593 struct trace_buffer *buffer = iter->array_buffer->buffer;
3594 struct trace_entry *ent, *next = NULL;
3595 unsigned long lost_events = 0, next_lost = 0;
3596 int cpu_file = iter->cpu_file;
3597 u64 next_ts = 0, ts;
3603 * If we are in a per_cpu trace file, don't bother iterating over
3604 * all the CPUs; peek at that CPU directly.
3606 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3607 if (ring_buffer_empty_cpu(buffer, cpu_file))
3609 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3611 *ent_cpu = cpu_file;
3616 for_each_tracing_cpu(cpu) {
3618 if (ring_buffer_empty_cpu(buffer, cpu))
3621 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3624 * Pick the entry with the smallest timestamp:
3626 if (ent && (!next || ts < next_ts)) {
3630 next_lost = lost_events;
3631 next_size = iter->ent_size;
3635 iter->ent_size = next_size;
3638 *ent_cpu = next_cpu;
3644 *missing_events = next_lost;
3649 #define STATIC_FMT_BUF_SIZE 128
3650 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3652 static char *trace_iter_expand_format(struct trace_iterator *iter)
3657 * iter->tr is NULL when used with tp_printk, which makes
3658 * this get called where it is not safe to call krealloc().
3660 if (!iter->tr || iter->fmt == static_fmt_buf)
3663 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3666 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3673 /* Returns true if the string is safe to dereference from an event */
3674 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3676 unsigned long addr = (unsigned long)str;
3677 struct trace_event *trace_event;
3678 struct trace_event_call *event;
3680 /* OK if part of the event data */
3681 if ((addr >= (unsigned long)iter->ent) &&
3682 (addr < (unsigned long)iter->ent + iter->ent_size))
3685 /* OK if part of the temp seq buffer */
3686 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3687 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3690 /* Core rodata can not be freed */
3691 if (is_kernel_rodata(addr))
3694 if (trace_is_tracepoint_string(str))
3698 * Now this could be a module event, referencing core module
3699 * data, which is OK.
3704 trace_event = ftrace_find_event(iter->ent->type);
3708 event = container_of(trace_event, struct trace_event_call, event);
3709 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3712 /* Would rather have rodata, but this will suffice */
3713 if (within_module_core(addr, event->module))
3719 static const char *show_buffer(struct trace_seq *s)
3721 struct seq_buf *seq = &s->seq;
3723 seq_buf_terminate(seq);
3728 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3730 static int test_can_verify_check(const char *fmt, ...)
3737 * The verifier depends on vsnprintf() modifying the va_list passed
3738 * to it, which is the case when the va_list is passed by reference.
3739 * Some architectures (like x86_32) pass it by value, which means that
3740 * vsnprintf() does not modify the caller's va_list, and the verifier
3741 * would then need to understand every value that vsnprintf() can
3742 * consume. If the va_list is passed by value, the verifier is disabled.
3746 vsnprintf(buf, 16, "%d", ap);
3747 ret = va_arg(ap, int);
3753 static void test_can_verify(void)
3755 if (!test_can_verify_check("%d %d", 0, 1)) {
3756 pr_info("trace event string verifier disabled\n");
3757 static_branch_inc(&trace_no_verify);
3762 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3763 * @iter: The iterator that holds the seq buffer and the event being printed
3764 * @fmt: The format used to print the event
3765 * @ap: The va_list holding the data to print from @fmt.
3767 * This writes the data into the @iter->seq buffer using the data from
3768 * @fmt and @ap. If the format has a %s, then the source of the string
3769 * is examined to make sure it is safe to print, otherwise it will
3770 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3773 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3776 const char *p = fmt;
3780 if (WARN_ON_ONCE(!fmt))
3783 if (static_branch_unlikely(&trace_no_verify))
3786 /* Don't bother checking when doing a ftrace_dump() */
3787 if (iter->fmt == static_fmt_buf)
3796 /* We only care about %s and variants */
3797 for (i = 0; p[i]; i++) {
3798 if (i + 1 >= iter->fmt_size) {
3800 * If we can't expand the copy buffer, just print it as is.
3803 if (!trace_iter_expand_format(iter))
3807 if (p[i] == '\\' && p[i+1]) {
3812 /* Need to test cases like %08.*s */
3813 for (j = 1; p[i+j]; j++) {
3814 if (isdigit(p[i+j]) ||
3817 if (p[i+j] == '*') {
3829 /* If no %s found then just print normally */
3833 /* Copy up to the %s, and print that */
3834 strncpy(iter->fmt, p, i);
3835 iter->fmt[i] = '\0';
3836 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3839 len = va_arg(ap, int);
3841 /* The ap now points to the string data of the %s */
3842 str = va_arg(ap, const char *);
3845 * If you hit this warning, it is likely that the
3846 * trace event in question used %s on a string that
3847 * was saved at the time of the event, but may not be
3848 * around when the trace is read. Use __string(),
3849 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3850 * instead. See samples/trace_events/trace-events-sample.h
3853 if (WARN_ONCE(!trace_safe_str(iter, str),
3854 "fmt: '%s' current_buffer: '%s'",
3855 fmt, show_buffer(&iter->seq))) {
3858 /* Try to safely read the string */
3860 if (len + 1 > iter->fmt_size)
3861 len = iter->fmt_size - 1;
3864 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3868 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3872 trace_seq_printf(&iter->seq, "(0x%px)", str);
3874 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3876 str = "[UNSAFE-MEMORY]";
3877 strcpy(iter->fmt, "%s");
3879 strncpy(iter->fmt, p + i, j + 1);
3880 iter->fmt[j+1] = '\0';
3883 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3885 trace_seq_printf(&iter->seq, iter->fmt, str);
3891 trace_seq_vprintf(&iter->seq, p, ap);
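/*
 * Concrete (illustrative) case the check above catches: an event that
 * recorded only a pointer to a transient string, e.g.
 *
 *	char name[8];                         (on the stack at trace time)
 *	__entry->name_ptr = name;             (pointer saved in the event)
 *	TP_printk("%s", __entry->name_ptr)    (dereferenced at read time)
 *
 * By read time the stack string is long gone, so the pointer is flagged
 * and "[UNSAFE-MEMORY]" is printed instead of random memory.
 */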
3894 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3896 const char *p, *new_fmt;
3899 if (WARN_ON_ONCE(!fmt))
3902 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3906 new_fmt = q = iter->fmt;
3908 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3909 if (!trace_iter_expand_format(iter))
3912 q += iter->fmt - new_fmt;
3913 new_fmt = iter->fmt;
3918 /* Replace %p with %px */
3922 } else if (p[0] == 'p' && !isalnum(p[1])) {
3933 #define STATIC_TEMP_BUF_SIZE 128
3934 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3936 /* Find the next real entry, without updating the iterator itself */
3937 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3938 int *ent_cpu, u64 *ent_ts)
3940 /* __find_next_entry will reset ent_size */
3941 int ent_size = iter->ent_size;
3942 struct trace_entry *entry;
3945 * If called from ftrace_dump(), then the iter->temp buffer
3946 * will be the static_temp_buf and not created from kmalloc.
3947 * If the entry size is greater than the buffer, we can
3948 * not save it. Just return NULL in that case. This is only
3949 * used to add markers when two consecutive events' time
3950 * stamps have a large delta. See trace_print_lat_context()
3952 if (iter->temp == static_temp_buf &&
3953 STATIC_TEMP_BUF_SIZE < ent_size)
3957 * The __find_next_entry() may call peek_next_entry(), which may
3958 * call ring_buffer_peek() that may make the contents of iter->ent
3959 * undefined. Need to copy iter->ent now.
3961 if (iter->ent && iter->ent != iter->temp) {
3962 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3963 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3965 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3970 iter->temp_size = iter->ent_size;
3972 memcpy(iter->temp, iter->ent, iter->ent_size);
3973 iter->ent = iter->temp;
3975 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3976 /* Put back the original ent_size */
3977 iter->ent_size = ent_size;
3982 /* Find the next real entry, and increment the iterator to the next entry */
3983 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3985 iter->ent = __find_next_entry(iter, &iter->cpu,
3986 &iter->lost_events, &iter->ts);
3989 trace_iterator_increment(iter);
3991 return iter->ent ? iter : NULL;
3994 static void trace_consume(struct trace_iterator *iter)
3996 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3997 &iter->lost_events);
4000 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4002 struct trace_iterator *iter = m->private;
4006 WARN_ON_ONCE(iter->leftover);
4010 /* can't go backwards */
4015 ent = trace_find_next_entry_inc(iter);
4019 while (ent && iter->idx < i)
4020 ent = trace_find_next_entry_inc(iter);
4027 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4029 struct ring_buffer_iter *buf_iter;
4030 unsigned long entries = 0;
4033 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4035 buf_iter = trace_buffer_iter(iter, cpu);
4039 ring_buffer_iter_reset(buf_iter);
4042 * We could have the case with the max latency tracers
4043 * that a reset never took place on a cpu. This is evident
4044 * by the timestamp being before the start of the buffer.
4046 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4047 if (ts >= iter->array_buffer->time_start)
4050 ring_buffer_iter_advance(buf_iter);
4053 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4057 * The current tracer is copied to avoid global locking all around.
4060 static void *s_start(struct seq_file *m, loff_t *pos)
4062 struct trace_iterator *iter = m->private;
4063 struct trace_array *tr = iter->tr;
4064 int cpu_file = iter->cpu_file;
4070 * copy the tracer to avoid using a global lock all around.
4071 * iter->trace is a copy of current_trace, the pointer to the
4072 * name may be used instead of a strcmp(), as iter->trace->name
4073 * will point to the same string as current_trace->name.
4075 mutex_lock(&trace_types_lock);
4076 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4077 *iter->trace = *tr->current_trace;
4078 mutex_unlock(&trace_types_lock);
4080 #ifdef CONFIG_TRACER_MAX_TRACE
4081 if (iter->snapshot && iter->trace->use_max_tr)
4082 return ERR_PTR(-EBUSY);
4085 if (*pos != iter->pos) {
4090 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4091 for_each_tracing_cpu(cpu)
4092 tracing_iter_reset(iter, cpu);
4094 tracing_iter_reset(iter, cpu_file);
4097 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4102 * If we overflowed the seq_file before, then we want
4103 * to just reuse the trace_seq buffer again.
4109 p = s_next(m, p, &l);
4113 trace_event_read_lock();
4114 trace_access_lock(cpu_file);
4118 static void s_stop(struct seq_file *m, void *p)
4120 struct trace_iterator *iter = m->private;
4122 #ifdef CONFIG_TRACER_MAX_TRACE
4123 if (iter->snapshot && iter->trace->use_max_tr)
4127 trace_access_unlock(iter->cpu_file);
4128 trace_event_read_unlock();
4132 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4133 unsigned long *entries, int cpu)
4135 unsigned long count;
4137 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4139 * If this buffer has skipped entries, then we hold all
4140 * entries for the trace and we need to ignore the
4141 * ones before the time stamp.
4143 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4144 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4145 /* total is the same as the entries */
4149 ring_buffer_overrun_cpu(buf->buffer, cpu);
4154 get_total_entries(struct array_buffer *buf,
4155 unsigned long *total, unsigned long *entries)
4163 for_each_tracing_cpu(cpu) {
4164 get_total_entries_cpu(buf, &t, &e, cpu);
4170 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4172 unsigned long total, entries;
4177 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4182 unsigned long trace_total_entries(struct trace_array *tr)
4184 unsigned long total, entries;
4189 get_total_entries(&tr->array_buffer, &total, &entries);
4194 static void print_lat_help_header(struct seq_file *m)
4196 seq_puts(m, "# _------=> CPU# \n"
4197 "# / _-----=> irqs-off \n"
4198 "# | / _----=> need-resched \n"
4199 "# || / _---=> hardirq/softirq \n"
4200 "# ||| / _--=> preempt-depth \n"
4201 "# |||| / _-=> migrate-disable \n"
4202 "# ||||| / delay \n"
4203 "# cmd pid |||||| time | caller \n"
4204 "# \\ / |||||| \\ | / \n");
4207 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4209 unsigned long total;
4210 unsigned long entries;
4212 get_total_entries(buf, &total, &entries);
4213 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4214 entries, total, num_online_cpus());
4218 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4221 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4223 print_event_info(buf, m);
4225 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4226 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4229 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4232 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4233 const char *space = " ";
4234 int prec = tgid ? 12 : 2;
4236 print_event_info(buf, m);
4238 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
4239 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4240 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4241 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4242 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4243 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4244 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4245 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4249 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4251 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4252 struct array_buffer *buf = iter->array_buffer;
4253 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4254 struct tracer *type = iter->trace;
4255 unsigned long entries;
4256 unsigned long total;
4257 const char *name = "preemption";
4261 get_total_entries(buf, &total, &entries);
4263 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4265 seq_puts(m, "# -----------------------------------"
4266 "---------------------------------\n");
4267 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4268 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4269 nsecs_to_usecs(data->saved_latency),
4273 #if defined(CONFIG_PREEMPT_NONE)
4275 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
4277 #elif defined(CONFIG_PREEMPT)
4279 #elif defined(CONFIG_PREEMPT_RT)
4284 /* These are reserved for later use */
4287 seq_printf(m, " #P:%d)\n", num_online_cpus());
4291 seq_puts(m, "# -----------------\n");
4292 seq_printf(m, "# | task: %.16s-%d "
4293 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4294 data->comm, data->pid,
4295 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4296 data->policy, data->rt_priority);
4297 seq_puts(m, "# -----------------\n");
4299 if (data->critical_start) {
4300 seq_puts(m, "# => started at: ");
4301 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4302 trace_print_seq(m, &iter->seq);
4303 seq_puts(m, "\n# => ended at: ");
4304 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4305 trace_print_seq(m, &iter->seq);
4306 seq_puts(m, "\n#\n");
4312 static void test_cpu_buff_start(struct trace_iterator *iter)
4314 struct trace_seq *s = &iter->seq;
4315 struct trace_array *tr = iter->tr;
4317 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4320 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4323 if (cpumask_available(iter->started) &&
4324 cpumask_test_cpu(iter->cpu, iter->started))
4327 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4330 if (cpumask_available(iter->started))
4331 cpumask_set_cpu(iter->cpu, iter->started);
4333 /* Don't print started cpu buffer for the first entry of the trace */
4335 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4339 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4341 struct trace_array *tr = iter->tr;
4342 struct trace_seq *s = &iter->seq;
4343 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4344 struct trace_entry *entry;
4345 struct trace_event *event;
4349 test_cpu_buff_start(iter);
4351 event = ftrace_find_event(entry->type);
4353 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4354 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4355 trace_print_lat_context(iter);
4357 trace_print_context(iter);
4360 if (trace_seq_has_overflowed(s))
4361 return TRACE_TYPE_PARTIAL_LINE;
4364 return event->funcs->trace(iter, sym_flags, event);
4366 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4368 return trace_handle_return(s);
4371 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4373 struct trace_array *tr = iter->tr;
4374 struct trace_seq *s = &iter->seq;
4375 struct trace_entry *entry;
4376 struct trace_event *event;
4380 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4381 trace_seq_printf(s, "%d %d %llu ",
4382 entry->pid, iter->cpu, iter->ts);
4384 if (trace_seq_has_overflowed(s))
4385 return TRACE_TYPE_PARTIAL_LINE;
4387 event = ftrace_find_event(entry->type);
4389 return event->funcs->raw(iter, 0, event);
4391 trace_seq_printf(s, "%d ?\n", entry->type);
4393 return trace_handle_return(s);
4396 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4398 struct trace_array *tr = iter->tr;
4399 struct trace_seq *s = &iter->seq;
4400 unsigned char newline = '\n';
4401 struct trace_entry *entry;
4402 struct trace_event *event;
4406 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4407 SEQ_PUT_HEX_FIELD(s, entry->pid);
4408 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4409 SEQ_PUT_HEX_FIELD(s, iter->ts);
4410 if (trace_seq_has_overflowed(s))
4411 return TRACE_TYPE_PARTIAL_LINE;
4414 event = ftrace_find_event(entry->type);
4416 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4417 if (ret != TRACE_TYPE_HANDLED)
4421 SEQ_PUT_FIELD(s, newline);
4423 return trace_handle_return(s);
4426 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4428 struct trace_array *tr = iter->tr;
4429 struct trace_seq *s = &iter->seq;
4430 struct trace_entry *entry;
4431 struct trace_event *event;
4435 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4436 SEQ_PUT_FIELD(s, entry->pid);
4437 SEQ_PUT_FIELD(s, iter->cpu);
4438 SEQ_PUT_FIELD(s, iter->ts);
4439 if (trace_seq_has_overflowed(s))
4440 return TRACE_TYPE_PARTIAL_LINE;
4443 event = ftrace_find_event(entry->type);
4444 return event ? event->funcs->binary(iter, 0, event) :
4448 int trace_empty(struct trace_iterator *iter)
4450 struct ring_buffer_iter *buf_iter;
4453 /* If we are looking at one CPU buffer, only check that one */
4454 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4455 cpu = iter->cpu_file;
4456 buf_iter = trace_buffer_iter(iter, cpu);
4458 if (!ring_buffer_iter_empty(buf_iter))
4461 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4467 for_each_tracing_cpu(cpu) {
4468 buf_iter = trace_buffer_iter(iter, cpu);
4470 if (!ring_buffer_iter_empty(buf_iter))
4473 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4481 /* Called with trace_event_read_lock() held. */
4482 enum print_line_t print_trace_line(struct trace_iterator *iter)
4484 struct trace_array *tr = iter->tr;
4485 unsigned long trace_flags = tr->trace_flags;
4486 enum print_line_t ret;
4488 if (iter->lost_events) {
4489 if (iter->lost_events == (unsigned long)-1)
4490 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4493 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4494 iter->cpu, iter->lost_events);
4495 if (trace_seq_has_overflowed(&iter->seq))
4496 return TRACE_TYPE_PARTIAL_LINE;
4499 if (iter->trace && iter->trace->print_line) {
4500 ret = iter->trace->print_line(iter);
4501 if (ret != TRACE_TYPE_UNHANDLED)
4505 if (iter->ent->type == TRACE_BPUTS &&
4506 trace_flags & TRACE_ITER_PRINTK &&
4507 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4508 return trace_print_bputs_msg_only(iter);
4510 if (iter->ent->type == TRACE_BPRINT &&
4511 trace_flags & TRACE_ITER_PRINTK &&
4512 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4513 return trace_print_bprintk_msg_only(iter);
4515 if (iter->ent->type == TRACE_PRINT &&
4516 trace_flags & TRACE_ITER_PRINTK &&
4517 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4518 return trace_print_printk_msg_only(iter);
4520 if (trace_flags & TRACE_ITER_BIN)
4521 return print_bin_fmt(iter);
4523 if (trace_flags & TRACE_ITER_HEX)
4524 return print_hex_fmt(iter);
4526 if (trace_flags & TRACE_ITER_RAW)
4527 return print_raw_fmt(iter);
4529 return print_trace_fmt(iter);
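/*
 * Selection order above: the lost-events note is emitted first, a
 * tracer-provided print_line() callback wins next, then the
 * printk-msg-only shortcuts for BPUTS/BPRINT/PRINT entries, then the
 * bin/hex/raw output modes, and finally the default formatted output
 * from print_trace_fmt().
 */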
4532 void trace_latency_header(struct seq_file *m)
4534 struct trace_iterator *iter = m->private;
4535 struct trace_array *tr = iter->tr;
4537 /* print nothing if the buffers are empty */
4538 if (trace_empty(iter))
4541 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4542 print_trace_header(m, iter);
4544 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4545 print_lat_help_header(m);
4548 void trace_default_header(struct seq_file *m)
4550 struct trace_iterator *iter = m->private;
4551 struct trace_array *tr = iter->tr;
4552 unsigned long trace_flags = tr->trace_flags;
4554 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4557 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4558 /* print nothing if the buffers are empty */
4559 if (trace_empty(iter))
4561 print_trace_header(m, iter);
4562 if (!(trace_flags & TRACE_ITER_VERBOSE))
4563 print_lat_help_header(m);
4565 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4566 if (trace_flags & TRACE_ITER_IRQ_INFO)
4567 print_func_help_header_irq(iter->array_buffer,
4570 print_func_help_header(iter->array_buffer, m,
4576 static void test_ftrace_alive(struct seq_file *m)
4578 if (!ftrace_is_dead())
4580 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4581 "# MAY BE MISSING FUNCTION EVENTS\n");
4584 #ifdef CONFIG_TRACER_MAX_TRACE
4585 static void show_snapshot_main_help(struct seq_file *m)
4587 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4588 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4589 "# Takes a snapshot of the main buffer.\n"
4590 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4591 "# (Doesn't have to be '2' works with any number that\n"
4592 "# is not a '0' or '1')\n");
4595 static void show_snapshot_percpu_help(struct seq_file *m)
4597 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4598 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4599 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4600 "# Takes a snapshot of the main buffer for this cpu.\n");
4602 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4603 "# Must use main snapshot file to allocate.\n");
4605 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4606 "# (Doesn't have to be '2' works with any number that\n"
4607 "# is not a '0' or '1')\n");
4610 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4612 if (iter->tr->allocated_snapshot)
4613 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4615 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4617 seq_puts(m, "# Snapshot commands:\n");
4618 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4619 show_snapshot_main_help(m);
4621 show_snapshot_percpu_help(m);
4624 /* Should never be called */
4625 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4628 static int s_show(struct seq_file *m, void *v)
4630 struct trace_iterator *iter = v;
4633 if (iter->ent == NULL) {
4635 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4637 test_ftrace_alive(m);
4639 if (iter->snapshot && trace_empty(iter))
4640 print_snapshot_help(m, iter);
4641 else if (iter->trace && iter->trace->print_header)
4642 iter->trace->print_header(m);
4644 trace_default_header(m);
4646 } else if (iter->leftover) {
4648 * If we filled the seq_file buffer earlier, we
4649 * want to just show it now.
4651 ret = trace_print_seq(m, &iter->seq);
4653 /* ret should this time be zero, but you never know */
4654 iter->leftover = ret;
4657 print_trace_line(iter);
4658 ret = trace_print_seq(m, &iter->seq);
4660 * If we overflow the seq_file buffer, then it will
4661 * ask us for this data again at start up.
4663 * ret is 0 if seq_file write succeeded.
4666 iter->leftover = ret;
4673 * Should be used after trace_array_get(); trace_types_lock
4674 * ensures that i_cdev was already initialized.
4676 static inline int tracing_get_cpu(struct inode *inode)
4678 if (inode->i_cdev) /* See trace_create_cpu_file() */
4679 return (long)inode->i_cdev - 1;
4680 return RING_BUFFER_ALL_CPUS;
4683 static const struct seq_operations tracer_seq_ops = {
4690 static struct trace_iterator *
4691 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4693 struct trace_array *tr = inode->i_private;
4694 struct trace_iterator *iter;
4697 if (tracing_disabled)
4698 return ERR_PTR(-ENODEV);
4700 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4702 return ERR_PTR(-ENOMEM);
4704 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4706 if (!iter->buffer_iter)
4710 * trace_find_next_entry() may need to save off iter->ent.
4711 * It will place it into the iter->temp buffer. As most
4712 * events are less than 128, allocate a buffer of that size.
4713 * If one is greater, then trace_find_next_entry() will
4714 * allocate a new buffer to adjust for the bigger iter->ent.
4715 * It's not critical if it fails to get allocated here.
4717 iter->temp = kmalloc(128, GFP_KERNEL);
4719 iter->temp_size = 128;
4722 * trace_event_printf() may need to modify given format
4723 * string to replace %p with %px so that it shows the real address
4724 * instead of a hashed value. However, that is only needed for event
4725 * tracing; other tracers may not need it. Defer the allocation
4726 * until it is needed.
4732 * We make a copy of the current tracer to avoid concurrent
4733 * changes on it while we are reading.
4735 mutex_lock(&trace_types_lock);
4736 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4740 *iter->trace = *tr->current_trace;
4742 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4747 #ifdef CONFIG_TRACER_MAX_TRACE
4748 /* Currently only the top directory has a snapshot */
4749 if (tr->current_trace->print_max || snapshot)
4750 iter->array_buffer = &tr->max_buffer;
4753 iter->array_buffer = &tr->array_buffer;
4754 iter->snapshot = snapshot;
4756 iter->cpu_file = tracing_get_cpu(inode);
4757 mutex_init(&iter->mutex);
4759 /* Notify the tracer early; before we stop tracing. */
4760 if (iter->trace->open)
4761 iter->trace->open(iter);
4763 /* Annotate start of buffers if we had overruns */
4764 if (ring_buffer_overruns(iter->array_buffer->buffer))
4765 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4767 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4768 if (trace_clocks[tr->clock_id].in_ns)
4769 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4772 * If pause-on-trace is enabled, then stop the trace while
4773 * dumping, unless this is the "snapshot" file
4775 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4776 tracing_stop_tr(tr);
4778 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4779 for_each_tracing_cpu(cpu) {
4780 iter->buffer_iter[cpu] =
4781 ring_buffer_read_prepare(iter->array_buffer->buffer,
4784 ring_buffer_read_prepare_sync();
4785 for_each_tracing_cpu(cpu) {
4786 ring_buffer_read_start(iter->buffer_iter[cpu]);
4787 tracing_iter_reset(iter, cpu);
4790 cpu = iter->cpu_file;
4791 iter->buffer_iter[cpu] =
4792 ring_buffer_read_prepare(iter->array_buffer->buffer,
4794 ring_buffer_read_prepare_sync();
4795 ring_buffer_read_start(iter->buffer_iter[cpu]);
4796 tracing_iter_reset(iter, cpu);
4799 mutex_unlock(&trace_types_lock);
4804 mutex_unlock(&trace_types_lock);
4807 kfree(iter->buffer_iter);
4809 seq_release_private(inode, file);
4810 return ERR_PTR(-ENOMEM);
4813 int tracing_open_generic(struct inode *inode, struct file *filp)
4817 ret = tracing_check_open_get_tr(NULL);
4821 filp->private_data = inode->i_private;
4825 bool tracing_is_disabled(void)
4827 return (tracing_disabled) ? true : false;
4831 * Open and update trace_array ref count.
4832 * Must have the current trace_array passed to it.
4834 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4836 struct trace_array *tr = inode->i_private;
4839 ret = tracing_check_open_get_tr(tr);
4843 filp->private_data = inode->i_private;
4848 static int tracing_release(struct inode *inode, struct file *file)
4850 struct trace_array *tr = inode->i_private;
4851 struct seq_file *m = file->private_data;
4852 struct trace_iterator *iter;
4855 if (!(file->f_mode & FMODE_READ)) {
4856 trace_array_put(tr);
4860 /* Writes do not use seq_file */
4862 mutex_lock(&trace_types_lock);
4864 for_each_tracing_cpu(cpu) {
4865 if (iter->buffer_iter[cpu])
4866 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4869 if (iter->trace && iter->trace->close)
4870 iter->trace->close(iter);
4872 if (!iter->snapshot && tr->stop_count)
4873 /* reenable tracing if it was previously enabled */
4874 tracing_start_tr(tr);
4876 __trace_array_put(tr);
4878 mutex_unlock(&trace_types_lock);
4880 mutex_destroy(&iter->mutex);
4881 free_cpumask_var(iter->started);
4885 kfree(iter->buffer_iter);
4886 seq_release_private(inode, file);
4891 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4893 struct trace_array *tr = inode->i_private;
4895 trace_array_put(tr);
4899 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4901 struct trace_array *tr = inode->i_private;
4903 trace_array_put(tr);
4905 return single_release(inode, file);
4908 static int tracing_open(struct inode *inode, struct file *file)
4910 struct trace_array *tr = inode->i_private;
4911 struct trace_iterator *iter;
4914 ret = tracing_check_open_get_tr(tr);
4918 /* If this file was open for write, then erase contents */
4919 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4920 int cpu = tracing_get_cpu(inode);
4921 struct array_buffer *trace_buf = &tr->array_buffer;
4923 #ifdef CONFIG_TRACER_MAX_TRACE
4924 if (tr->current_trace->print_max)
4925 trace_buf = &tr->max_buffer;
4928 if (cpu == RING_BUFFER_ALL_CPUS)
4929 tracing_reset_online_cpus(trace_buf);
4931 tracing_reset_cpu(trace_buf, cpu);
4934 if (file->f_mode & FMODE_READ) {
4935 iter = __tracing_open(inode, file, false);
4937 ret = PTR_ERR(iter);
4938 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4939 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4943 trace_array_put(tr);
4949 * Some tracers are not suitable for instance buffers.
4950 * A tracer is always available for the global array (toplevel)
4951 * or if it explicitly states that it is.
4954 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4956 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4959 /* Find the next tracer that this trace array may use */
4960 static struct tracer *
4961 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4963 while (t && !trace_ok_for_array(t, tr))
4970 t_next(struct seq_file *m, void *v, loff_t *pos)
4972 struct trace_array *tr = m->private;
4973 struct tracer *t = v;
4978 t = get_tracer_for_array(tr, t->next);
4983 static void *t_start(struct seq_file *m, loff_t *pos)
4985 struct trace_array *tr = m->private;
4989 mutex_lock(&trace_types_lock);
4991 t = get_tracer_for_array(tr, trace_types);
4992 for (; t && l < *pos; t = t_next(m, t, &l))
4998 static void t_stop(struct seq_file *m, void *p)
5000 mutex_unlock(&trace_types_lock);
5003 static int t_show(struct seq_file *m, void *v)
5005 struct tracer *t = v;
5010 seq_puts(m, t->name);
5019 static const struct seq_operations show_traces_seq_ops = {
5026 static int show_traces_open(struct inode *inode, struct file *file)
5028 struct trace_array *tr = inode->i_private;
5032 ret = tracing_check_open_get_tr(tr);
5036 ret = seq_open(file, &show_traces_seq_ops);
5038 trace_array_put(tr);
5042 m = file->private_data;
5048 static int show_traces_release(struct inode *inode, struct file *file)
5050 struct trace_array *tr = inode->i_private;
5052 trace_array_put(tr);
5053 return seq_release(inode, file);
5057 tracing_write_stub(struct file *filp, const char __user *ubuf,
5058 size_t count, loff_t *ppos)
5063 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5067 if (file->f_mode & FMODE_READ)
5068 ret = seq_lseek(file, offset, whence);
5070 file->f_pos = ret = 0;
5075 static const struct file_operations tracing_fops = {
5076 .open = tracing_open,
5078 .write = tracing_write_stub,
5079 .llseek = tracing_lseek,
5080 .release = tracing_release,
5083 static const struct file_operations show_traces_fops = {
5084 .open = show_traces_open,
5086 .llseek = seq_lseek,
5087 .release = show_traces_release,
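/*
 * Rough usage of the two file_operations above, assuming the usual
 * tracefs mount point:
 *
 *	# cat /sys/kernel/tracing/available_tracers	(show_traces_fops)
 *	# cat /sys/kernel/tracing/trace			(tracing_fops)
 *	# echo > /sys/kernel/tracing/trace		(O_TRUNC clears the buffer)
 */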
5091 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5092 size_t count, loff_t *ppos)
5094 struct trace_array *tr = file_inode(filp)->i_private;
5098 len = snprintf(NULL, 0, "%*pb\n",
5099 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5100 mask_str = kmalloc(len, GFP_KERNEL);
5104 len = snprintf(mask_str, len, "%*pb\n",
5105 cpumask_pr_args(tr->tracing_cpumask));
5110 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
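/*
 * tracing_set_cpumask() below applies a new mask to @tr: every CPU whose
 * bit is being cleared has its ring buffer recording disabled and its
 * "disabled" counter bumped, and every CPU whose bit is being set gets
 * the reverse.  The walk runs under tr->max_lock with interrupts off so
 * it does not race with a max-latency buffer swap.
 */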
5118 int tracing_set_cpumask(struct trace_array *tr,
5119 cpumask_var_t tracing_cpumask_new)
5126 local_irq_disable();
5127 arch_spin_lock(&tr->max_lock);
5128 for_each_tracing_cpu(cpu) {
5130 * Increase/decrease the disabled counter if we are
5131 * about to flip a bit in the cpumask:
5133 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5134 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5135 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5136 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5138 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5139 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5140 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5141 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5144 arch_spin_unlock(&tr->max_lock);
5147 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5153 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5154 size_t count, loff_t *ppos)
5156 struct trace_array *tr = file_inode(filp)->i_private;
5157 cpumask_var_t tracing_cpumask_new;
5160 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5163 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5167 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5171 free_cpumask_var(tracing_cpumask_new);
5176 free_cpumask_var(tracing_cpumask_new);
5181 static const struct file_operations tracing_cpumask_fops = {
5182 .open = tracing_open_generic_tr,
5183 .read = tracing_cpumask_read,
5184 .write = tracing_cpumask_write,
5185 .release = tracing_release_generic_tr,
5186 .llseek = generic_file_llseek,
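/*
 * Illustrative use of the tracing_cpumask file handled above; "3" is the
 * hex bitmap for CPUs 0-1 and is echoed back in the same "%*pb" format:
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 *	# cat /sys/kernel/tracing/tracing_cpumask
 *	3
 */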
5189 static int tracing_trace_options_show(struct seq_file *m, void *v)
5191 struct tracer_opt *trace_opts;
5192 struct trace_array *tr = m->private;
5196 mutex_lock(&trace_types_lock);
5197 tracer_flags = tr->current_trace->flags->val;
5198 trace_opts = tr->current_trace->flags->opts;
5200 for (i = 0; trace_options[i]; i++) {
5201 if (tr->trace_flags & (1 << i))
5202 seq_printf(m, "%s\n", trace_options[i]);
5204 seq_printf(m, "no%s\n", trace_options[i]);
5207 for (i = 0; trace_opts[i].name; i++) {
5208 if (tracer_flags & trace_opts[i].bit)
5209 seq_printf(m, "%s\n", trace_opts[i].name);
5211 seq_printf(m, "no%s\n", trace_opts[i].name);
5213 mutex_unlock(&trace_types_lock);
5218 static int __set_tracer_option(struct trace_array *tr,
5219 struct tracer_flags *tracer_flags,
5220 struct tracer_opt *opts, int neg)
5222 struct tracer *trace = tracer_flags->trace;
5225 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5230 tracer_flags->val &= ~opts->bit;
5232 tracer_flags->val |= opts->bit;
5236 /* Try to assign a tracer specific option */
5237 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5239 struct tracer *trace = tr->current_trace;
5240 struct tracer_flags *tracer_flags = trace->flags;
5241 struct tracer_opt *opts = NULL;
5244 for (i = 0; tracer_flags->opts[i].name; i++) {
5245 opts = &tracer_flags->opts[i];
5247 if (strcmp(cmp, opts->name) == 0)
5248 return __set_tracer_option(tr, trace->flags, opts, neg);
5254 /* Some tracers require overwrite to stay enabled */
5255 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5257 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5263 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5267 if ((mask == TRACE_ITER_RECORD_TGID) ||
5268 (mask == TRACE_ITER_RECORD_CMD))
5269 lockdep_assert_held(&event_mutex);
5271 /* do nothing if flag is already set */
5272 if (!!(tr->trace_flags & mask) == !!enabled)
5275 /* Give the tracer a chance to approve the change */
5276 if (tr->current_trace->flag_changed)
5277 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5281 tr->trace_flags |= mask;
5283 tr->trace_flags &= ~mask;
5285 if (mask == TRACE_ITER_RECORD_CMD)
5286 trace_event_enable_cmd_record(enabled);
5288 if (mask == TRACE_ITER_RECORD_TGID) {
5290 tgid_map_max = pid_max;
5291 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5295 * Pairs with smp_load_acquire() in
5296 * trace_find_tgid_ptr() to ensure that if it observes
5297 * the tgid_map we just allocated then it also observes
5298 * the corresponding tgid_map_max value.
5300 smp_store_release(&tgid_map, map);
5303 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5307 trace_event_enable_tgid_record(enabled);
5310 if (mask == TRACE_ITER_EVENT_FORK)
5311 trace_event_follow_fork(tr, enabled);
5313 if (mask == TRACE_ITER_FUNC_FORK)
5314 ftrace_pid_follow_fork(tr, enabled);
5316 if (mask == TRACE_ITER_OVERWRITE) {
5317 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5318 #ifdef CONFIG_TRACER_MAX_TRACE
5319 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5323 if (mask == TRACE_ITER_PRINTK) {
5324 trace_printk_start_stop_comm(enabled);
5325 trace_printk_control(enabled);
5331 int trace_set_options(struct trace_array *tr, char *option)
5336 size_t orig_len = strlen(option);
5339 cmp = strstrip(option);
5341 len = str_has_prefix(cmp, "no");
5347 mutex_lock(&event_mutex);
5348 mutex_lock(&trace_types_lock);
5350 ret = match_string(trace_options, -1, cmp);
5351 /* If no option could be set, test the specific tracer options */
5353 ret = set_tracer_option(tr, cmp, neg);
5355 ret = set_tracer_flag(tr, 1 << ret, !neg);
5357 mutex_unlock(&trace_types_lock);
5358 mutex_unlock(&event_mutex);
5361 * If the first trailing whitespace is replaced with '\0' by strstrip,
5362 * turn it back into a space.
5364 if (orig_len > strlen(option))
5365 option[strlen(option)] = ' ';
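/*
 * Illustrative option strings accepted above (core flags first, with a
 * "no" prefix to clear; unknown names fall through to the current
 * tracer's private options via set_tracer_option()):
 *
 *	# echo sym-offset > trace_options
 *	# echo nosym-offset > trace_options
 */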
5370 static void __init apply_trace_boot_options(void)
5372 char *buf = trace_boot_options_buf;
5376 option = strsep(&buf, ",");
5382 trace_set_options(&global_trace, option);
5384 /* Put back the comma to allow this to be called again */
5391 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5392 size_t cnt, loff_t *ppos)
5394 struct seq_file *m = filp->private_data;
5395 struct trace_array *tr = m->private;
5399 if (cnt >= sizeof(buf))
5402 if (copy_from_user(buf, ubuf, cnt))
5407 ret = trace_set_options(tr, buf);
5416 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5418 struct trace_array *tr = inode->i_private;
5421 ret = tracing_check_open_get_tr(tr);
5425 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5427 trace_array_put(tr);
5432 static const struct file_operations tracing_iter_fops = {
5433 .open = tracing_trace_options_open,
5435 .llseek = seq_lseek,
5436 .release = tracing_single_release_tr,
5437 .write = tracing_trace_options_write,
5440 static const char readme_msg[] =
5441 "tracing mini-HOWTO:\n\n"
5442 "# echo 0 > tracing_on : quick way to disable tracing\n"
5443 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5444 " Important files:\n"
5445 " trace\t\t\t- The static contents of the buffer\n"
5446 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5447 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5448 " current_tracer\t- function and latency tracers\n"
5449 " available_tracers\t- list of configured tracers for current_tracer\n"
5450 " error_log\t- error log for failed commands (that support it)\n"
5451 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5452 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5453 " trace_clock\t\t-change the clock used to order events\n"
5454 " local: Per cpu clock but may not be synced across CPUs\n"
5455 " global: Synced across CPUs but slows tracing down.\n"
5456 " counter: Not a clock, but just an increment\n"
5457 " uptime: Jiffy counter from time of boot\n"
5458 " perf: Same clock that perf events use\n"
5459 #ifdef CONFIG_X86_64
5460 " x86-tsc: TSC cycle counter\n"
5462 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5463 " delta: Delta difference against a buffer-wide timestamp\n"
5464 " absolute: Absolute (standalone) timestamp\n"
5465 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5466 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5467 " tracing_cpumask\t- Limit which CPUs to trace\n"
5468 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5469 "\t\t\t Remove sub-buffer with rmdir\n"
5470 " trace_options\t\t- Set format or modify how tracing happens\n"
5471 "\t\t\t Disable an option by prefixing 'no' to the\n"
5472 "\t\t\t option name\n"
5473 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5474 #ifdef CONFIG_DYNAMIC_FTRACE
5475 "\n available_filter_functions - list of functions that can be filtered on\n"
5476 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5477 "\t\t\t functions\n"
5478 "\t accepts: func_full_name or glob-matching-pattern\n"
5479 "\t modules: Can select a group via module\n"
5480 "\t Format: :mod:<module-name>\n"
5481 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5482 "\t triggers: a command to perform when function is hit\n"
5483 "\t Format: <function>:<trigger>[:count]\n"
5484 "\t trigger: traceon, traceoff\n"
5485 "\t\t enable_event:<system>:<event>\n"
5486 "\t\t disable_event:<system>:<event>\n"
5487 #ifdef CONFIG_STACKTRACE
5490 #ifdef CONFIG_TRACER_SNAPSHOT
5495 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5496 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5497 "\t The first one will disable tracing every time do_fault is hit\n"
5498 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5499 "\t The first time do trap is hit and it disables tracing, the\n"
5500 "\t counter will decrement to 2. If tracing is already disabled,\n"
5501 "\t the counter will not decrement. It only decrements when the\n"
5502 "\t trigger did work\n"
5503 "\t To remove trigger without count:\n"
5504 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5505 "\t To remove trigger with a count:\n"
5506 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5507 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5508 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5509 "\t modules: Can select a group via module command :mod:\n"
5510 "\t Does not accept triggers\n"
5511 #endif /* CONFIG_DYNAMIC_FTRACE */
5512 #ifdef CONFIG_FUNCTION_TRACER
5513 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5515 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5518 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5519 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5520 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5521 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5523 #ifdef CONFIG_TRACER_SNAPSHOT
5524 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5525 "\t\t\t snapshot buffer. Read the contents for more\n"
5526 "\t\t\t information\n"
5528 #ifdef CONFIG_STACK_TRACER
5529 " stack_trace\t\t- Shows the max stack trace when active\n"
5530 " stack_max_size\t- Shows current max stack size that was traced\n"
5531 "\t\t\t Write into this file to reset the max size (trigger a\n"
5532 "\t\t\t new trace)\n"
5533 #ifdef CONFIG_DYNAMIC_FTRACE
5534 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5537 #endif /* CONFIG_STACK_TRACER */
5538 #ifdef CONFIG_DYNAMIC_EVENTS
5539 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5540 "\t\t\t Write into this file to define/undefine new trace events.\n"
5542 #ifdef CONFIG_KPROBE_EVENTS
5543 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5544 "\t\t\t Write into this file to define/undefine new trace events.\n"
5546 #ifdef CONFIG_UPROBE_EVENTS
5547 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5548 "\t\t\t Write into this file to define/undefine new trace events.\n"
5550 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5551 "\t accepts: event-definitions (one definition per line)\n"
5552 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5553 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5554 #ifdef CONFIG_HIST_TRIGGERS
5555 "\t s:[synthetic/]<event> <field> [<field>]\n"
5557 "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
5558 "\t -:[<group>/]<event>\n"
5559 #ifdef CONFIG_KPROBE_EVENTS
5560 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5561 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5563 #ifdef CONFIG_UPROBE_EVENTS
5564 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5566 "\t args: <name>=fetcharg[:type]\n"
5567 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5568 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5569 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5571 "\t $stack<index>, $stack, $retval, $comm,\n"
5573 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5574 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5575 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5576 "\t <type>\\[<array-size>\\]\n"
5577 #ifdef CONFIG_HIST_TRIGGERS
5578 "\t field: <stype> <name>;\n"
5579 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5580 "\t [unsigned] char/int/long\n"
5582 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5583 "\t of the <attached-group>/<attached-event>.\n"
5585 " events/\t\t- Directory containing all trace event subsystems:\n"
5586 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5587 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5588 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5590 " filter\t\t- If set, only events passing filter are traced\n"
5591 " events/<system>/<event>/\t- Directory containing control files for\n"
5593 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5594 " filter\t\t- If set, only events passing filter are traced\n"
5595 " trigger\t\t- If set, a command to perform when event is hit\n"
5596 "\t Format: <trigger>[:count][if <filter>]\n"
5597 "\t trigger: traceon, traceoff\n"
5598 "\t enable_event:<system>:<event>\n"
5599 "\t disable_event:<system>:<event>\n"
5600 #ifdef CONFIG_HIST_TRIGGERS
5601 "\t enable_hist:<system>:<event>\n"
5602 "\t disable_hist:<system>:<event>\n"
5604 #ifdef CONFIG_STACKTRACE
5607 #ifdef CONFIG_TRACER_SNAPSHOT
5610 #ifdef CONFIG_HIST_TRIGGERS
5611 "\t\t hist (see below)\n"
5613 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5614 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5615 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5616 "\t events/block/block_unplug/trigger\n"
5617 "\t The first disables tracing every time block_unplug is hit.\n"
5618 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5619 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5620 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5621 "\t Like function triggers, the counter is only decremented if it\n"
5622 "\t enabled or disabled tracing.\n"
5623 "\t To remove a trigger without a count:\n"
5624 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5625 "\t To remove a trigger with a count:\n"
5626 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5627 "\t Filters can be ignored when removing a trigger.\n"
5628 #ifdef CONFIG_HIST_TRIGGERS
5629 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5630 "\t Format: hist:keys=<field1[,field2,...]>\n"
5631 "\t [:values=<field1[,field2,...]>]\n"
5632 "\t [:sort=<field1[,field2,...]>]\n"
5633 "\t [:size=#entries]\n"
5634 "\t [:pause][:continue][:clear]\n"
5635 "\t [:name=histname1]\n"
5636 "\t [:<handler>.<action>]\n"
5637 "\t [if <filter>]\n\n"
5638 "\t Note, special fields can be used as well:\n"
5639 "\t common_timestamp - to record current timestamp\n"
5640 "\t common_cpu - to record the CPU the event happened on\n"
5642 "\t When a matching event is hit, an entry is added to a hash\n"
5643 "\t table using the key(s) and value(s) named, and the value of a\n"
5644 "\t sum called 'hitcount' is incremented. Keys and values\n"
5645 "\t correspond to fields in the event's format description. Keys\n"
5646 "\t can be any field, or the special string 'stacktrace'.\n"
5647 "\t Compound keys consisting of up to two fields can be specified\n"
5648 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5649 "\t fields. Sort keys consisting of up to two fields can be\n"
5650 "\t specified using the 'sort' keyword. The sort direction can\n"
5651 "\t be modified by appending '.descending' or '.ascending' to a\n"
5652 "\t sort field. The 'size' parameter can be used to specify more\n"
5653 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5654 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5655 "\t its histogram data will be shared with other triggers of the\n"
5656 "\t same name, and trigger hits will update this common data.\n\n"
5657 "\t Reading the 'hist' file for the event will dump the hash\n"
5658 "\t table in its entirety to stdout. If there are multiple hist\n"
5659 "\t triggers attached to an event, there will be a table for each\n"
5660 "\t trigger in the output. The table displayed for a named\n"
5661 "\t trigger will be the same as any other instance having the\n"
5662 "\t same name. The default format used to display a given field\n"
5663 "\t can be modified by appending any of the following modifiers\n"
5664 "\t to the field name, as applicable:\n\n"
5665 "\t .hex display a number as a hex value\n"
5666 "\t .sym display an address as a symbol\n"
5667 "\t .sym-offset display an address as a symbol and offset\n"
5668 "\t .execname display a common_pid as a program name\n"
5669 "\t .syscall display a syscall id as a syscall name\n"
5670 "\t .log2 display log2 value rather than raw number\n"
5671 "\t .buckets=size display values in groups of size rather than raw number\n"
5672 "\t .usecs display a common_timestamp in microseconds\n\n"
5673 "\t The 'pause' parameter can be used to pause an existing hist\n"
5674 "\t trigger or to start a hist trigger but not log any events\n"
5675 "\t until told to do so. 'continue' can be used to start or\n"
5676 "\t restart a paused hist trigger.\n\n"
5677 "\t The 'clear' parameter will clear the contents of a running\n"
5678 "\t hist trigger and leave its current paused/active state\n"
5680 "\t The enable_hist and disable_hist triggers can be used to\n"
5681 "\t have one event conditionally start and stop another event's\n"
5682 "\t already-attached hist trigger. The syntax is analogous to\n"
5683 "\t the enable_event and disable_event triggers.\n\n"
5684 "\t Hist trigger handlers and actions are executed whenever a\n"
5685 "\t a histogram entry is added or updated. They take the form:\n\n"
5686 "\t <handler>.<action>\n\n"
5687 "\t The available handlers are:\n\n"
5688 "\t onmatch(matching.event) - invoke on addition or update\n"
5689 "\t onmax(var) - invoke if var exceeds current max\n"
5690 "\t onchange(var) - invoke action if var changes\n\n"
5691 "\t The available actions are:\n\n"
5692 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5693 "\t save(field,...) - save current event fields\n"
5694 #ifdef CONFIG_TRACER_SNAPSHOT
5695 "\t snapshot() - snapshot the trace buffer\n\n"
5697 #ifdef CONFIG_SYNTH_EVENTS
5698 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5699 "\t Write into this file to define/undefine new synthetic events.\n"
5700 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5706 tracing_readme_read(struct file *filp, char __user *ubuf,
5707 size_t cnt, loff_t *ppos)
5709 return simple_read_from_buffer(ubuf, cnt, ppos,
5710 readme_msg, strlen(readme_msg));
5713 static const struct file_operations tracing_readme_fops = {
5714 .open = tracing_open_generic,
5715 .read = tracing_readme_read,
5716 .llseek = generic_file_llseek,
5719 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5723 return trace_find_tgid_ptr(pid);
5726 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5730 return trace_find_tgid_ptr(pid);
5733 static void saved_tgids_stop(struct seq_file *m, void *v)
5737 static int saved_tgids_show(struct seq_file *m, void *v)
5739 int *entry = (int *)v;
5740 int pid = entry - tgid_map;
5746 seq_printf(m, "%d %d\n", pid, tgid);
5750 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5751 .start = saved_tgids_start,
5752 .stop = saved_tgids_stop,
5753 .next = saved_tgids_next,
5754 .show = saved_tgids_show,
5757 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5761 ret = tracing_check_open_get_tr(NULL);
5765 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5769 static const struct file_operations tracing_saved_tgids_fops = {
5770 .open = tracing_saved_tgids_open,
5772 .llseek = seq_lseek,
5773 .release = seq_release,
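/*
 * The saved_tgids file implemented above prints one "<pid> <tgid>" pair
 * per line from the tgid_map that set_tracer_flag() allocates when the
 * record-tgid option is turned on.  Roughly:
 *
 *	# echo 1 > options/record-tgid
 *	# cat saved_tgids
 *	1234 1230
 */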
5776 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5778 unsigned int *ptr = v;
5780 if (*pos || m->count)
5785 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5787 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5796 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5802 arch_spin_lock(&trace_cmdline_lock);
5804 v = &savedcmd->map_cmdline_to_pid[0];
5806 v = saved_cmdlines_next(m, v, &l);
5814 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5816 arch_spin_unlock(&trace_cmdline_lock);
5820 static int saved_cmdlines_show(struct seq_file *m, void *v)
5822 char buf[TASK_COMM_LEN];
5823 unsigned int *pid = v;
5825 __trace_find_cmdline(*pid, buf);
5826 seq_printf(m, "%d %s\n", *pid, buf);
5830 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5831 .start = saved_cmdlines_start,
5832 .next = saved_cmdlines_next,
5833 .stop = saved_cmdlines_stop,
5834 .show = saved_cmdlines_show,
5837 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5841 ret = tracing_check_open_get_tr(NULL);
5845 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5848 static const struct file_operations tracing_saved_cmdlines_fops = {
5849 .open = tracing_saved_cmdlines_open,
5851 .llseek = seq_lseek,
5852 .release = seq_release,
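/*
 * saved_cmdlines, implemented above, prints one "<pid> <comm>" pair per
 * line from the cached pid->comm map (e.g. "1234 bash").  The companion
 * saved_cmdlines_size file below reports and resizes how many entries
 * that cache can hold.
 */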
5856 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5857 size_t cnt, loff_t *ppos)
5862 arch_spin_lock(&trace_cmdline_lock);
5863 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5864 arch_spin_unlock(&trace_cmdline_lock);
5866 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5869 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5871 kfree(s->saved_cmdlines);
5872 kfree(s->map_cmdline_to_pid);
5876 static int tracing_resize_saved_cmdlines(unsigned int val)
5878 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5880 s = kmalloc(sizeof(*s), GFP_KERNEL);
5884 if (allocate_cmdlines_buffer(val, s) < 0) {
5889 arch_spin_lock(&trace_cmdline_lock);
5890 savedcmd_temp = savedcmd;
5892 arch_spin_unlock(&trace_cmdline_lock);
5893 free_saved_cmdlines_buffer(savedcmd_temp);
5899 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5900 size_t cnt, loff_t *ppos)
5905 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5909 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
5910 if (!val || val > PID_MAX_DEFAULT)
5913 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5922 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5923 .open = tracing_open_generic,
5924 .read = tracing_saved_cmdlines_size_read,
5925 .write = tracing_saved_cmdlines_size_write,
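/*
 * Example (sketch) of resizing the comm cache through the file above;
 * a value of 0 or anything larger than PID_MAX_DEFAULT is rejected:
 *
 *	# echo 1024 > saved_cmdlines_size
 *	# cat saved_cmdlines_size
 *	1024
 */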
5928 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5929 static union trace_eval_map_item *
5930 update_eval_map(union trace_eval_map_item *ptr)
5932 if (!ptr->map.eval_string) {
5933 if (ptr->tail.next) {
5934 ptr = ptr->tail.next;
5935 /* Set ptr to the next real item (skip head) */
5943 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5945 union trace_eval_map_item *ptr = v;
5948 * Paranoid! If ptr points to end, we don't want to increment past it.
5949 * This really should never happen.
5952 ptr = update_eval_map(ptr);
5953 if (WARN_ON_ONCE(!ptr))
5957 ptr = update_eval_map(ptr);
5962 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5964 union trace_eval_map_item *v;
5967 mutex_lock(&trace_eval_mutex);
5969 v = trace_eval_maps;
5973 while (v && l < *pos) {
5974 v = eval_map_next(m, v, &l);
5980 static void eval_map_stop(struct seq_file *m, void *v)
5982 mutex_unlock(&trace_eval_mutex);
5985 static int eval_map_show(struct seq_file *m, void *v)
5987 union trace_eval_map_item *ptr = v;
5989 seq_printf(m, "%s %ld (%s)\n",
5990 ptr->map.eval_string, ptr->map.eval_value,
5996 static const struct seq_operations tracing_eval_map_seq_ops = {
5997 .start = eval_map_start,
5998 .next = eval_map_next,
5999 .stop = eval_map_stop,
6000 .show = eval_map_show,
6003 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6007 ret = tracing_check_open_get_tr(NULL);
6011 return seq_open(filp, &tracing_eval_map_seq_ops);
6014 static const struct file_operations tracing_eval_map_fops = {
6015 .open = tracing_eval_map_open,
6017 .llseek = seq_lseek,
6018 .release = seq_release,
6021 static inline union trace_eval_map_item *
6022 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6024 /* Return tail of array given the head */
6025 return ptr + ptr->head.length + 1;
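/*
 * Rough layout of the array built by trace_insert_eval_map_file() below
 * (len maps plus one head and one tail element), which is what
 * trace_eval_jmp_to_tail() steps over:
 *
 *	[ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 */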
6029 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6032 struct trace_eval_map **stop;
6033 struct trace_eval_map **map;
6034 union trace_eval_map_item *map_array;
6035 union trace_eval_map_item *ptr;
6040 * The trace_eval_maps contains the map plus a head and tail item,
6041 * where the head holds the module and length of array, and the
6042 * tail holds a pointer to the next list.
6044 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6046 pr_warn("Unable to allocate trace eval mapping\n");
6050 mutex_lock(&trace_eval_mutex);
6052 if (!trace_eval_maps)
6053 trace_eval_maps = map_array;
6055 ptr = trace_eval_maps;
6057 ptr = trace_eval_jmp_to_tail(ptr);
6058 if (!ptr->tail.next)
6060 ptr = ptr->tail.next;
6063 ptr->tail.next = map_array;
6065 map_array->head.mod = mod;
6066 map_array->head.length = len;
6069 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6070 map_array->map = **map;
6073 memset(map_array, 0, sizeof(*map_array));
6075 mutex_unlock(&trace_eval_mutex);
6078 static void trace_create_eval_file(struct dentry *d_tracer)
6080 trace_create_file("eval_map", 0444, d_tracer,
6081 NULL, &tracing_eval_map_fops);
6084 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6085 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6086 static inline void trace_insert_eval_map_file(struct module *mod,
6087 struct trace_eval_map **start, int len) { }
6088 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6090 static void trace_insert_eval_map(struct module *mod,
6091 struct trace_eval_map **start, int len)
6093 struct trace_eval_map **map;
6100 trace_event_eval_update(map, len);
6102 trace_insert_eval_map_file(mod, start, len);
6106 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6107 size_t cnt, loff_t *ppos)
6109 struct trace_array *tr = filp->private_data;
6110 char buf[MAX_TRACER_SIZE+2];
6113 mutex_lock(&trace_types_lock);
6114 r = sprintf(buf, "%s\n", tr->current_trace->name);
6115 mutex_unlock(&trace_types_lock);
6117 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6120 int tracer_init(struct tracer *t, struct trace_array *tr)
6122 tracing_reset_online_cpus(&tr->array_buffer);
6126 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6130 for_each_tracing_cpu(cpu)
6131 per_cpu_ptr(buf->data, cpu)->entries = val;
6134 #ifdef CONFIG_TRACER_MAX_TRACE
6135 /* resize @tr's buffer to the size of @size_tr's entries */
6136 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6137 struct array_buffer *size_buf, int cpu_id)
6141 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6142 for_each_tracing_cpu(cpu) {
6143 ret = ring_buffer_resize(trace_buf->buffer,
6144 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6147 per_cpu_ptr(trace_buf->data, cpu)->entries =
6148 per_cpu_ptr(size_buf->data, cpu)->entries;
6151 ret = ring_buffer_resize(trace_buf->buffer,
6152 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6154 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6155 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6160 #endif /* CONFIG_TRACER_MAX_TRACE */
6162 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6163 unsigned long size, int cpu)
6168 * If kernel or user changes the size of the ring buffer
6169 * we use the size that was given, and we can forget about
6170 * expanding it later.
6172 ring_buffer_expanded = true;
6174 /* May be called before buffers are initialized */
6175 if (!tr->array_buffer.buffer)
6178 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6182 #ifdef CONFIG_TRACER_MAX_TRACE
6183 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6184 !tr->current_trace->use_max_tr)
6187 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6189 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6190 &tr->array_buffer, cpu);
6193 * AARGH! We are left with different
6194 * size max buffer!!!!
6195 * The max buffer is our "snapshot" buffer.
6196 * When a tracer needs a snapshot (one of the
6197 * latency tracers), it swaps the max buffer
6198 * with the saved snapshot. We succeeded in
6199 * updating the size of the main buffer, but failed to
6200 * update the size of the max buffer. But when we tried
6201 * to reset the main buffer to the original size, we
6202 * failed there too. This is very unlikely to
6203 * happen, but if it does, warn and kill all tracing.
6207 tracing_disabled = 1;
6212 if (cpu == RING_BUFFER_ALL_CPUS)
6213 set_buffer_entries(&tr->max_buffer, size);
6215 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6218 #endif /* CONFIG_TRACER_MAX_TRACE */
6220 if (cpu == RING_BUFFER_ALL_CPUS)
6221 set_buffer_entries(&tr->array_buffer, size);
6223 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6228 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6229 unsigned long size, int cpu_id)
6233 mutex_lock(&trace_types_lock);
6235 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6236 /* make sure, this cpu is enabled in the mask */
6237 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6243 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6248 mutex_unlock(&trace_types_lock);
6255 * tracing_update_buffers - used by tracing facility to expand ring buffers
6257 * To save memory when tracing is never used on a system with it
6258 * configured in, the ring buffers are set to a minimum size. Once
6259 * a user starts to use the tracing facility, the buffers need to grow
6260 * to their default size.
6262 * This function is to be called when a tracer is about to be used.
6264 int tracing_update_buffers(void)
6268 mutex_lock(&trace_types_lock);
6269 if (!ring_buffer_expanded)
6270 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6271 RING_BUFFER_ALL_CPUS);
6272 mutex_unlock(&trace_types_lock);
6277 struct trace_option_dentry;
6280 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6283 * Used to clear out the tracer before deletion of an instance.
6284 * Must have trace_types_lock held.
6286 static void tracing_set_nop(struct trace_array *tr)
6288 if (tr->current_trace == &nop_trace)
6291 tr->current_trace->enabled--;
6293 if (tr->current_trace->reset)
6294 tr->current_trace->reset(tr);
6296 tr->current_trace = &nop_trace;
6299 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6301 /* Only enable if the directory has been created already. */
6305 create_trace_option_files(tr, t);
6308 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6311 #ifdef CONFIG_TRACER_MAX_TRACE
6316 mutex_lock(&trace_types_lock);
6318 if (!ring_buffer_expanded) {
6319 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6320 RING_BUFFER_ALL_CPUS);
6326 for (t = trace_types; t; t = t->next) {
6327 if (strcmp(t->name, buf) == 0)
6334 if (t == tr->current_trace)
6337 #ifdef CONFIG_TRACER_SNAPSHOT
6338 if (t->use_max_tr) {
6339 arch_spin_lock(&tr->max_lock);
6340 if (tr->cond_snapshot)
6342 arch_spin_unlock(&tr->max_lock);
6347 /* Some tracers won't work on kernel command line */
6348 if (system_state < SYSTEM_RUNNING && t->noboot) {
6349 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6354 /* Some tracers are only allowed for the top level buffer */
6355 if (!trace_ok_for_array(t, tr)) {
6360 /* If trace pipe files are being read, we can't change the tracer */
6361 if (tr->trace_ref) {
6366 trace_branch_disable();
6368 tr->current_trace->enabled--;
6370 if (tr->current_trace->reset)
6371 tr->current_trace->reset(tr);
6373 /* Current trace needs to be nop_trace before synchronize_rcu */
6374 tr->current_trace = &nop_trace;
6376 #ifdef CONFIG_TRACER_MAX_TRACE
6377 had_max_tr = tr->allocated_snapshot;
6379 if (had_max_tr && !t->use_max_tr) {
6381 * We need to make sure that the update_max_tr sees that
6382 * current_trace changed to nop_trace to keep it from
6383 * swapping the buffers after we resize it.
6384 * The update_max_tr is called from interrupts disabled
6385 * so a synchronize_rcu() is sufficient.
6392 #ifdef CONFIG_TRACER_MAX_TRACE
6393 if (t->use_max_tr && !had_max_tr) {
6394 ret = tracing_alloc_snapshot_instance(tr);
6401 ret = tracer_init(t, tr);
6406 tr->current_trace = t;
6407 tr->current_trace->enabled++;
6408 trace_branch_enable(tr);
6410 mutex_unlock(&trace_types_lock);
6416 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6417 size_t cnt, loff_t *ppos)
6419 struct trace_array *tr = filp->private_data;
6420 char buf[MAX_TRACER_SIZE+1];
6427 if (cnt > MAX_TRACER_SIZE)
6428 cnt = MAX_TRACER_SIZE;
6430 if (copy_from_user(buf, ubuf, cnt))
6435 /* strip ending whitespace. */
6436 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6439 err = tracing_set_tracer(tr, buf);
6449 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6450 size_t cnt, loff_t *ppos)
6455 r = snprintf(buf, sizeof(buf), "%ld\n",
6456 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6457 if (r > sizeof(buf))
6459 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
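/*
 * Unit note: the read helper above reports stored nanosecond values in
 * microseconds (via nsecs_to_usecs()), and a stored value of -1 is
 * printed literally as "-1".  The write helper below presumably performs
 * the inverse microsecond-to-nanosecond conversion.
 */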
6463 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6464 size_t cnt, loff_t *ppos)
6469 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6479 tracing_thresh_read(struct file *filp, char __user *ubuf,
6480 size_t cnt, loff_t *ppos)
6482 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6486 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6487 size_t cnt, loff_t *ppos)
6489 struct trace_array *tr = filp->private_data;
6492 mutex_lock(&trace_types_lock);
6493 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6497 if (tr->current_trace->update_thresh) {
6498 ret = tr->current_trace->update_thresh(tr);
6505 mutex_unlock(&trace_types_lock);
6510 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6513 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6514 size_t cnt, loff_t *ppos)
6516 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6520 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6521 size_t cnt, loff_t *ppos)
6523 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6528 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6530 struct trace_array *tr = inode->i_private;
6531 struct trace_iterator *iter;
6534 ret = tracing_check_open_get_tr(tr);
6538 mutex_lock(&trace_types_lock);
6540 /* create a buffer to store the information to pass to userspace */
6541 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6544 __trace_array_put(tr);
6548 trace_seq_init(&iter->seq);
6549 iter->trace = tr->current_trace;
6551 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6556 /* trace pipe does not show start of buffer */
6557 cpumask_setall(iter->started);
6559 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6560 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6562 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6563 if (trace_clocks[tr->clock_id].in_ns)
6564 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6567 iter->array_buffer = &tr->array_buffer;
6568 iter->cpu_file = tracing_get_cpu(inode);
6569 mutex_init(&iter->mutex);
6570 filp->private_data = iter;
6572 if (iter->trace->pipe_open)
6573 iter->trace->pipe_open(iter);
6575 nonseekable_open(inode, filp);
6579 mutex_unlock(&trace_types_lock);
6584 __trace_array_put(tr);
6585 mutex_unlock(&trace_types_lock);
6589 static int tracing_release_pipe(struct inode *inode, struct file *file)
6591 struct trace_iterator *iter = file->private_data;
6592 struct trace_array *tr = inode->i_private;
6594 mutex_lock(&trace_types_lock);
6598 if (iter->trace->pipe_close)
6599 iter->trace->pipe_close(iter);
6601 mutex_unlock(&trace_types_lock);
6603 free_cpumask_var(iter->started);
6604 mutex_destroy(&iter->mutex);
6607 trace_array_put(tr);
6613 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6615 struct trace_array *tr = iter->tr;
6617 /* Iterators are static, they should be filled or empty */
6618 if (trace_buffer_iter(iter, iter->cpu_file))
6619 return EPOLLIN | EPOLLRDNORM;
6621 if (tr->trace_flags & TRACE_ITER_BLOCK)
6623 * Always select as readable when in blocking mode
6625 return EPOLLIN | EPOLLRDNORM;
6627 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6632 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6634 struct trace_iterator *iter = filp->private_data;
6636 return trace_poll(iter, filp, poll_table);
6639 /* Must be called with iter->mutex held. */
6640 static int tracing_wait_pipe(struct file *filp)
6642 struct trace_iterator *iter = filp->private_data;
6645 while (trace_empty(iter)) {
6647 if ((filp->f_flags & O_NONBLOCK)) {
6652 * We block until we read something and tracing is disabled.
6653 * We still block if tracing is disabled, but we have never
6654 * read anything. This allows a user to cat this file, and
6655 * then enable tracing. But after we have read something,
6656 * we give an EOF when tracing is again disabled.
6658 * iter->pos will be 0 if we haven't read anything.
6660 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6663 mutex_unlock(&iter->mutex);
6665 ret = wait_on_pipe(iter, 0);
6667 mutex_lock(&iter->mutex);
6680 tracing_read_pipe(struct file *filp, char __user *ubuf,
6681 size_t cnt, loff_t *ppos)
6683 struct trace_iterator *iter = filp->private_data;
6687 * Avoid more than one consumer on a single file descriptor
6688 * This is just a matter of traces coherency; the ring buffer itself is protected.
6691 mutex_lock(&iter->mutex);
6693 /* return any leftover data */
6694 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6698 trace_seq_init(&iter->seq);
6700 if (iter->trace->read) {
6701 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6707 sret = tracing_wait_pipe(filp);
6711 /* stop when tracing is finished */
6712 if (trace_empty(iter)) {
6717 if (cnt >= PAGE_SIZE)
6718 cnt = PAGE_SIZE - 1;
6720 /* reset all but tr, trace, and overruns */
6721 memset(&iter->seq, 0,
6722 sizeof(struct trace_iterator) -
6723 offsetof(struct trace_iterator, seq));
6724 cpumask_clear(iter->started);
6725 trace_seq_init(&iter->seq);
6728 trace_event_read_lock();
6729 trace_access_lock(iter->cpu_file);
6730 while (trace_find_next_entry_inc(iter) != NULL) {
6731 enum print_line_t ret;
6732 int save_len = iter->seq.seq.len;
6734 ret = print_trace_line(iter);
6735 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6736 /* don't print partial lines */
6737 iter->seq.seq.len = save_len;
6740 if (ret != TRACE_TYPE_NO_CONSUME)
6741 trace_consume(iter);
6743 if (trace_seq_used(&iter->seq) >= cnt)
6747 * Setting the full flag means we reached the trace_seq buffer
6748 * size and we should leave by partial output condition above.
6749 * One of the trace_seq_* functions is not used properly.
6751 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6754 trace_access_unlock(iter->cpu_file);
6755 trace_event_read_unlock();
6757 /* Now copy what we have to the user */
6758 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6759 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6760 trace_seq_init(&iter->seq);
6763 * If there was nothing to send to user, in spite of consuming trace
6764 * entries, go back to wait for more entries.
6770 mutex_unlock(&iter->mutex);
6775 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6778 __free_page(spd->pages[idx]);
6782 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6788 /* Seq buffer is page-sized, exactly what we need. */
6790 save_len = iter->seq.seq.len;
6791 ret = print_trace_line(iter);
6793 if (trace_seq_has_overflowed(&iter->seq)) {
6794 iter->seq.seq.len = save_len;
6799 * This should not be hit, because it should only
6800 * be set if the iter->seq overflowed. But check it
6801 * anyway to be safe.
6803 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6804 iter->seq.seq.len = save_len;
6808 count = trace_seq_used(&iter->seq) - save_len;
6811 iter->seq.seq.len = save_len;
6815 if (ret != TRACE_TYPE_NO_CONSUME)
6816 trace_consume(iter);
6818 if (!trace_find_next_entry_inc(iter)) {
6828 static ssize_t tracing_splice_read_pipe(struct file *filp,
6830 struct pipe_inode_info *pipe,
6834 struct page *pages_def[PIPE_DEF_BUFFERS];
6835 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6836 struct trace_iterator *iter = filp->private_data;
6837 struct splice_pipe_desc spd = {
6839 .partial = partial_def,
6840 .nr_pages = 0, /* This gets updated below. */
6841 .nr_pages_max = PIPE_DEF_BUFFERS,
6842 .ops = &default_pipe_buf_ops,
6843 .spd_release = tracing_spd_release_pipe,
6849 if (splice_grow_spd(pipe, &spd))
6852 mutex_lock(&iter->mutex);
6854 if (iter->trace->splice_read) {
6855 ret = iter->trace->splice_read(iter, filp,
6856 ppos, pipe, len, flags);
6861 ret = tracing_wait_pipe(filp);
6865 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6870 trace_event_read_lock();
6871 trace_access_lock(iter->cpu_file);
6873 /* Fill as many pages as possible. */
6874 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6875 spd.pages[i] = alloc_page(GFP_KERNEL);
6879 rem = tracing_fill_pipe_page(rem, iter);
6881 /* Copy the data into the page, so we can start over. */
6882 ret = trace_seq_to_buffer(&iter->seq,
6883 page_address(spd.pages[i]),
6884 trace_seq_used(&iter->seq));
6886 __free_page(spd.pages[i]);
6889 spd.partial[i].offset = 0;
6890 spd.partial[i].len = trace_seq_used(&iter->seq);
6892 trace_seq_init(&iter->seq);
6895 trace_access_unlock(iter->cpu_file);
6896 trace_event_read_unlock();
6897 mutex_unlock(&iter->mutex);
6902 ret = splice_to_pipe(pipe, &spd);
6906 splice_shrink_spd(&spd);
6910 mutex_unlock(&iter->mutex);
6915 tracing_entries_read(struct file *filp, char __user *ubuf,
6916 size_t cnt, loff_t *ppos)
6918 struct inode *inode = file_inode(filp);
6919 struct trace_array *tr = inode->i_private;
6920 int cpu = tracing_get_cpu(inode);
6925 mutex_lock(&trace_types_lock);
6927 if (cpu == RING_BUFFER_ALL_CPUS) {
6928 int cpu, buf_size_same;
6933 /* check if all cpu sizes are same */
6934 for_each_tracing_cpu(cpu) {
6935 /* fill in the size from first enabled cpu */
6937 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6938 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6944 if (buf_size_same) {
6945 if (!ring_buffer_expanded)
6946 r = sprintf(buf, "%lu (expanded: %lu)\n",
6948 trace_buf_size >> 10);
6950 r = sprintf(buf, "%lu\n", size >> 10);
6952 r = sprintf(buf, "X\n");
6954 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6956 mutex_unlock(&trace_types_lock);
6958 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6963 tracing_entries_write(struct file *filp, const char __user *ubuf,
6964 size_t cnt, loff_t *ppos)
6966 struct inode *inode = file_inode(filp);
6967 struct trace_array *tr = inode->i_private;
6971 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6975 /* must have at least 1 entry */
6979 /* value is in KB */
6981 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6991 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6992 size_t cnt, loff_t *ppos)
6994 struct trace_array *tr = filp->private_data;
6997 unsigned long size = 0, expanded_size = 0;
6999 mutex_lock(&trace_types_lock);
7000 for_each_tracing_cpu(cpu) {
7001 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7002 if (!ring_buffer_expanded)
7003 expanded_size += trace_buf_size >> 10;
7005 if (ring_buffer_expanded)
7006 r = sprintf(buf, "%lu\n", size);
7008 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7009 mutex_unlock(&trace_types_lock);
7011 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
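/*
 * Illustrative reads of the two size files implemented above (values are
 * in kilobytes; the "(expanded: ...)" suffix appears while the ring
 * buffer is still at its boot-time minimum size):
 *
 *	# cat buffer_size_kb
 *	1408
 *	# cat buffer_total_size_kb
 *	11264
 */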
7015 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7016 size_t cnt, loff_t *ppos)
7019 * There is no need to read what the user has written; this function
7020 * is just to make sure that there is no error when "echo" is used
7029 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7031 struct trace_array *tr = inode->i_private;
7033 /* disable tracing ? */
7034 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7035 tracer_tracing_off(tr);
7036 /* resize the ring buffer to 0 */
7037 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7039 trace_array_put(tr);
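/*
 * tracing_mark_write() below backs the trace_marker file described in
 * readme_msg.  A minimal usage sketch; the string shows up as a print
 * entry in the trace output:
 *
 *	# echo "hello from user space" > /sys/kernel/tracing/trace_marker
 */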
7045 tracing_mark_write(struct file *filp, const char __user *ubuf,
7046 size_t cnt, loff_t *fpos)
7048 struct trace_array *tr = filp->private_data;
7049 struct ring_buffer_event *event;
7050 enum event_trigger_type tt = ETT_NONE;
7051 struct trace_buffer *buffer;
7052 struct print_entry *entry;
7057 /* Used in tracing_mark_raw_write() as well */
7058 #define FAULTED_STR "<faulted>"
7059 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7061 if (tracing_disabled)
7064 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7067 if (cnt > TRACE_BUF_SIZE)
7068 cnt = TRACE_BUF_SIZE;
7070 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7072 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7074 /* If less than "<faulted>", then make sure we can still add that */
7075 if (cnt < FAULTED_SIZE)
7076 size += FAULTED_SIZE - cnt;
7078 buffer = tr->array_buffer.buffer;
7079 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7081 if (unlikely(!event))
7082 /* Ring buffer disabled, return as if not open for write */
7085 entry = ring_buffer_event_data(event);
7086 entry->ip = _THIS_IP_;
7088 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7090 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7096 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7097 /* do not add \n before testing triggers, but add \0 */
7098 entry->buf[cnt] = '\0';
7099 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7102 if (entry->buf[cnt - 1] != '\n') {
7103 entry->buf[cnt] = '\n';
7104 entry->buf[cnt + 1] = '\0';
7106 entry->buf[cnt] = '\0';
7108 if (static_branch_unlikely(&trace_marker_exports_enabled))
7109 ftrace_exports(event, TRACE_EXPORT_MARKER);
7110 __buffer_unlock_commit(buffer, event);
7113 event_triggers_post_call(tr->trace_marker_file, tt);
7121 /* Limit it for now to 3K (including tag) */
7122 #define RAW_DATA_MAX_SIZE (1024*3)
7125 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7126 size_t cnt, loff_t *fpos)
7128 struct trace_array *tr = filp->private_data;
7129 struct ring_buffer_event *event;
7130 struct trace_buffer *buffer;
7131 struct raw_data_entry *entry;
7136 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7138 if (tracing_disabled)
7141 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7144 /* The marker must at least have a tag id */
7145 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7148 if (cnt > TRACE_BUF_SIZE)
7149 cnt = TRACE_BUF_SIZE;
7151 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7153 size = sizeof(*entry) + cnt;
7154 if (cnt < FAULT_SIZE_ID)
7155 size += FAULT_SIZE_ID - cnt;
7157 buffer = tr->array_buffer.buffer;
7158 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7161 /* Ring buffer disabled, return as if not open for write */
7164 entry = ring_buffer_event_data(event);
7166 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7169 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7174 __buffer_unlock_commit(buffer, event);
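/*
 * The trace_clock file handled below lists every registered clock and
 * brackets the active one, e.g.:
 *
 *	# cat trace_clock
 *	[local] global counter uptime perf ...
 *	# echo global > trace_clock
 *
 * Switching clocks resets the buffers (see tracing_set_clock()) so that
 * timestamps from different clocks are never mixed.
 */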
7182 static int tracing_clock_show(struct seq_file *m, void *v)
7184 struct trace_array *tr = m->private;
7187 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7189 "%s%s%s%s", i ? " " : "",
7190 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7191 i == tr->clock_id ? "]" : "");
7197 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7201 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7202 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7205 if (i == ARRAY_SIZE(trace_clocks))
7208 mutex_lock(&trace_types_lock);
7212 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7215 * New clock may not be consistent with the previous clock.
7216 * Reset the buffer so that it doesn't have incomparable timestamps.
7218 tracing_reset_online_cpus(&tr->array_buffer);
7220 #ifdef CONFIG_TRACER_MAX_TRACE
7221 if (tr->max_buffer.buffer)
7222 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7223 tracing_reset_online_cpus(&tr->max_buffer);
7226 mutex_unlock(&trace_types_lock);
7231 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7232 size_t cnt, loff_t *fpos)
7234 struct seq_file *m = filp->private_data;
7235 struct trace_array *tr = m->private;
7237 const char *clockstr;
7240 if (cnt >= sizeof(buf))
7243 if (copy_from_user(buf, ubuf, cnt))
7248 clockstr = strstrip(buf);
7250 ret = tracing_set_clock(tr, clockstr);
7259 static int tracing_clock_open(struct inode *inode, struct file *file)
7261 struct trace_array *tr = inode->i_private;
7264 ret = tracing_check_open_get_tr(tr);
7268 ret = single_open(file, tracing_clock_show, inode->i_private);
7270 trace_array_put(tr);
7275 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7277 struct trace_array *tr = m->private;
7279 mutex_lock(&trace_types_lock);
7281 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7282 seq_puts(m, "delta [absolute]\n");
7284 seq_puts(m, "[delta] absolute\n");
7286 mutex_unlock(&trace_types_lock);
7291 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7293 struct trace_array *tr = inode->i_private;
7296 ret = tracing_check_open_get_tr(tr);
7300 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7302 trace_array_put(tr);
7307 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7309 if (rbe == this_cpu_read(trace_buffered_event))
7310 return ring_buffer_time_stamp(buffer);
7312 return ring_buffer_event_time_stamp(buffer, rbe);
7316 * Set or disable using the per CPU trace_buffered_event when possible.
7318 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7322 mutex_lock(&trace_types_lock);
7324 if (set && tr->no_filter_buffering_ref++)
7328 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7333 --tr->no_filter_buffering_ref;
7336 mutex_unlock(&trace_types_lock);
7341 struct ftrace_buffer_info {
7342 struct trace_iterator iter;
7344 unsigned int spare_cpu;
7348 #ifdef CONFIG_TRACER_SNAPSHOT
7349 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7351 struct trace_array *tr = inode->i_private;
7352 struct trace_iterator *iter;
7356 ret = tracing_check_open_get_tr(tr);
7360 if (file->f_mode & FMODE_READ) {
7361 iter = __tracing_open(inode, file, true);
7363 ret = PTR_ERR(iter);
7365 /* Writes still need the seq_file to hold the private data */
7367 m = kzalloc(sizeof(*m), GFP_KERNEL);
7370 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7378 iter->array_buffer = &tr->max_buffer;
7379 iter->cpu_file = tracing_get_cpu(inode);
7381 file->private_data = m;
7385 trace_array_put(tr);
7391 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7394 struct seq_file *m = filp->private_data;
7395 struct trace_iterator *iter = m->private;
7396 struct trace_array *tr = iter->tr;
7400 ret = tracing_update_buffers();
7404 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7408 mutex_lock(&trace_types_lock);
7410 if (tr->current_trace->use_max_tr) {
7415 arch_spin_lock(&tr->max_lock);
7416 if (tr->cond_snapshot)
7418 arch_spin_unlock(&tr->max_lock);
7424 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7428 if (tr->allocated_snapshot)
7432 /* Only allow per-cpu swap if the ring buffer supports it */
7433 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7434 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7439 if (tr->allocated_snapshot)
7440 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7441 &tr->array_buffer, iter->cpu_file);
7443 ret = tracing_alloc_snapshot_instance(tr);
7446 local_irq_disable();
7447 /* Now, we're going to swap */
7448 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7449 update_max_tr(tr, current, smp_processor_id(), NULL);
7451 update_max_tr_single(tr, current, iter->cpu_file);
7455 if (tr->allocated_snapshot) {
7456 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7457 tracing_reset_online_cpus(&tr->max_buffer);
7459 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7469 mutex_unlock(&trace_types_lock);
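/*
 * Illustrative only (not part of this file): the snapshot file wired up to
 * tracing_snapshot_write() above (CONFIG_TRACER_SNAPSHOT kernels).  Writing
 * "1" allocates the max_buffer if needed and swaps it with the live buffer;
 * the captured data can then be read back from the same file.  The tracefs
 * path is an assumption about the running system.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "1\n", 2);	// take the snapshot (swap in max_buffer)
 *		close(fd);
 *		// Read the file afterwards (e.g. "cat snapshot") to dump the
 *		// captured trace.
 *		return 0;
 *	}
 */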
7473 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7475 struct seq_file *m = file->private_data;
7478 ret = tracing_release(inode, file);
7480 if (file->f_mode & FMODE_READ)
7483 /* If write only, the seq_file is just a stub */
7491 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7492 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7493 size_t count, loff_t *ppos);
7494 static int tracing_buffers_release(struct inode *inode, struct file *file);
7495 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7496 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7498 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7500 struct ftrace_buffer_info *info;
7503 /* The following checks for tracefs lockdown */
7504 ret = tracing_buffers_open(inode, filp);
7508 info = filp->private_data;
7510 if (info->iter.trace->use_max_tr) {
7511 tracing_buffers_release(inode, filp);
7515 info->iter.snapshot = true;
7516 info->iter.array_buffer = &info->iter.tr->max_buffer;
7521 #endif /* CONFIG_TRACER_SNAPSHOT */
7524 static const struct file_operations tracing_thresh_fops = {
7525 .open = tracing_open_generic,
7526 .read = tracing_thresh_read,
7527 .write = tracing_thresh_write,
7528 .llseek = generic_file_llseek,
7531 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7532 static const struct file_operations tracing_max_lat_fops = {
7533 .open = tracing_open_generic,
7534 .read = tracing_max_lat_read,
7535 .write = tracing_max_lat_write,
7536 .llseek = generic_file_llseek,
7540 static const struct file_operations set_tracer_fops = {
7541 .open = tracing_open_generic,
7542 .read = tracing_set_trace_read,
7543 .write = tracing_set_trace_write,
7544 .llseek = generic_file_llseek,
7547 static const struct file_operations tracing_pipe_fops = {
7548 .open = tracing_open_pipe,
7549 .poll = tracing_poll_pipe,
7550 .read = tracing_read_pipe,
7551 .splice_read = tracing_splice_read_pipe,
7552 .release = tracing_release_pipe,
7553 .llseek = no_llseek,
7556 static const struct file_operations tracing_entries_fops = {
7557 .open = tracing_open_generic_tr,
7558 .read = tracing_entries_read,
7559 .write = tracing_entries_write,
7560 .llseek = generic_file_llseek,
7561 .release = tracing_release_generic_tr,
7564 static const struct file_operations tracing_total_entries_fops = {
7565 .open = tracing_open_generic_tr,
7566 .read = tracing_total_entries_read,
7567 .llseek = generic_file_llseek,
7568 .release = tracing_release_generic_tr,
7571 static const struct file_operations tracing_free_buffer_fops = {
7572 .open = tracing_open_generic_tr,
7573 .write = tracing_free_buffer_write,
7574 .release = tracing_free_buffer_release,
7577 static const struct file_operations tracing_mark_fops = {
7578 .open = tracing_open_generic_tr,
7579 .write = tracing_mark_write,
7580 .llseek = generic_file_llseek,
7581 .release = tracing_release_generic_tr,
7584 static const struct file_operations tracing_mark_raw_fops = {
7585 .open = tracing_open_generic_tr,
7586 .write = tracing_mark_raw_write,
7587 .llseek = generic_file_llseek,
7588 .release = tracing_release_generic_tr,
7591 static const struct file_operations trace_clock_fops = {
7592 .open = tracing_clock_open,
7594 .llseek = seq_lseek,
7595 .release = tracing_single_release_tr,
7596 .write = tracing_clock_write,
7599 static const struct file_operations trace_time_stamp_mode_fops = {
7600 .open = tracing_time_stamp_mode_open,
7602 .llseek = seq_lseek,
7603 .release = tracing_single_release_tr,
7606 #ifdef CONFIG_TRACER_SNAPSHOT
7607 static const struct file_operations snapshot_fops = {
7608 .open = tracing_snapshot_open,
7610 .write = tracing_snapshot_write,
7611 .llseek = tracing_lseek,
7612 .release = tracing_snapshot_release,
7615 static const struct file_operations snapshot_raw_fops = {
7616 .open = snapshot_raw_open,
7617 .read = tracing_buffers_read,
7618 .release = tracing_buffers_release,
7619 .splice_read = tracing_buffers_splice_read,
7620 .llseek = no_llseek,
7623 #endif /* CONFIG_TRACER_SNAPSHOT */
7626 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7627 * @filp: The active open file structure
7628 * @ubuf: The userspace provided buffer to read the value from
7629 * @cnt: The maximum number of bytes to read
7630 * @ppos: The current "file" position
7632 * This function implements the write interface for a struct trace_min_max_param.
7633 * The filp->private_data must point to a trace_min_max_param structure that
7634 * defines where to write the value, the min and the max acceptable values,
7635 * and a lock to protect the write.
7638 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7640 struct trace_min_max_param *param = filp->private_data;
7647 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7652 mutex_lock(param->lock);
7654 if (param->min && val < *param->min)
7657 if (param->max && val > *param->max)
7664 mutex_unlock(param->lock);
7673 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7674 * @filp: The active open file structure
7675 * @ubuf: The userspace provided buffer to read value into
7676 * @cnt: The maximum number of bytes to read
7677 * @ppos: The current "file" position
7679 * This function implements the read interface for a struct trace_min_max_param.
7680 * The filp->private_data must point to a trace_min_max_param struct with valid
7684 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7686 struct trace_min_max_param *param = filp->private_data;
7687 char buf[U64_STR_SIZE];
7696 if (cnt > sizeof(buf))
7699 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7701 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7704 const struct file_operations trace_min_max_fops = {
7705 .open = tracing_open_generic,
7706 .read = trace_min_max_read,
7707 .write = trace_min_max_write,
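/*
 * Illustrative only: a sketch of how a kernel-side u64 tunable could be
 * exposed through trace_min_max_fops.  It assumes struct trace_min_max_param
 * (declared in trace.h) carries the lock/val/min/max pointers used by the
 * read/write handlers above; the file name and storage below are made up for
 * the example.
 *
 *	static u64 example_val = 25;
 *	static u64 example_min = 1;
 *	static u64 example_max = 100;
 *	static DEFINE_MUTEX(example_lock);
 *
 *	static struct trace_min_max_param example_param = {
 *		.lock	= &example_lock,
 *		.val	= &example_val,
 *		.min	= &example_min,
 *		.max	= &example_max,
 *	};
 *
 *	// from a tracefs setup path, with a suitable parent dentry:
 *	trace_create_file("example_knob", 0644, parent, &example_param,
 *			  &trace_min_max_fops);
 */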
7710 #define TRACING_LOG_ERRS_MAX 8
7711 #define TRACING_LOG_LOC_MAX 128
7713 #define CMD_PREFIX " Command: "
7716 const char **errs; /* ptr to loc-specific array of err strings */
7717 u8 type; /* index into errs -> specific err string */
7718 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7722 struct tracing_log_err {
7723 struct list_head list;
7724 struct err_info info;
7725 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7726 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7729 static DEFINE_MUTEX(tracing_err_log_lock);
7731 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7733 struct tracing_log_err *err;
7735 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7736 err = kzalloc(sizeof(*err), GFP_KERNEL);
7738 err = ERR_PTR(-ENOMEM);
7739 tr->n_err_log_entries++;
7744 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7745 list_del(&err->list);
7751 * err_pos - find the position of a string within a command for error caret placement
7752 * @cmd: The tracing command that caused the error
7753 * @str: The string to position the caret at within @cmd
7755 * Finds the position of the first occurrence of @str within @cmd. The
7756 * return value can be passed to tracing_log_err() for caret placement
7759 * Returns the index within @cmd of the first occurrence of @str or 0
7760 * if @str was not found.
7762 unsigned int err_pos(char *cmd, const char *str)
7766 if (WARN_ON(!strlen(cmd)))
7769 found = strstr(cmd, str);
7777 * tracing_log_err - write an error to the tracing error log
7778 * @tr: The associated trace array for the error (NULL for top level array)
7779 * @loc: A string describing where the error occurred
7780 * @cmd: The tracing command that caused the error
7781 * @errs: The array of loc-specific static error strings
7782 * @type: The index into errs[], which produces the specific static err string
7783 * @pos: The position the caret should be placed in the cmd
7785 * Writes an error into tracing/error_log of the form:
7787 * <loc>: error: <text>
7791 * tracing/error_log is a small log file containing the last
7792 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7793 * unless there has been a tracing error, and the error log can be
7794 * cleared and have its memory freed by writing the empty string in
7795 * truncation mode to it i.e. echo > tracing/error_log.
7797 * NOTE: the @errs array along with the @type param are used to
7798 * produce a static error string - this string is not copied and saved
7799 * when the error is logged - only a pointer to it is saved. See
7800 * existing callers for examples of how static strings are typically
7801 * defined for use with tracing_log_err().
7803 void tracing_log_err(struct trace_array *tr,
7804 const char *loc, const char *cmd,
7805 const char **errs, u8 type, u8 pos)
7807 struct tracing_log_err *err;
7812 mutex_lock(&tracing_err_log_lock);
7813 err = get_tracing_log_err(tr);
7814 if (PTR_ERR(err) == -ENOMEM) {
7815 mutex_unlock(&tracing_err_log_lock);
7819 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7820 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7822 err->info.errs = errs;
7823 err->info.type = type;
7824 err->info.pos = pos;
7825 err->info.ts = local_clock();
7827 list_add_tail(&err->list, &tr->err_log);
7828 mutex_unlock(&tracing_err_log_lock);
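/*
 * Illustrative only: the kind of caller sketched by the comment above.  The
 * command name, error strings and bad_token variable are hypothetical; only
 * the tracing_log_err()/err_pos() signatures come from this file.
 *
 *	static const char *example_cmd_errs[] = {
 *		"Missing field name",
 *		"Duplicate field name",
 *	};
 *
 *	// on a parse failure inside a hypothetical command handler:
 *	tracing_log_err(tr, "example_cmd", cmd_string, example_cmd_errs,
 *			0, err_pos(cmd_string, bad_token));
 *	// type 0 selects "Missing field name"; err_pos() places the caret
 *	// under the first occurrence of bad_token within cmd_string.
 */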
7831 static void clear_tracing_err_log(struct trace_array *tr)
7833 struct tracing_log_err *err, *next;
7835 mutex_lock(&tracing_err_log_lock);
7836 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7837 list_del(&err->list);
7841 tr->n_err_log_entries = 0;
7842 mutex_unlock(&tracing_err_log_lock);
7845 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7847 struct trace_array *tr = m->private;
7849 mutex_lock(&tracing_err_log_lock);
7851 return seq_list_start(&tr->err_log, *pos);
7854 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7856 struct trace_array *tr = m->private;
7858 return seq_list_next(v, &tr->err_log, pos);
7861 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7863 mutex_unlock(&tracing_err_log_lock);
7866 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7870 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7872 for (i = 0; i < pos; i++)
7877 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7879 struct tracing_log_err *err = v;
7882 const char *err_text = err->info.errs[err->info.type];
7883 u64 sec = err->info.ts;
7886 nsec = do_div(sec, NSEC_PER_SEC);
7887 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7888 err->loc, err_text);
7889 seq_printf(m, "%s", err->cmd);
7890 tracing_err_log_show_pos(m, err->info.pos);
7896 static const struct seq_operations tracing_err_log_seq_ops = {
7897 .start = tracing_err_log_seq_start,
7898 .next = tracing_err_log_seq_next,
7899 .stop = tracing_err_log_seq_stop,
7900 .show = tracing_err_log_seq_show
7903 static int tracing_err_log_open(struct inode *inode, struct file *file)
7905 struct trace_array *tr = inode->i_private;
7908 ret = tracing_check_open_get_tr(tr);
7912 /* If this file was opened for write, then erase contents */
7913 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7914 clear_tracing_err_log(tr);
7916 if (file->f_mode & FMODE_READ) {
7917 ret = seq_open(file, &tracing_err_log_seq_ops);
7919 struct seq_file *m = file->private_data;
7922 trace_array_put(tr);
7928 static ssize_t tracing_err_log_write(struct file *file,
7929 const char __user *buffer,
7930 size_t count, loff_t *ppos)
7935 static int tracing_err_log_release(struct inode *inode, struct file *file)
7937 struct trace_array *tr = inode->i_private;
7939 trace_array_put(tr);
7941 if (file->f_mode & FMODE_READ)
7942 seq_release(inode, file);
7947 static const struct file_operations tracing_err_log_fops = {
7948 .open = tracing_err_log_open,
7949 .write = tracing_err_log_write,
7951 .llseek = seq_lseek,
7952 .release = tracing_err_log_release,
7955 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7957 struct trace_array *tr = inode->i_private;
7958 struct ftrace_buffer_info *info;
7961 ret = tracing_check_open_get_tr(tr);
7965 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7967 trace_array_put(tr);
7971 mutex_lock(&trace_types_lock);
7974 info->iter.cpu_file = tracing_get_cpu(inode);
7975 info->iter.trace = tr->current_trace;
7976 info->iter.array_buffer = &tr->array_buffer;
7978 /* Force reading ring buffer for first read */
7979 info->read = (unsigned int)-1;
7981 filp->private_data = info;
7985 mutex_unlock(&trace_types_lock);
7987 ret = nonseekable_open(inode, filp);
7989 trace_array_put(tr);
7995 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7997 struct ftrace_buffer_info *info = filp->private_data;
7998 struct trace_iterator *iter = &info->iter;
8000 return trace_poll(iter, filp, poll_table);
8004 tracing_buffers_read(struct file *filp, char __user *ubuf,
8005 size_t count, loff_t *ppos)
8007 struct ftrace_buffer_info *info = filp->private_data;
8008 struct trace_iterator *iter = &info->iter;
8015 #ifdef CONFIG_TRACER_MAX_TRACE
8016 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8021 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8023 if (IS_ERR(info->spare)) {
8024 ret = PTR_ERR(info->spare);
8027 info->spare_cpu = iter->cpu_file;
8033 /* Do we have previous read data to read? */
8034 if (info->read < PAGE_SIZE)
8038 trace_access_lock(iter->cpu_file);
8039 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8043 trace_access_unlock(iter->cpu_file);
8046 if (trace_empty(iter)) {
8047 if ((filp->f_flags & O_NONBLOCK))
8050 ret = wait_on_pipe(iter, 0);
8061 size = PAGE_SIZE - info->read;
8065 ret = copy_to_user(ubuf, info->spare + info->read, size);
8077 static int tracing_buffers_release(struct inode *inode, struct file *file)
8079 struct ftrace_buffer_info *info = file->private_data;
8080 struct trace_iterator *iter = &info->iter;
8082 mutex_lock(&trace_types_lock);
8084 iter->tr->trace_ref--;
8086 __trace_array_put(iter->tr);
8089 ring_buffer_free_read_page(iter->array_buffer->buffer,
8090 info->spare_cpu, info->spare);
8093 mutex_unlock(&trace_types_lock);
8099 struct trace_buffer *buffer;
8102 refcount_t refcount;
8105 static void buffer_ref_release(struct buffer_ref *ref)
8107 if (!refcount_dec_and_test(&ref->refcount))
8109 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8113 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8114 struct pipe_buffer *buf)
8116 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8118 buffer_ref_release(ref);
8122 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8123 struct pipe_buffer *buf)
8125 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8127 if (refcount_read(&ref->refcount) > INT_MAX/2)
8130 refcount_inc(&ref->refcount);
8134 /* Pipe buffer operations for a buffer. */
8135 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8136 .release = buffer_pipe_buf_release,
8137 .get = buffer_pipe_buf_get,
8141 * Callback from splice_to_pipe(): release any pages still held in the
8142 * spd in case we errored out while filling the pipe.
8144 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8146 struct buffer_ref *ref =
8147 (struct buffer_ref *)spd->partial[i].private;
8149 buffer_ref_release(ref);
8150 spd->partial[i].private = 0;
8154 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8155 struct pipe_inode_info *pipe, size_t len,
8158 struct ftrace_buffer_info *info = file->private_data;
8159 struct trace_iterator *iter = &info->iter;
8160 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8161 struct page *pages_def[PIPE_DEF_BUFFERS];
8162 struct splice_pipe_desc spd = {
8164 .partial = partial_def,
8165 .nr_pages_max = PIPE_DEF_BUFFERS,
8166 .ops = &buffer_pipe_buf_ops,
8167 .spd_release = buffer_spd_release,
8169 struct buffer_ref *ref;
8173 #ifdef CONFIG_TRACER_MAX_TRACE
8174 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8178 if (*ppos & (PAGE_SIZE - 1))
8181 if (len & (PAGE_SIZE - 1)) {
8182 if (len < PAGE_SIZE)
8187 if (splice_grow_spd(pipe, &spd))
8191 trace_access_lock(iter->cpu_file);
8192 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8194 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8198 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8204 refcount_set(&ref->refcount, 1);
8205 ref->buffer = iter->array_buffer->buffer;
8206 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8207 if (IS_ERR(ref->page)) {
8208 ret = PTR_ERR(ref->page);
8213 ref->cpu = iter->cpu_file;
8215 r = ring_buffer_read_page(ref->buffer, &ref->page,
8216 len, iter->cpu_file, 1);
8218 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8224 page = virt_to_page(ref->page);
8226 spd.pages[i] = page;
8227 spd.partial[i].len = PAGE_SIZE;
8228 spd.partial[i].offset = 0;
8229 spd.partial[i].private = (unsigned long)ref;
8233 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8236 trace_access_unlock(iter->cpu_file);
8239 /* did we read anything? */
8240 if (!spd.nr_pages) {
8245 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8248 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8255 ret = splice_to_pipe(pipe, &spd);
8257 splice_shrink_spd(&spd);
8262 static const struct file_operations tracing_buffers_fops = {
8263 .open = tracing_buffers_open,
8264 .read = tracing_buffers_read,
8265 .poll = tracing_buffers_poll,
8266 .release = tracing_buffers_release,
8267 .splice_read = tracing_buffers_splice_read,
8268 .llseek = no_llseek,
8272 tracing_stats_read(struct file *filp, char __user *ubuf,
8273 size_t count, loff_t *ppos)
8275 struct inode *inode = file_inode(filp);
8276 struct trace_array *tr = inode->i_private;
8277 struct array_buffer *trace_buf = &tr->array_buffer;
8278 int cpu = tracing_get_cpu(inode);
8279 struct trace_seq *s;
8281 unsigned long long t;
8282 unsigned long usec_rem;
8284 s = kmalloc(sizeof(*s), GFP_KERNEL);
8290 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8291 trace_seq_printf(s, "entries: %ld\n", cnt);
8293 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8294 trace_seq_printf(s, "overrun: %ld\n", cnt);
8296 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8297 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8299 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8300 trace_seq_printf(s, "bytes: %ld\n", cnt);
8302 if (trace_clocks[tr->clock_id].in_ns) {
8303 /* local or global for trace_clock */
8304 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8305 usec_rem = do_div(t, USEC_PER_SEC);
8306 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8309 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8310 usec_rem = do_div(t, USEC_PER_SEC);
8311 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8313 /* counter or tsc mode for trace_clock */
8314 trace_seq_printf(s, "oldest event ts: %llu\n",
8315 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8317 trace_seq_printf(s, "now ts: %llu\n",
8318 ring_buffer_time_stamp(trace_buf->buffer));
8321 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8322 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8324 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8325 trace_seq_printf(s, "read events: %ld\n", cnt);
8327 count = simple_read_from_buffer(ubuf, count, ppos,
8328 s->buffer, trace_seq_used(s));
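/*
 * Illustrative only (not part of this file): the per-CPU "stats" files
 * produced by tracing_stats_read() above are plain text and can simply be
 * read; the cpu0 path below is an assumption about the running system.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[1024];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/tracing/per_cpu/cpu0/stats", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);	// entries:, overrun:, ...
 *		close(fd);
 *		return 0;
 *	}
 */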
8335 static const struct file_operations tracing_stats_fops = {
8336 .open = tracing_open_generic_tr,
8337 .read = tracing_stats_read,
8338 .llseek = generic_file_llseek,
8339 .release = tracing_release_generic_tr,
8342 #ifdef CONFIG_DYNAMIC_FTRACE
8345 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8346 size_t cnt, loff_t *ppos)
8352 /* 256 should be plenty to hold the amount needed */
8353 buf = kmalloc(256, GFP_KERNEL);
8357 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8358 ftrace_update_tot_cnt,
8359 ftrace_number_of_pages,
8360 ftrace_number_of_groups);
8362 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8367 static const struct file_operations tracing_dyn_info_fops = {
8368 .open = tracing_open_generic,
8369 .read = tracing_read_dyn_info,
8370 .llseek = generic_file_llseek,
8372 #endif /* CONFIG_DYNAMIC_FTRACE */
8374 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8376 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8377 struct trace_array *tr, struct ftrace_probe_ops *ops,
8380 tracing_snapshot_instance(tr);
8384 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8385 struct trace_array *tr, struct ftrace_probe_ops *ops,
8388 struct ftrace_func_mapper *mapper = data;
8392 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8402 tracing_snapshot_instance(tr);
8406 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8407 struct ftrace_probe_ops *ops, void *data)
8409 struct ftrace_func_mapper *mapper = data;
8412 seq_printf(m, "%ps:", (void *)ip);
8414 seq_puts(m, "snapshot");
8417 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8420 seq_printf(m, ":count=%ld\n", *count);
8422 seq_puts(m, ":unlimited\n");
8428 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8429 unsigned long ip, void *init_data, void **data)
8431 struct ftrace_func_mapper *mapper = *data;
8434 mapper = allocate_ftrace_func_mapper();
8440 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8444 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8445 unsigned long ip, void *data)
8447 struct ftrace_func_mapper *mapper = data;
8452 free_ftrace_func_mapper(mapper, NULL);
8456 ftrace_func_mapper_remove_ip(mapper, ip);
8459 static struct ftrace_probe_ops snapshot_probe_ops = {
8460 .func = ftrace_snapshot,
8461 .print = ftrace_snapshot_print,
8464 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8465 .func = ftrace_count_snapshot,
8466 .print = ftrace_snapshot_print,
8467 .init = ftrace_snapshot_init,
8468 .free = ftrace_snapshot_free,
8472 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8473 char *glob, char *cmd, char *param, int enable)
8475 struct ftrace_probe_ops *ops;
8476 void *count = (void *)-1;
8483 /* hash funcs only work with set_ftrace_filter */
8487 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8490 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8495 number = strsep(¶m, ":");
8497 if (!strlen(number))
8501 * We use the callback data field (which is a pointer) as our counter.
8504 ret = kstrtoul(number, 0, (unsigned long *)&count);
8509 ret = tracing_alloc_snapshot_instance(tr);
8513 ret = register_ftrace_function_probe(glob, tr, ops, count);
8516 return ret < 0 ? ret : 0;
8519 static struct ftrace_func_command ftrace_snapshot_cmd = {
8521 .func = ftrace_trace_snapshot_callback,
8524 static __init int register_snapshot_cmd(void)
8526 return register_ftrace_command(&ftrace_snapshot_cmd);
8529 static inline __init int register_snapshot_cmd(void) { return 0; }
8530 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8532 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8534 if (WARN_ON(!tr->dir))
8535 return ERR_PTR(-ENODEV);
8537 /* Top directory uses NULL as the parent */
8538 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8541 /* All sub buffers have a descriptor */
8545 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8547 struct dentry *d_tracer;
8550 return tr->percpu_dir;
8552 d_tracer = tracing_get_dentry(tr);
8553 if (IS_ERR(d_tracer))
8556 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8558 MEM_FAIL(!tr->percpu_dir,
8559 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8561 return tr->percpu_dir;
8564 static struct dentry *
8565 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8566 void *data, long cpu, const struct file_operations *fops)
8568 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8570 if (ret) /* See tracing_get_cpu() */
8571 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8576 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8578 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8579 struct dentry *d_cpu;
8580 char cpu_dir[30]; /* 30 characters should be more than enough */
8585 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8586 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8588 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8592 /* per cpu trace_pipe */
8593 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8594 tr, cpu, &tracing_pipe_fops);
8597 trace_create_cpu_file("trace", 0644, d_cpu,
8598 tr, cpu, &tracing_fops);
8600 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8601 tr, cpu, &tracing_buffers_fops);
8603 trace_create_cpu_file("stats", 0444, d_cpu,
8604 tr, cpu, &tracing_stats_fops);
8606 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8607 tr, cpu, &tracing_entries_fops);
8609 #ifdef CONFIG_TRACER_SNAPSHOT
8610 trace_create_cpu_file("snapshot", 0644, d_cpu,
8611 tr, cpu, &snapshot_fops);
8613 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8614 tr, cpu, &snapshot_raw_fops);
8618 #ifdef CONFIG_FTRACE_SELFTEST
8619 /* Let selftest have access to static functions in this file */
8620 #include "trace_selftest.c"
8624 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8627 struct trace_option_dentry *topt = filp->private_data;
8630 if (topt->flags->val & topt->opt->bit)
8635 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8639 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8642 struct trace_option_dentry *topt = filp->private_data;
8646 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8650 if (val != 0 && val != 1)
8653 if (!!(topt->flags->val & topt->opt->bit) != val) {
8654 mutex_lock(&trace_types_lock);
8655 ret = __set_tracer_option(topt->tr, topt->flags,
8657 mutex_unlock(&trace_types_lock);
8668 static const struct file_operations trace_options_fops = {
8669 .open = tracing_open_generic,
8670 .read = trace_options_read,
8671 .write = trace_options_write,
8672 .llseek = generic_file_llseek,
8676 * In order to pass in both the trace_array descriptor as well as the index
8677 * to the flag that the trace option file represents, the trace_array
8678 * has a character array of trace_flags_index[], which holds the index
8679 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8680 * The address of this character array is passed to the flag option file
8681 * read/write callbacks.
8683 * In order to extract both the index and the trace_array descriptor,
8684 * get_tr_index() uses the following algorithm.
8688 * The pointer passed in is the address of one index entry, so
8691 * dereferencing it yields the flag's bit index. Subtracting that index
8692 * from the pointer then brings us back to the start of the array:
8694 * ptr - idx == &index[0]
8696 * Then a simple container_of() from that pointer gets us to the
8697 * trace_array descriptor.
8699 static void get_tr_index(void *data, struct trace_array **ptr,
8700 unsigned int *pindex)
8702 *pindex = *(unsigned char *)data;
8704 *ptr = container_of(data - *pindex, struct trace_array,
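/*
 * Illustrative only: a self-contained user-space demo of the index-array +
 * container_of() trick described above, using a simplified container_of()
 * and a made-up struct; trace.c itself applies it to trace_flags_index[].
 *
 *	#include <stddef.h>
 *	#include <stdio.h>
 *
 *	#define container_of(ptr, type, member) \
 *		((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *	struct demo {
 *		int payload;
 *		unsigned char index[4];		// index[i] == i
 *	};
 *
 *	int main(void)
 *	{
 *		struct demo d = { .payload = 42, .index = { 0, 1, 2, 3 } };
 *		void *data = &d.index[2];	// what the option file would store
 *		unsigned int idx = *(unsigned char *)data;
 *		struct demo *back = container_of((unsigned char *)data - idx,
 *						 struct demo, index);
 *
 *		printf("idx=%u payload=%d\n", idx, back->payload);
 *		return 0;
 *	}
 */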
8709 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8712 void *tr_index = filp->private_data;
8713 struct trace_array *tr;
8717 get_tr_index(tr_index, &tr, &index);
8719 if (tr->trace_flags & (1 << index))
8724 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8728 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8731 void *tr_index = filp->private_data;
8732 struct trace_array *tr;
8737 get_tr_index(tr_index, &tr, &index);
8739 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8743 if (val != 0 && val != 1)
8746 mutex_lock(&event_mutex);
8747 mutex_lock(&trace_types_lock);
8748 ret = set_tracer_flag(tr, 1 << index, val);
8749 mutex_unlock(&trace_types_lock);
8750 mutex_unlock(&event_mutex);
8760 static const struct file_operations trace_options_core_fops = {
8761 .open = tracing_open_generic,
8762 .read = trace_options_core_read,
8763 .write = trace_options_core_write,
8764 .llseek = generic_file_llseek,
8767 struct dentry *trace_create_file(const char *name,
8769 struct dentry *parent,
8771 const struct file_operations *fops)
8775 ret = tracefs_create_file(name, mode, parent, data, fops);
8777 pr_warn("Could not create tracefs '%s' entry\n", name);
8783 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8785 struct dentry *d_tracer;
8790 d_tracer = tracing_get_dentry(tr);
8791 if (IS_ERR(d_tracer))
8794 tr->options = tracefs_create_dir("options", d_tracer);
8796 pr_warn("Could not create tracefs directory 'options'\n");
8804 create_trace_option_file(struct trace_array *tr,
8805 struct trace_option_dentry *topt,
8806 struct tracer_flags *flags,
8807 struct tracer_opt *opt)
8809 struct dentry *t_options;
8811 t_options = trace_options_init_dentry(tr);
8815 topt->flags = flags;
8819 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8820 &trace_options_fops);
8825 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8827 struct trace_option_dentry *topts;
8828 struct trace_options *tr_topts;
8829 struct tracer_flags *flags;
8830 struct tracer_opt *opts;
8837 flags = tracer->flags;
8839 if (!flags || !flags->opts)
8843 * If this is an instance, only create flags for tracers
8844 * the instance may have.
8846 if (!trace_ok_for_array(tracer, tr))
8849 for (i = 0; i < tr->nr_topts; i++) {
8850 /* Make sure there are no duplicate flags. */
8851 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8857 for (cnt = 0; opts[cnt].name; cnt++)
8860 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8864 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8871 tr->topts = tr_topts;
8872 tr->topts[tr->nr_topts].tracer = tracer;
8873 tr->topts[tr->nr_topts].topts = topts;
8876 for (cnt = 0; opts[cnt].name; cnt++) {
8877 create_trace_option_file(tr, &topts[cnt], flags,
8879 MEM_FAIL(topts[cnt].entry == NULL,
8880 "Failed to create trace option: %s",
8885 static struct dentry *
8886 create_trace_option_core_file(struct trace_array *tr,
8887 const char *option, long index)
8889 struct dentry *t_options;
8891 t_options = trace_options_init_dentry(tr);
8895 return trace_create_file(option, 0644, t_options,
8896 (void *)&tr->trace_flags_index[index],
8897 &trace_options_core_fops);
8900 static void create_trace_options_dir(struct trace_array *tr)
8902 struct dentry *t_options;
8903 bool top_level = tr == &global_trace;
8906 t_options = trace_options_init_dentry(tr);
8910 for (i = 0; trace_options[i]; i++) {
8912 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8913 create_trace_option_core_file(tr, trace_options[i], i);
8918 rb_simple_read(struct file *filp, char __user *ubuf,
8919 size_t cnt, loff_t *ppos)
8921 struct trace_array *tr = filp->private_data;
8925 r = tracer_tracing_is_on(tr);
8926 r = sprintf(buf, "%d\n", r);
8928 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8932 rb_simple_write(struct file *filp, const char __user *ubuf,
8933 size_t cnt, loff_t *ppos)
8935 struct trace_array *tr = filp->private_data;
8936 struct trace_buffer *buffer = tr->array_buffer.buffer;
8940 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8945 mutex_lock(&trace_types_lock);
8946 if (!!val == tracer_tracing_is_on(tr)) {
8947 val = 0; /* do nothing */
8949 tracer_tracing_on(tr);
8950 if (tr->current_trace->start)
8951 tr->current_trace->start(tr);
8953 tracer_tracing_off(tr);
8954 if (tr->current_trace->stop)
8955 tr->current_trace->stop(tr);
8957 mutex_unlock(&trace_types_lock);
8965 static const struct file_operations rb_simple_fops = {
8966 .open = tracing_open_generic_tr,
8967 .read = rb_simple_read,
8968 .write = rb_simple_write,
8969 .release = tracing_release_generic_tr,
8970 .llseek = default_llseek,
8974 buffer_percent_read(struct file *filp, char __user *ubuf,
8975 size_t cnt, loff_t *ppos)
8977 struct trace_array *tr = filp->private_data;
8981 r = tr->buffer_percent;
8982 r = sprintf(buf, "%d\n", r);
8984 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8988 buffer_percent_write(struct file *filp, const char __user *ubuf,
8989 size_t cnt, loff_t *ppos)
8991 struct trace_array *tr = filp->private_data;
8995 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9005 tr->buffer_percent = val;
9012 static const struct file_operations buffer_percent_fops = {
9013 .open = tracing_open_generic_tr,
9014 .read = buffer_percent_read,
9015 .write = buffer_percent_write,
9016 .release = tracing_release_generic_tr,
9017 .llseek = default_llseek,
9020 static struct dentry *trace_instance_dir;
9023 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9026 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9028 enum ring_buffer_flags rb_flags;
9030 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9034 buf->buffer = ring_buffer_alloc(size, rb_flags);
9038 buf->data = alloc_percpu(struct trace_array_cpu);
9040 ring_buffer_free(buf->buffer);
9045 /* Allocate the first page for all buffers */
9046 set_buffer_entries(&tr->array_buffer,
9047 ring_buffer_size(tr->array_buffer.buffer, 0));
9052 static int allocate_trace_buffers(struct trace_array *tr, int size)
9056 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9060 #ifdef CONFIG_TRACER_MAX_TRACE
9061 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9062 allocate_snapshot ? size : 1);
9063 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9064 ring_buffer_free(tr->array_buffer.buffer);
9065 tr->array_buffer.buffer = NULL;
9066 free_percpu(tr->array_buffer.data);
9067 tr->array_buffer.data = NULL;
9070 tr->allocated_snapshot = allocate_snapshot;
9073 * Only the top level trace array gets its snapshot allocated
9074 * from the kernel command line.
9076 allocate_snapshot = false;
9082 static void free_trace_buffer(struct array_buffer *buf)
9085 ring_buffer_free(buf->buffer);
9087 free_percpu(buf->data);
9092 static void free_trace_buffers(struct trace_array *tr)
9097 free_trace_buffer(&tr->array_buffer);
9099 #ifdef CONFIG_TRACER_MAX_TRACE
9100 free_trace_buffer(&tr->max_buffer);
9104 static void init_trace_flags_index(struct trace_array *tr)
9108 /* Used by the trace options files */
9109 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9110 tr->trace_flags_index[i] = i;
9113 static void __update_tracer_options(struct trace_array *tr)
9117 for (t = trace_types; t; t = t->next)
9118 add_tracer_options(tr, t);
9121 static void update_tracer_options(struct trace_array *tr)
9123 mutex_lock(&trace_types_lock);
9124 __update_tracer_options(tr);
9125 mutex_unlock(&trace_types_lock);
9128 /* Must have trace_types_lock held */
9129 struct trace_array *trace_array_find(const char *instance)
9131 struct trace_array *tr, *found = NULL;
9133 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9134 if (tr->name && strcmp(tr->name, instance) == 0) {
9143 struct trace_array *trace_array_find_get(const char *instance)
9145 struct trace_array *tr;
9147 mutex_lock(&trace_types_lock);
9148 tr = trace_array_find(instance);
9151 mutex_unlock(&trace_types_lock);
9156 static int trace_array_create_dir(struct trace_array *tr)
9160 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9164 ret = event_trace_add_tracer(tr->dir, tr);
9166 tracefs_remove(tr->dir);
9170 init_tracer_tracefs(tr, tr->dir);
9171 __update_tracer_options(tr);
9176 static struct trace_array *trace_array_create(const char *name)
9178 struct trace_array *tr;
9182 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9184 return ERR_PTR(ret);
9186 tr->name = kstrdup(name, GFP_KERNEL);
9190 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9193 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9195 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9197 raw_spin_lock_init(&tr->start_lock);
9199 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9201 tr->current_trace = &nop_trace;
9203 INIT_LIST_HEAD(&tr->systems);
9204 INIT_LIST_HEAD(&tr->events);
9205 INIT_LIST_HEAD(&tr->hist_vars);
9206 INIT_LIST_HEAD(&tr->err_log);
9208 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9211 if (ftrace_allocate_ftrace_ops(tr) < 0)
9214 ftrace_init_trace_array(tr);
9216 init_trace_flags_index(tr);
9218 if (trace_instance_dir) {
9219 ret = trace_array_create_dir(tr);
9223 __trace_early_add_events(tr);
9225 list_add(&tr->list, &ftrace_trace_arrays);
9232 ftrace_free_ftrace_ops(tr);
9233 free_trace_buffers(tr);
9234 free_cpumask_var(tr->tracing_cpumask);
9238 return ERR_PTR(ret);
9241 static int instance_mkdir(const char *name)
9243 struct trace_array *tr;
9246 mutex_lock(&event_mutex);
9247 mutex_lock(&trace_types_lock);
9250 if (trace_array_find(name))
9253 tr = trace_array_create(name);
9255 ret = PTR_ERR_OR_ZERO(tr);
9258 mutex_unlock(&trace_types_lock);
9259 mutex_unlock(&event_mutex);
9264 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9265 * @name: The name of the trace array to be looked up/created.
9267 * Returns a pointer to the trace array with the given name, or
9268 * NULL if it cannot be created.
9270 * NOTE: This function increments the reference counter associated with the
9271 * trace array returned. This makes sure it cannot be freed while in use.
9272 * Use trace_array_put() once the trace array is no longer needed.
9273 * If the trace_array is to be freed, trace_array_destroy() needs to
9274 * be called after the trace_array_put(), or simply let user space delete
9275 * it from the tracefs instances directory. But until the
9276 * trace_array_put() is called, user space can not delete it.
9279 struct trace_array *trace_array_get_by_name(const char *name)
9281 struct trace_array *tr;
9283 mutex_lock(&event_mutex);
9284 mutex_lock(&trace_types_lock);
9286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9287 if (tr->name && strcmp(tr->name, name) == 0)
9291 tr = trace_array_create(name);
9299 mutex_unlock(&trace_types_lock);
9300 mutex_unlock(&event_mutex);
9303 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
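/*
 * Illustrative only: how a kernel module might use the instance API exported
 * above.  "example_instance" and example_setup() are arbitrary names; the
 * calls and the put-before-destroy ordering follow the kernel-doc comment.
 *
 *	static int example_setup(void)
 *	{
 *		struct trace_array *tr;
 *
 *		tr = trace_array_get_by_name("example_instance");
 *		if (!tr)
 *			return -ENOMEM;
 *
 *		// ... use the instance while holding the reference ...
 *
 *		trace_array_put(tr);		// drop our reference
 *		trace_array_destroy(tr);	// optional: remove the instance
 *		return 0;
 *	}
 */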
9305 static int __remove_instance(struct trace_array *tr)
9309 /* Reference counter for a newly created trace array = 1. */
9310 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9313 list_del(&tr->list);
9315 /* Disable all the flags that were enabled coming in */
9316 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9317 if ((1 << i) & ZEROED_TRACE_FLAGS)
9318 set_tracer_flag(tr, 1 << i, 0);
9321 tracing_set_nop(tr);
9322 clear_ftrace_function_probes(tr);
9323 event_trace_del_tracer(tr);
9324 ftrace_clear_pids(tr);
9325 ftrace_destroy_function_files(tr);
9326 tracefs_remove(tr->dir);
9327 free_percpu(tr->last_func_repeats);
9328 free_trace_buffers(tr);
9330 for (i = 0; i < tr->nr_topts; i++) {
9331 kfree(tr->topts[i].topts);
9335 free_cpumask_var(tr->tracing_cpumask);
9342 int trace_array_destroy(struct trace_array *this_tr)
9344 struct trace_array *tr;
9350 mutex_lock(&event_mutex);
9351 mutex_lock(&trace_types_lock);
9355 /* Making sure trace array exists before destroying it. */
9356 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9357 if (tr == this_tr) {
9358 ret = __remove_instance(tr);
9363 mutex_unlock(&trace_types_lock);
9364 mutex_unlock(&event_mutex);
9368 EXPORT_SYMBOL_GPL(trace_array_destroy);
9370 static int instance_rmdir(const char *name)
9372 struct trace_array *tr;
9375 mutex_lock(&event_mutex);
9376 mutex_lock(&trace_types_lock);
9379 tr = trace_array_find(name);
9381 ret = __remove_instance(tr);
9383 mutex_unlock(&trace_types_lock);
9384 mutex_unlock(&event_mutex);
9389 static __init void create_trace_instances(struct dentry *d_tracer)
9391 struct trace_array *tr;
9393 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9396 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9399 mutex_lock(&event_mutex);
9400 mutex_lock(&trace_types_lock);
9402 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9405 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9406 "Failed to create instance directory\n"))
9410 mutex_unlock(&trace_types_lock);
9411 mutex_unlock(&event_mutex);
9415 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9417 struct trace_event_file *file;
9420 trace_create_file("available_tracers", 0444, d_tracer,
9421 tr, &show_traces_fops);
9423 trace_create_file("current_tracer", 0644, d_tracer,
9424 tr, &set_tracer_fops);
9426 trace_create_file("tracing_cpumask", 0644, d_tracer,
9427 tr, &tracing_cpumask_fops);
9429 trace_create_file("trace_options", 0644, d_tracer,
9430 tr, &tracing_iter_fops);
9432 trace_create_file("trace", 0644, d_tracer,
9435 trace_create_file("trace_pipe", 0444, d_tracer,
9436 tr, &tracing_pipe_fops);
9438 trace_create_file("buffer_size_kb", 0644, d_tracer,
9439 tr, &tracing_entries_fops);
9441 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9442 tr, &tracing_total_entries_fops);
9444 trace_create_file("free_buffer", 0200, d_tracer,
9445 tr, &tracing_free_buffer_fops);
9447 trace_create_file("trace_marker", 0220, d_tracer,
9448 tr, &tracing_mark_fops);
9450 file = __find_event_file(tr, "ftrace", "print");
9451 if (file && file->dir)
9452 trace_create_file("trigger", 0644, file->dir, file,
9453 &event_trigger_fops);
9454 tr->trace_marker_file = file;
9456 trace_create_file("trace_marker_raw", 0220, d_tracer,
9457 tr, &tracing_mark_raw_fops);
9459 trace_create_file("trace_clock", 0644, d_tracer, tr,
9462 trace_create_file("tracing_on", 0644, d_tracer,
9463 tr, &rb_simple_fops);
9465 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9466 &trace_time_stamp_mode_fops);
9468 tr->buffer_percent = 50;
9470 trace_create_file("buffer_percent", 0444, d_tracer,
9471 tr, &buffer_percent_fops);
9473 create_trace_options_dir(tr);
9475 trace_create_maxlat_file(tr, d_tracer);
9477 if (ftrace_create_function_files(tr, d_tracer))
9478 MEM_FAIL(1, "Could not allocate function filter files");
9480 #ifdef CONFIG_TRACER_SNAPSHOT
9481 trace_create_file("snapshot", 0644, d_tracer,
9482 tr, &snapshot_fops);
9485 trace_create_file("error_log", 0644, d_tracer,
9486 tr, &tracing_err_log_fops);
9488 for_each_tracing_cpu(cpu)
9489 tracing_init_tracefs_percpu(tr, cpu);
9491 ftrace_init_tracefs(tr, d_tracer);
9494 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9496 struct vfsmount *mnt;
9497 struct file_system_type *type;
9500 * To maintain backward compatibility for tools that mount
9501 * debugfs to get to the tracing facility, tracefs is automatically
9502 * mounted to the debugfs/tracing directory.
9504 type = get_fs_type("tracefs");
9507 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9508 put_filesystem(type);
9517 * tracing_init_dentry - initialize top level trace array
9519 * This is called when creating files or directories in the tracing
9520 * directory. It is called via fs_initcall() by any of the boot up code
9521 * and returns zero once the top level tracing directory has been set up.
9523 int tracing_init_dentry(void)
9525 struct trace_array *tr = &global_trace;
9527 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9528 pr_warn("Tracing disabled due to lockdown\n");
9532 /* The top level trace array uses NULL as parent */
9536 if (WARN_ON(!tracefs_initialized()))
9540 * As there may still be users that expect the tracing
9541 * files to exist in debugfs/tracing, we must automount
9542 * the tracefs file system there, so older tools still
9543 * work with the newer kernel.
9545 tr->dir = debugfs_create_automount("tracing", NULL,
9546 trace_automount, NULL);
9551 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9552 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9554 static struct workqueue_struct *eval_map_wq __initdata;
9555 static struct work_struct eval_map_work __initdata;
9557 static void __init eval_map_work_func(struct work_struct *work)
9561 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9562 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9565 static int __init trace_eval_init(void)
9567 INIT_WORK(&eval_map_work, eval_map_work_func);
9569 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9571 pr_err("Unable to allocate eval_map_wq\n");
9573 eval_map_work_func(&eval_map_work);
9577 queue_work(eval_map_wq, &eval_map_work);
9581 static int __init trace_eval_sync(void)
9583 /* Make sure the eval map updates are finished */
9585 destroy_workqueue(eval_map_wq);
9589 late_initcall_sync(trace_eval_sync);
9592 #ifdef CONFIG_MODULES
9593 static void trace_module_add_evals(struct module *mod)
9595 if (!mod->num_trace_evals)
9599 * Modules with bad taint do not have events created, do
9600 * not bother with enums either.
9602 if (trace_module_has_bad_taint(mod))
9605 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9608 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9609 static void trace_module_remove_evals(struct module *mod)
9611 union trace_eval_map_item *map;
9612 union trace_eval_map_item **last = &trace_eval_maps;
9614 if (!mod->num_trace_evals)
9617 mutex_lock(&trace_eval_mutex);
9619 map = trace_eval_maps;
9622 if (map->head.mod == mod)
9624 map = trace_eval_jmp_to_tail(map);
9625 last = &map->tail.next;
9626 map = map->tail.next;
9631 *last = trace_eval_jmp_to_tail(map)->tail.next;
9634 mutex_unlock(&trace_eval_mutex);
9637 static inline void trace_module_remove_evals(struct module *mod) { }
9638 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9640 static int trace_module_notify(struct notifier_block *self,
9641 unsigned long val, void *data)
9643 struct module *mod = data;
9646 case MODULE_STATE_COMING:
9647 trace_module_add_evals(mod);
9649 case MODULE_STATE_GOING:
9650 trace_module_remove_evals(mod);
9657 static struct notifier_block trace_module_nb = {
9658 .notifier_call = trace_module_notify,
9661 #endif /* CONFIG_MODULES */
9663 static __init int tracer_init_tracefs(void)
9667 trace_access_lock_init();
9669 ret = tracing_init_dentry();
9675 init_tracer_tracefs(&global_trace, NULL);
9676 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9678 trace_create_file("tracing_thresh", 0644, NULL,
9679 &global_trace, &tracing_thresh_fops);
9681 trace_create_file("README", 0444, NULL,
9682 NULL, &tracing_readme_fops);
9684 trace_create_file("saved_cmdlines", 0444, NULL,
9685 NULL, &tracing_saved_cmdlines_fops);
9687 trace_create_file("saved_cmdlines_size", 0644, NULL,
9688 NULL, &tracing_saved_cmdlines_size_fops);
9690 trace_create_file("saved_tgids", 0444, NULL,
9691 NULL, &tracing_saved_tgids_fops);
9695 trace_create_eval_file(NULL);
9697 #ifdef CONFIG_MODULES
9698 register_module_notifier(&trace_module_nb);
9701 #ifdef CONFIG_DYNAMIC_FTRACE
9702 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9703 NULL, &tracing_dyn_info_fops);
9706 create_trace_instances(NULL);
9708 update_tracer_options(&global_trace);
9713 fs_initcall(tracer_init_tracefs);
9715 static int trace_panic_handler(struct notifier_block *this,
9716 unsigned long event, void *unused)
9718 if (ftrace_dump_on_oops)
9719 ftrace_dump(ftrace_dump_on_oops);
9723 static struct notifier_block trace_panic_notifier = {
9724 .notifier_call = trace_panic_handler,
9726 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9729 static int trace_die_handler(struct notifier_block *self,
9735 if (ftrace_dump_on_oops)
9736 ftrace_dump(ftrace_dump_on_oops);
9744 static struct notifier_block trace_die_notifier = {
9745 .notifier_call = trace_die_handler,
9750 * printk is capped at 1024 bytes; we really don't need it that big.
9751 * Nothing should be printing 1000 characters anyway.
9753 #define TRACE_MAX_PRINT 1000
9756 * Define here KERN_TRACE so that we have one place to modify
9757 * it if we decide to change what log level the ftrace dump
9760 #define KERN_TRACE KERN_EMERG
9763 trace_printk_seq(struct trace_seq *s)
9765 /* Probably should print a warning here. */
9766 if (s->seq.len >= TRACE_MAX_PRINT)
9767 s->seq.len = TRACE_MAX_PRINT;
9770 * More paranoid code. Although the buffer size is set to
9771 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9772 * an extra layer of protection.
9774 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9775 s->seq.len = s->seq.size - 1;
9777 /* should be zero ended, but we are paranoid. */
9778 s->buffer[s->seq.len] = 0;
9780 printk(KERN_TRACE "%s", s->buffer);
9785 void trace_init_global_iter(struct trace_iterator *iter)
9787 iter->tr = &global_trace;
9788 iter->trace = iter->tr->current_trace;
9789 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9790 iter->array_buffer = &global_trace.array_buffer;
9792 if (iter->trace && iter->trace->open)
9793 iter->trace->open(iter);
9795 /* Annotate start of buffers if we had overruns */
9796 if (ring_buffer_overruns(iter->array_buffer->buffer))
9797 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9799 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9800 if (trace_clocks[iter->tr->clock_id].in_ns)
9801 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9804 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9806 /* use static because iter can be a bit big for the stack */
9807 static struct trace_iterator iter;
9808 static atomic_t dump_running;
9809 struct trace_array *tr = &global_trace;
9810 unsigned int old_userobj;
9811 unsigned long flags;
9814 /* Only allow one dump user at a time. */
9815 if (atomic_inc_return(&dump_running) != 1) {
9816 atomic_dec(&dump_running);
9821 * Always turn off tracing when we dump.
9822 * We don't need to show trace output of what happens
9823 * between multiple crashes.
9825 * If the user does a sysrq-z, then they can re-enable
9826 * tracing with echo 1 > tracing_on.
9830 local_irq_save(flags);
9832 /* Simulate the iterator */
9833 trace_init_global_iter(&iter);
9834 /* Can not use kmalloc for iter.temp and iter.fmt */
9835 iter.temp = static_temp_buf;
9836 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9837 iter.fmt = static_fmt_buf;
9838 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9840 for_each_tracing_cpu(cpu) {
9841 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9844 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9846 /* don't look at user memory in panic mode */
9847 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9849 switch (oops_dump_mode) {
9851 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9854 iter.cpu_file = raw_smp_processor_id();
9859 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9860 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9863 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9865 /* Did function tracer already get disabled? */
9866 if (ftrace_is_dead()) {
9867 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9868 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9872 * We need to stop all tracing on all CPUs to read
9873 * the next buffer. This is a bit expensive, but is
9874 * not done often. We read everything we can,
9875 * and then release the locks again.
9878 while (!trace_empty(&iter)) {
9881 printk(KERN_TRACE "---------------------------------\n");
9885 trace_iterator_reset(&iter);
9886 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9888 if (trace_find_next_entry_inc(&iter) != NULL) {
9891 ret = print_trace_line(&iter);
9892 if (ret != TRACE_TYPE_NO_CONSUME)
9893 trace_consume(&iter);
9895 touch_nmi_watchdog();
9897 trace_printk_seq(&iter.seq);
9901 printk(KERN_TRACE " (ftrace buffer empty)\n");
9903 printk(KERN_TRACE "---------------------------------\n");
9906 tr->trace_flags |= old_userobj;
9908 for_each_tracing_cpu(cpu) {
9909 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9911 atomic_dec(&dump_running);
9912 local_irq_restore(flags);
9914 EXPORT_SYMBOL_GPL(ftrace_dump);
9916 #define WRITE_BUFSIZE 4096
9918 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9919 size_t count, loff_t *ppos,
9920 int (*createfn)(const char *))
9922 char *kbuf, *buf, *tmp;
9927 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9931 while (done < count) {
9932 size = count - done;
9934 if (size >= WRITE_BUFSIZE)
9935 size = WRITE_BUFSIZE - 1;
9937 if (copy_from_user(kbuf, buffer + done, size)) {
9944 tmp = strchr(buf, '\n');
9947 size = tmp - buf + 1;
9950 if (done + size < count) {
9953 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9954 pr_warn("Line length is too long: Should be less than %d\n",
9962 /* Remove comments */
9963 tmp = strchr(buf, '#');
9968 ret = createfn(buf);
9973 } while (done < count);
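/*
 * Illustrative only: a hypothetical write handler built on
 * trace_parse_run_command().  The helper splits the user buffer into lines,
 * strips '#' comments, and invokes the callback once per command; a non-zero
 * return from the callback stops processing.  my_createfn/my_write are made
 * up for the example.
 *
 *	static int my_createfn(const char *raw_command)
 *	{
 *		pr_info("got command: %s\n", raw_command);
 *		return 0;
 *	}
 *
 *	static ssize_t my_write(struct file *file, const char __user *buffer,
 *				size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       my_createfn);
 *	}
 */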

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;

	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;
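
	/*
	 * Error unwinding: each label below releases one of the
	 * resources allocated above, in reverse order, so that a
	 * failure part-way through leaves nothing behind.
	 */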
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
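
/*
 * Boot ordering: early_trace_init() and trace_init() are both called
 * from start_kernel(). The former runs early enough that boot-time
 * trace_printk() and command-line tracers can be set up; the latter
 * runs once the trace event infrastructure can be initialized.
 * late_trace_init() then runs from late_initcall_sync() below.
 */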

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer buffer lives in an init section.
	 * This function is called from a late initcall. If we did not
	 * find the boot tracer, clear it out, to prevent a later
	 * registration from accessing the buffer that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
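
/*
 * When the architecture's sched_clock() is not stable, the default
 * per-CPU "local" trace clock may not be comparable between CPUs, so
 * the default is switched to the "global" clock, which stays monotonic
 * across CPUs at the cost of being more expensive to read.
 */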

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);