1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
54 #include "trace_output.h"
57 * On boot up, the ring buffer is set to the minimum size, so that
58 * we do not waste memory on systems that are not using tracing.
60 bool ring_buffer_expanded;
63 * We need to change this state when a selftest is running.
64 * A selftest will look into the ring-buffer to count the
65 * entries inserted during the selftest, although some concurrent
66 * insertions into the ring-buffer, such as trace_printk, could occur
67 * at the same time, giving false positive or negative results.
69 static bool __read_mostly tracing_selftest_running;
72 * If boot-time tracing including tracers/events via kernel cmdline
73 * is running, we do not want to run SELFTEST.
75 bool __read_mostly tracing_selftest_disabled;
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
80 if (!tracing_selftest_disabled) {
81 tracing_selftest_disabled = true;
82 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
105 * To prevent the comm cache from being overwritten when no
106 * tracing is active, only save the comm when a trace event occurred.
109 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
112 * Kill all tracing for good (never come back).
113 * It is initialized to 1 but will turn to zero if the initialization
114 * of the tracer is successful. But that is the only place that sets this back to zero.
117 static int tracing_disabled = 1;
119 cpumask_var_t __read_mostly tracing_buffer_mask;
122 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
124 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
125 * is set, then ftrace_dump is called. This will output the contents
126 * of the ftrace buffers to the console. This is very useful for
127 * capturing traces that lead to crashes and outputting them to a
130 * It is off by default, but you can enable it by either specifying
131 * "ftrace_dump_on_oops" in the kernel command line, or setting
132 * /proc/sys/kernel/ftrace_dump_on_oops
133 * Set 1 if you want to dump buffers of all CPUs
134 * Set 2 if you want to dump the buffer of the CPU that triggered oops
137 enum ftrace_dump_mode ftrace_dump_on_oops;
139 /* When set, tracing will stop when a WARN*() is hit */
140 int __disable_trace_on_warning;
142 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
143 /* Map of enums to their values, for "eval_map" file */
144 struct trace_eval_map_head {
146 unsigned long length;
149 union trace_eval_map_item;
151 struct trace_eval_map_tail {
153 * "end" is first and points to NULL as it must be different
154 * than "mod" or "eval_string"
156 union trace_eval_map_item *next;
157 const char *end; /* points to NULL */
160 static DEFINE_MUTEX(trace_eval_mutex);
163 * The trace_eval_maps are saved in an array with two extra elements,
164 * one at the beginning, and one at the end. The beginning item contains
165 * the count of the saved maps (head.length), and the module they
166 * belong to if not built in (head.mod). The ending item contains a
167 * pointer to the next array of saved eval_map items.
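 *
 * For illustration, a sketch of the layout described above:
 *
 *   [ head (length, mod) ][ map 0 ][ map 1 ] ... [ map N-1 ][ tail (next) ]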
169 union trace_eval_map_item {
170 struct trace_eval_map map;
171 struct trace_eval_map_head head;
172 struct trace_eval_map_tail tail;
175 static union trace_eval_map_item *trace_eval_maps;
176 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
178 int tracing_set_tracer(struct trace_array *tr, const char *buf);
179 static void ftrace_trace_userstack(struct trace_array *tr,
180 struct trace_buffer *buffer,
181 unsigned int trace_ctx);
183 #define MAX_TRACER_SIZE 100
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
187 static bool allocate_snapshot;
189 static int __init set_cmdline_ftrace(char *str)
191 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
192 default_bootup_tracer = bootup_tracer_buf;
193 /* We are using ftrace early, expand it */
194 ring_buffer_expanded = true;
197 __setup("ftrace=", set_cmdline_ftrace);
199 static int __init set_ftrace_dump_on_oops(char *str)
201 if (*str++ != '=' || !*str || !strcmp("1", str)) {
202 ftrace_dump_on_oops = DUMP_ALL;
206 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
207 ftrace_dump_on_oops = DUMP_ORIG;
213 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
215 static int __init stop_trace_on_warning(char *str)
217 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
218 __disable_trace_on_warning = 1;
221 __setup("traceoff_on_warning", stop_trace_on_warning);
223 static int __init boot_alloc_snapshot(char *str)
225 allocate_snapshot = true;
226 /* We also need the main ring buffer expanded */
227 ring_buffer_expanded = true;
230 __setup("alloc_snapshot", boot_alloc_snapshot);
233 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
235 static int __init set_trace_boot_options(char *str)
237 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
240 __setup("trace_options=", set_trace_boot_options);
242 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
243 static char *trace_boot_clock __initdata;
245 static int __init set_trace_boot_clock(char *str)
247 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
248 trace_boot_clock = trace_boot_clock_buf;
251 __setup("trace_clock=", set_trace_boot_clock);
253 static int __init set_tracepoint_printk(char *str)
255 /* Ignore the "tp_printk_stop_on_boot" param */
259 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
260 tracepoint_printk = 1;
263 __setup("tp_printk", set_tracepoint_printk);
265 static int __init set_tracepoint_printk_stop(char *str)
267 tracepoint_printk_stop_on_boot = true;
270 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
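/*
 * Illustrative example of combining the boot parameters handled above
 * (the tracer name and option values are only examples):
 *
 *	ftrace=function ftrace_dump_on_oops traceoff_on_warning alloc_snapshot
 *	trace_options=stacktrace trace_clock=global tp_printk
 */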
272 unsigned long long ns2usecs(u64 nsec)
280 trace_process_export(struct trace_export *export,
281 struct ring_buffer_event *event, int flag)
283 struct trace_entry *entry;
284 unsigned int size = 0;
286 if (export->flags & flag) {
287 entry = ring_buffer_event_data(event);
288 size = ring_buffer_event_length(event);
289 export->write(export, entry, size);
293 static DEFINE_MUTEX(ftrace_export_lock);
295 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
297 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
298 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
299 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
301 static inline void ftrace_exports_enable(struct trace_export *export)
303 if (export->flags & TRACE_EXPORT_FUNCTION)
304 static_branch_inc(&trace_function_exports_enabled);
306 if (export->flags & TRACE_EXPORT_EVENT)
307 static_branch_inc(&trace_event_exports_enabled);
309 if (export->flags & TRACE_EXPORT_MARKER)
310 static_branch_inc(&trace_marker_exports_enabled);
313 static inline void ftrace_exports_disable(struct trace_export *export)
315 if (export->flags & TRACE_EXPORT_FUNCTION)
316 static_branch_dec(&trace_function_exports_enabled);
318 if (export->flags & TRACE_EXPORT_EVENT)
319 static_branch_dec(&trace_event_exports_enabled);
321 if (export->flags & TRACE_EXPORT_MARKER)
322 static_branch_dec(&trace_marker_exports_enabled);
325 static void ftrace_exports(struct ring_buffer_event *event, int flag)
327 struct trace_export *export;
329 preempt_disable_notrace();
331 export = rcu_dereference_raw_check(ftrace_exports_list);
333 trace_process_export(export, event, flag);
334 export = rcu_dereference_raw_check(export->next);
337 preempt_enable_notrace();
341 add_trace_export(struct trace_export **list, struct trace_export *export)
343 rcu_assign_pointer(export->next, *list);
345 * We are adding the export to the list, but another
346 * CPU might be walking that list. We need to make sure
347 * the export->next pointer is valid before another CPU sees
348 * the export pointer added to the list.
350 rcu_assign_pointer(*list, export);
354 rm_trace_export(struct trace_export **list, struct trace_export *export)
356 struct trace_export **p;
358 for (p = list; *p != NULL; p = &(*p)->next)
365 rcu_assign_pointer(*p, (*p)->next);
371 add_ftrace_export(struct trace_export **list, struct trace_export *export)
373 ftrace_exports_enable(export);
375 add_trace_export(list, export);
379 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
383 ret = rm_trace_export(list, export);
384 ftrace_exports_disable(export);
389 int register_ftrace_export(struct trace_export *export)
391 if (WARN_ON_ONCE(!export->write))
394 mutex_lock(&ftrace_export_lock);
396 add_ftrace_export(&ftrace_exports_list, export);
398 mutex_unlock(&ftrace_export_lock);
402 EXPORT_SYMBOL_GPL(register_ftrace_export);
404 int unregister_ftrace_export(struct trace_export *export)
408 mutex_lock(&ftrace_export_lock);
410 ret = rm_ftrace_export(&ftrace_exports_list, export);
412 mutex_unlock(&ftrace_export_lock);
416 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
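/*
 * Usage sketch for the export API above (illustrative only; my_write and
 * my_export are hypothetical, and the callback arguments follow what
 * trace_process_export() passes in):
 *
 *	static void my_write(struct trace_export *export,
 *			     const void *entry, unsigned int size)
 *	{
 *		... forward the raw trace entry to some transport ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */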
418 /* trace_flags holds trace_options default values */
419 #define TRACE_DEFAULT_FLAGS \
420 (FUNCTION_DEFAULT_FLAGS | \
421 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
422 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
423 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
424 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
427 /* trace_options that are only supported by global_trace */
428 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
429 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
431 /* trace_flags that are default zero for instances */
432 #define ZEROED_TRACE_FLAGS \
433 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
436 * The global_trace is the descriptor that holds the top-level tracing
437 * buffers for the live tracing.
439 static struct trace_array global_trace = {
440 .trace_flags = TRACE_DEFAULT_FLAGS,
443 LIST_HEAD(ftrace_trace_arrays);
445 int trace_array_get(struct trace_array *this_tr)
447 struct trace_array *tr;
450 mutex_lock(&trace_types_lock);
451 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
458 mutex_unlock(&trace_types_lock);
463 static void __trace_array_put(struct trace_array *this_tr)
465 WARN_ON(!this_tr->ref);
470 * trace_array_put - Decrement the reference counter for this trace array.
471 * @this_tr : pointer to the trace array
473 * NOTE: Use this when we no longer need the trace array returned by
474 * trace_array_get_by_name(). This ensures the trace array can be later destroyed.
478 void trace_array_put(struct trace_array *this_tr)
483 mutex_lock(&trace_types_lock);
484 __trace_array_put(this_tr);
485 mutex_unlock(&trace_types_lock);
487 EXPORT_SYMBOL_GPL(trace_array_put);
489 int tracing_check_open_get_tr(struct trace_array *tr)
493 ret = security_locked_down(LOCKDOWN_TRACEFS);
497 if (tracing_disabled)
500 if (tr && trace_array_get(tr) < 0)
506 int call_filter_check_discard(struct trace_event_call *call, void *rec,
507 struct trace_buffer *buffer,
508 struct ring_buffer_event *event)
510 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
511 !filter_match_preds(call->filter, rec)) {
512 __trace_event_discard_commit(buffer, event);
520 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
521 * @filtered_pids: The list of pids to check
522 * @search_pid: The PID to find in @filtered_pids
524 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
527 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
529 return trace_pid_list_is_set(filtered_pids, search_pid);
533 * trace_ignore_this_task - should a task be ignored for tracing
534 * @filtered_pids: The list of pids to check
535 * @filtered_no_pids: The list of pids not to be traced
536 * @task: The task that should be ignored if not filtered
538 * Checks if @task should be traced or not from @filtered_pids.
539 * Returns true if @task should *NOT* be traced.
540 * Returns false if @task should be traced.
543 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
544 struct trace_pid_list *filtered_no_pids,
545 struct task_struct *task)
548 * If filtered_no_pids is not empty, and the task's pid is listed
549 * in filtered_no_pids, then return true.
550 * Otherwise, if filtered_pids is empty, that means we can
551 * trace all tasks. If it has content, then only trace pids
552 * within filtered_pids.
555 return (filtered_pids &&
556 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
558 trace_find_filtered_pid(filtered_no_pids, task->pid));
562 * trace_filter_add_remove_task - Add or remove a task from a pid_list
563 * @pid_list: The list to modify
564 * @self: The current task for fork or NULL for exit
565 * @task: The task to add or remove
567 * If adding a task, if @self is defined, the task is only added if @self
568 * is also included in @pid_list. This happens on fork and tasks should
569 * only be added when the parent is listed. If @self is NULL, then the
570 * @task pid will be removed from the list, which would happen on exit
573 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
574 struct task_struct *self,
575 struct task_struct *task)
580 /* For forks, we only add if the forking task is listed */
582 if (!trace_find_filtered_pid(pid_list, self->pid))
586 /* "self" is set for forks, and NULL for exits */
588 trace_pid_list_set(pid_list, task->pid);
590 trace_pid_list_clear(pid_list, task->pid);
594 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
595 * @pid_list: The pid list to show
596 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
597 * @pos: The position of the file
599 * This is used by the seq_file "next" operation to iterate the pids
600 * listed in a trace_pid_list structure.
602 * Returns the pid+1 as we want to display pid of zero, but NULL would
603 * stop the iteration.
605 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
607 long pid = (unsigned long)v;
612 /* pid already is +1 of the actual previous bit */
613 if (trace_pid_list_next(pid_list, pid, &next) < 0)
618 /* Return pid + 1 to allow zero to be represented */
619 return (void *)(pid + 1);
623 * trace_pid_start - Used for seq_file to start reading pid lists
624 * @pid_list: The pid list to show
625 * @pos: The position of the file
627 * This is used by seq_file "start" operation to start the iteration
630 * Returns the pid+1 as we want to display pid of zero, but NULL would
631 * stop the iteration.
633 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
639 if (trace_pid_list_first(pid_list, &first) < 0)
644 /* Return pid + 1 so that zero can be the exit value */
645 for (pid++; pid && l < *pos;
646 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
652 * trace_pid_show - show the current pid in seq_file processing
653 * @m: The seq_file structure to write into
654 * @v: A void pointer of the pid (+1) value to display
656 * Can be directly used by seq_file operations to display the current pid value.
659 int trace_pid_show(struct seq_file *m, void *v)
661 unsigned long pid = (unsigned long)v - 1;
663 seq_printf(m, "%lu\n", pid);
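/*
 * The three helpers above are meant to back a seq_file. A sketch of the
 * wiring (the p_start/p_next/p_stop wrappers are hypothetical; they would
 * look up the pid_list from the file's private data):
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= p_start,	(wraps trace_pid_start())
 *		.next	= p_next,	(wraps trace_pid_next())
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */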
667 /* 128 should be much more than enough */
668 #define PID_BUF_SIZE 127
670 int trace_pid_write(struct trace_pid_list *filtered_pids,
671 struct trace_pid_list **new_pid_list,
672 const char __user *ubuf, size_t cnt)
674 struct trace_pid_list *pid_list;
675 struct trace_parser parser;
683 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
687 * Always recreate a new array. The write is an all or nothing
688 * operation. Always create a new array when adding new pids by
689 * the user. If the operation fails, then the current list is used.
692 pid_list = trace_pid_list_alloc();
694 trace_parser_put(&parser);
699 /* copy the current bits to the new max */
700 ret = trace_pid_list_first(filtered_pids, &pid);
702 trace_pid_list_set(pid_list, pid);
703 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
713 ret = trace_get_user(&parser, ubuf, cnt, &pos);
721 if (!trace_parser_loaded(&parser))
725 if (kstrtoul(parser.buffer, 0, &val))
730 if (trace_pid_list_set(pid_list, pid) < 0) {
736 trace_parser_clear(&parser);
739 trace_parser_put(&parser);
742 trace_pid_list_free(pid_list);
747 /* Cleared the list of pids */
748 trace_pid_list_free(pid_list);
752 *new_pid_list = pid_list;
757 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
761 /* Early boot up does not have a buffer yet */
763 return trace_clock_local();
765 ts = ring_buffer_time_stamp(buf->buffer);
766 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
771 u64 ftrace_now(int cpu)
773 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
777 * tracing_is_enabled - Show if global_trace has been enabled
779 * Shows if the global trace has been enabled or not. It uses the
780 * mirror flag "buffer_disabled" so it can be used in fast paths such as by
781 * the irqsoff tracer. But it may be inaccurate due to races. If you
782 * need to know the accurate state, use tracing_is_on() which is a little
783 * slower, but accurate.
785 int tracing_is_enabled(void)
788 * For quick access (irqsoff uses this in fast path), just
789 * return the mirror variable of the state of the ring buffer.
790 * It's a little racy, but we don't really care.
793 return !global_trace.buffer_disabled;
797 * trace_buf_size is the size in bytes that is allocated
798 * for a buffer. Note, the number of bytes is always rounded to page size.
801 * This number is purposely set to a low number of 16384.
802 * If the dump on oops happens, it will be much appreciated
803 * to not have to wait for all that output. Anyway this can be
804 * boot time and run time configurable.
806 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
808 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
810 /* trace_types holds a link list of available tracers. */
811 static struct tracer *trace_types __read_mostly;
814 * trace_types_lock is used to protect the trace_types list.
816 DEFINE_MUTEX(trace_types_lock);
819 * serialize the access of the ring buffer
821 * The ring buffer serializes readers, but that is only low-level protection.
822 * The validity of the events (returned by ring_buffer_peek() etc.)
823 * is not protected by the ring buffer.
825 * The content of events may become garbage if we allow another process to consume
826 * these events concurrently:
827 * A) the page of the consumed events may become a normal page
828 * (not a reader page) in the ring buffer, and this page will be rewritten
829 * by the events producer.
830 * B) the page of the consumed events may become a page for splice_read,
831 * and this page will be returned to the system.
833 * These primitives allow multiple processes to access different cpu ring buffers
836 * These primitives don't distinguish between read-only and read-consume access.
837 * Multiple read-only accesses are also serialized.
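 *
 * A reader therefore typically brackets its access like this
 * (illustrative sketch):
 *
 *	trace_access_lock(cpu);
 *	... peek at or consume events of that cpu buffer ...
 *	trace_access_unlock(cpu);
 *
 * where cpu == RING_BUFFER_ALL_CPUS takes the access exclusively.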
841 static DECLARE_RWSEM(all_cpu_access_lock);
842 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
844 static inline void trace_access_lock(int cpu)
846 if (cpu == RING_BUFFER_ALL_CPUS) {
847 /* gain it for accessing the whole ring buffer. */
848 down_write(&all_cpu_access_lock);
850 /* gain it for accessing a cpu ring buffer. */
852 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
853 down_read(&all_cpu_access_lock);
855 /* Secondly block other access to this @cpu ring buffer. */
856 mutex_lock(&per_cpu(cpu_access_lock, cpu));
860 static inline void trace_access_unlock(int cpu)
862 if (cpu == RING_BUFFER_ALL_CPUS) {
863 up_write(&all_cpu_access_lock);
865 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
866 up_read(&all_cpu_access_lock);
870 static inline void trace_access_lock_init(void)
874 for_each_possible_cpu(cpu)
875 mutex_init(&per_cpu(cpu_access_lock, cpu));
880 static DEFINE_MUTEX(access_lock);
882 static inline void trace_access_lock(int cpu)
885 mutex_lock(&access_lock);
888 static inline void trace_access_unlock(int cpu)
891 mutex_unlock(&access_lock);
894 static inline void trace_access_lock_init(void)
900 #ifdef CONFIG_STACKTRACE
901 static void __ftrace_trace_stack(struct trace_buffer *buffer,
902 unsigned int trace_ctx,
903 int skip, struct pt_regs *regs);
904 static inline void ftrace_trace_stack(struct trace_array *tr,
905 struct trace_buffer *buffer,
906 unsigned int trace_ctx,
907 int skip, struct pt_regs *regs);
910 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
911 unsigned int trace_ctx,
912 int skip, struct pt_regs *regs)
915 static inline void ftrace_trace_stack(struct trace_array *tr,
916 struct trace_buffer *buffer,
917 unsigned long trace_ctx,
918 int skip, struct pt_regs *regs)
924 static __always_inline void
925 trace_event_setup(struct ring_buffer_event *event,
926 int type, unsigned int trace_ctx)
928 struct trace_entry *ent = ring_buffer_event_data(event);
930 tracing_generic_entry_update(ent, type, trace_ctx);
933 static __always_inline struct ring_buffer_event *
934 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
937 unsigned int trace_ctx)
939 struct ring_buffer_event *event;
941 event = ring_buffer_lock_reserve(buffer, len);
943 trace_event_setup(event, type, trace_ctx);
948 void tracer_tracing_on(struct trace_array *tr)
950 if (tr->array_buffer.buffer)
951 ring_buffer_record_on(tr->array_buffer.buffer);
953 * This flag is looked at when buffers haven't been allocated
954 * yet, or by some tracers (like irqsoff) that just want to
955 * know if the ring buffer has been disabled, and can handle
956 * races where it gets disabled while we still do a record.
957 * As the check is in the fast path of the tracers, it is more
958 * important to be fast than accurate.
960 tr->buffer_disabled = 0;
961 /* Make the flag seen by readers */
966 * tracing_on - enable tracing buffers
968 * This function enables tracing buffers that may have been
969 * disabled with tracing_off.
971 void tracing_on(void)
973 tracer_tracing_on(&global_trace);
975 EXPORT_SYMBOL_GPL(tracing_on);
978 static __always_inline void
979 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
981 __this_cpu_write(trace_taskinfo_save, true);
983 /* If this is the temp buffer, we need to commit fully */
984 if (this_cpu_read(trace_buffered_event) == event) {
985 /* Length is in event->array[0] */
986 ring_buffer_write(buffer, event->array[0], &event->array[1]);
987 /* Release the temp buffer */
988 this_cpu_dec(trace_buffered_event_cnt);
990 ring_buffer_unlock_commit(buffer, event);
994 * __trace_puts - write a constant string into the trace buffer.
995 * @ip: The address of the caller
996 * @str: The constant string to write
997 * @size: The size of the string.
999 int __trace_puts(unsigned long ip, const char *str, int size)
1001 struct ring_buffer_event *event;
1002 struct trace_buffer *buffer;
1003 struct print_entry *entry;
1004 unsigned int trace_ctx;
1007 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1010 if (unlikely(tracing_selftest_running || tracing_disabled))
1013 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1015 trace_ctx = tracing_gen_ctx();
1016 buffer = global_trace.array_buffer.buffer;
1017 ring_buffer_nest_start(buffer);
1018 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1025 entry = ring_buffer_event_data(event);
1028 memcpy(&entry->buf, str, size);
1030 /* Add a newline if necessary */
1031 if (entry->buf[size - 1] != '\n') {
1032 entry->buf[size] = '\n';
1033 entry->buf[size + 1] = '\0';
1035 entry->buf[size] = '\0';
1037 __buffer_unlock_commit(buffer, event);
1038 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1040 ring_buffer_nest_end(buffer);
1043 EXPORT_SYMBOL_GPL(__trace_puts);
1046 * __trace_bputs - write the pointer to a constant string into trace buffer
1047 * @ip: The address of the caller
1048 * @str: The constant string to write to the buffer to
1050 int __trace_bputs(unsigned long ip, const char *str)
1052 struct ring_buffer_event *event;
1053 struct trace_buffer *buffer;
1054 struct bputs_entry *entry;
1055 unsigned int trace_ctx;
1056 int size = sizeof(struct bputs_entry);
1059 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1062 if (unlikely(tracing_selftest_running || tracing_disabled))
1065 trace_ctx = tracing_gen_ctx();
1066 buffer = global_trace.array_buffer.buffer;
1068 ring_buffer_nest_start(buffer);
1069 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1074 entry = ring_buffer_event_data(event);
1078 __buffer_unlock_commit(buffer, event);
1079 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1083 ring_buffer_nest_end(buffer);
1086 EXPORT_SYMBOL_GPL(__trace_bputs);
1088 #ifdef CONFIG_TRACER_SNAPSHOT
1089 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1092 struct tracer *tracer = tr->current_trace;
1093 unsigned long flags;
1096 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1097 internal_trace_puts("*** snapshot is being ignored ***\n");
1101 if (!tr->allocated_snapshot) {
1102 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1103 internal_trace_puts("*** stopping trace here! ***\n");
1108 /* Note, snapshot can not be used when the tracer uses it */
1109 if (tracer->use_max_tr) {
1110 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1111 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1115 local_irq_save(flags);
1116 update_max_tr(tr, current, smp_processor_id(), cond_data);
1117 local_irq_restore(flags);
1120 void tracing_snapshot_instance(struct trace_array *tr)
1122 tracing_snapshot_instance_cond(tr, NULL);
1126 * tracing_snapshot - take a snapshot of the current buffer.
1128 * This causes a swap between the snapshot buffer and the current live
1129 * tracing buffer. You can use this to take snapshots of the live
1130 * trace when some condition is triggered, but continue to trace.
1132 * Note, make sure to allocate the snapshot either with
1133 * tracing_snapshot_alloc(), or by doing it manually
1134 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1136 * If the snapshot buffer is not allocated, this will stop tracing,
1137 * basically making a permanent snapshot.
1139 void tracing_snapshot(void)
1141 struct trace_array *tr = &global_trace;
1143 tracing_snapshot_instance(tr);
1145 EXPORT_SYMBOL_GPL(tracing_snapshot);
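/*
 * Typical use of the snapshot API above (sketch; "interesting_condition"
 * is a hypothetical trigger):
 *
 *	tracing_snapshot_alloc();	(from a context that may sleep)
 *	...
 *	if (interesting_condition)
 *		tracing_snapshot();
 */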
1148 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1149 * @tr: The tracing instance to snapshot
1150 * @cond_data: The data to be tested conditionally, and possibly saved
1152 * This is the same as tracing_snapshot() except that the snapshot is
1153 * conditional - the snapshot will only happen if the
1154 * cond_snapshot.update() implementation receiving the cond_data
1155 * returns true, which means that the trace array's cond_snapshot
1156 * update() operation used the cond_data to determine whether the
1157 * snapshot should be taken, and if it was, presumably saved it along
1158 * with the snapshot.
1160 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1162 tracing_snapshot_instance_cond(tr, cond_data);
1164 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1167 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1168 * @tr: The tracing instance
1170 * When the user enables a conditional snapshot using
1171 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1172 * with the snapshot. This accessor is used to retrieve it.
1174 * Should not be called from cond_snapshot.update(), since it takes
1175 * the tr->max_lock lock, which the code calling
1176 * cond_snapshot.update() has already taken.
1178 * Returns the cond_data associated with the trace array's snapshot.
1180 void *tracing_cond_snapshot_data(struct trace_array *tr)
1182 void *cond_data = NULL;
1184 local_irq_disable();
1185 arch_spin_lock(&tr->max_lock);
1187 if (tr->cond_snapshot)
1188 cond_data = tr->cond_snapshot->cond_data;
1190 arch_spin_unlock(&tr->max_lock);
1195 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1197 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1198 struct array_buffer *size_buf, int cpu_id);
1199 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1201 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1205 if (!tr->allocated_snapshot) {
1207 /* allocate spare buffer */
1208 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1209 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1213 tr->allocated_snapshot = true;
1219 static void free_snapshot(struct trace_array *tr)
1222 * We don't free the ring buffer; instead, we resize it, because
1223 * the max_tr ring buffer has some state (e.g. ring->clock) and
1224 * we want to preserve it.
1226 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1227 set_buffer_entries(&tr->max_buffer, 1);
1228 tracing_reset_online_cpus(&tr->max_buffer);
1229 tr->allocated_snapshot = false;
1233 * tracing_alloc_snapshot - allocate snapshot buffer.
1235 * This only allocates the snapshot buffer if it isn't already
1236 * allocated - it doesn't also take a snapshot.
1238 * This is meant to be used in cases where the snapshot buffer needs
1239 * to be set up for events that can't sleep but need to be able to
1240 * trigger a snapshot.
1242 int tracing_alloc_snapshot(void)
1244 struct trace_array *tr = &global_trace;
1247 ret = tracing_alloc_snapshot_instance(tr);
1252 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1255 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1257 * This is similar to tracing_snapshot(), but it will allocate the
1258 * snapshot buffer if it isn't already allocated. Use this only
1259 * where it is safe to sleep, as the allocation may sleep.
1261 * This causes a swap between the snapshot buffer and the current live
1262 * tracing buffer. You can use this to take snapshots of the live
1263 * trace when some condition is triggered, but continue to trace.
1265 void tracing_snapshot_alloc(void)
1269 ret = tracing_alloc_snapshot();
1275 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1278 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1279 * @tr: The tracing instance
1280 * @cond_data: User data to associate with the snapshot
1281 * @update: Implementation of the cond_snapshot update function
1283 * Check whether the conditional snapshot for the given instance has
1284 * already been enabled, or if the current tracer is already using a
1285 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1286 * save the cond_data and update function inside.
1288 * Returns 0 if successful, error otherwise.
1290 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1291 cond_update_fn_t update)
1293 struct cond_snapshot *cond_snapshot;
1296 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1300 cond_snapshot->cond_data = cond_data;
1301 cond_snapshot->update = update;
1303 mutex_lock(&trace_types_lock);
1305 ret = tracing_alloc_snapshot_instance(tr);
1309 if (tr->current_trace->use_max_tr) {
1315 * The cond_snapshot can only change to NULL without the
1316 * trace_types_lock. We don't care if we race with it going
1317 * to NULL, but we want to make sure that it's not set to
1318 * something other than NULL when we get here, which we can
1319 * do safely with only holding the trace_types_lock and not
1320 * having to take the max_lock.
1322 if (tr->cond_snapshot) {
1327 local_irq_disable();
1328 arch_spin_lock(&tr->max_lock);
1329 tr->cond_snapshot = cond_snapshot;
1330 arch_spin_unlock(&tr->max_lock);
1333 mutex_unlock(&trace_types_lock);
1338 mutex_unlock(&trace_types_lock);
1339 kfree(cond_snapshot);
1342 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
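/*
 * Usage sketch for conditional snapshots (illustrative; my_update() and
 * my_data are hypothetical, and the callback type matches how
 * cond_snapshot->update() is invoked in update_max_tr()):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return ...;	(decide whether to take this snapshot)
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, my_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, my_data);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */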
1345 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1346 * @tr: The tracing instance
1348 * Check whether the conditional snapshot for the given instance is
1349 * enabled; if so, free the cond_snapshot associated with it,
1350 * otherwise return -EINVAL.
1352 * Returns 0 if successful, error otherwise.
1354 int tracing_snapshot_cond_disable(struct trace_array *tr)
1358 local_irq_disable();
1359 arch_spin_lock(&tr->max_lock);
1361 if (!tr->cond_snapshot)
1364 kfree(tr->cond_snapshot);
1365 tr->cond_snapshot = NULL;
1368 arch_spin_unlock(&tr->max_lock);
1373 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1375 void tracing_snapshot(void)
1377 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1379 EXPORT_SYMBOL_GPL(tracing_snapshot);
1380 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1382 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1384 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1385 int tracing_alloc_snapshot(void)
1387 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1390 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1391 void tracing_snapshot_alloc(void)
1396 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1397 void *tracing_cond_snapshot_data(struct trace_array *tr)
1401 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1402 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1406 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1407 int tracing_snapshot_cond_disable(struct trace_array *tr)
1411 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1412 #endif /* CONFIG_TRACER_SNAPSHOT */
1414 void tracer_tracing_off(struct trace_array *tr)
1416 if (tr->array_buffer.buffer)
1417 ring_buffer_record_off(tr->array_buffer.buffer);
1419 * This flag is looked at when buffers haven't been allocated
1420 * yet, or by some tracers (like irqsoff) that just want to
1421 * know if the ring buffer has been disabled, and can handle
1422 * races where it gets disabled while we still do a record.
1423 * As the check is in the fast path of the tracers, it is more
1424 * important to be fast than accurate.
1426 tr->buffer_disabled = 1;
1427 /* Make the flag seen by readers */
1432 * tracing_off - turn off tracing buffers
1434 * This function stops the tracing buffers from recording data.
1435 * It does not disable any overhead the tracers themselves may
1436 * be causing. This function simply causes all recording to
1437 * the ring buffers to fail.
1439 void tracing_off(void)
1441 tracer_tracing_off(&global_trace);
1443 EXPORT_SYMBOL_GPL(tracing_off);
1445 void disable_trace_on_warning(void)
1447 if (__disable_trace_on_warning) {
1448 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1449 "Disabling tracing due to warning\n");
1455 * tracer_tracing_is_on - show real state of ring buffer enabled
1456 * @tr : the trace array to know if ring buffer is enabled
1458 * Shows real state of the ring buffer if it is enabled or not.
1460 bool tracer_tracing_is_on(struct trace_array *tr)
1462 if (tr->array_buffer.buffer)
1463 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1464 return !tr->buffer_disabled;
1468 * tracing_is_on - show state of ring buffers enabled
1470 int tracing_is_on(void)
1472 return tracer_tracing_is_on(&global_trace);
1474 EXPORT_SYMBOL_GPL(tracing_is_on);
1476 static int __init set_buf_size(char *str)
1478 unsigned long buf_size;
1482 buf_size = memparse(str, &str);
1484 * nr_entries can not be zero and the startup
1485 * tests require some buffer space. Therefore
1486 * ensure we have at least 4096 bytes of buffer.
1488 trace_buf_size = max(4096UL, buf_size);
1491 __setup("trace_buf_size=", set_buf_size);
1493 static int __init set_tracing_thresh(char *str)
1495 unsigned long threshold;
1500 ret = kstrtoul(str, 0, &threshold);
1503 tracing_thresh = threshold * 1000;
1506 __setup("tracing_thresh=", set_tracing_thresh);
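/*
 * Note: the tracing_thresh= value is given in microseconds on the command
 * line and stored in nanoseconds, hence the multiplication by 1000 above.
 */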
1508 unsigned long nsecs_to_usecs(unsigned long nsecs)
1510 return nsecs / 1000;
1514 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1515 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1516 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1517 * of strings in the order that the evals (enum) were defined.
1522 /* These must match the bit positions in trace_iterator_flags */
1523 static const char *trace_options[] = {
1531 int in_ns; /* is this clock in nanoseconds? */
1532 } trace_clocks[] = {
1533 { trace_clock_local, "local", 1 },
1534 { trace_clock_global, "global", 1 },
1535 { trace_clock_counter, "counter", 0 },
1536 { trace_clock_jiffies, "uptime", 0 },
1537 { trace_clock, "perf", 1 },
1538 { ktime_get_mono_fast_ns, "mono", 1 },
1539 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1540 { ktime_get_boot_fast_ns, "boot", 1 },
1544 bool trace_clock_in_ns(struct trace_array *tr)
1546 if (trace_clocks[tr->clock_id].in_ns)
1553 * trace_parser_get_init - gets the buffer for trace parser
1555 int trace_parser_get_init(struct trace_parser *parser, int size)
1557 memset(parser, 0, sizeof(*parser));
1559 parser->buffer = kmalloc(size, GFP_KERNEL);
1560 if (!parser->buffer)
1563 parser->size = size;
1568 * trace_parser_put - frees the buffer for trace parser
1570 void trace_parser_put(struct trace_parser *parser)
1572 kfree(parser->buffer);
1573 parser->buffer = NULL;
1577 * trace_get_user - reads the user input string separated by space
1578 * (matched by isspace(ch))
1580 * For each string found the 'struct trace_parser' is updated,
1581 * and the function returns.
1583 * Returns number of bytes read.
1585 * See kernel/trace/trace.h for 'struct trace_parser' details.
1587 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1588 size_t cnt, loff_t *ppos)
1595 trace_parser_clear(parser);
1597 ret = get_user(ch, ubuf++);
1605 * The parser is not finished with the last write,
1606 * continue reading the user input without skipping spaces.
1608 if (!parser->cont) {
1609 /* skip white space */
1610 while (cnt && isspace(ch)) {
1611 ret = get_user(ch, ubuf++);
1620 /* only spaces were written */
1621 if (isspace(ch) || !ch) {
1628 /* read the non-space input */
1629 while (cnt && !isspace(ch) && ch) {
1630 if (parser->idx < parser->size - 1)
1631 parser->buffer[parser->idx++] = ch;
1636 ret = get_user(ch, ubuf++);
1643 /* We either got finished input or we have to wait for another call. */
1644 if (isspace(ch) || !ch) {
1645 parser->buffer[parser->idx] = 0;
1646 parser->cont = false;
1647 } else if (parser->idx < parser->size - 1) {
1648 parser->cont = true;
1649 parser->buffer[parser->idx++] = ch;
1650 /* Make sure the parsed string always terminates with '\0'. */
1651 parser->buffer[parser->idx] = 0;
1664 /* TODO add a seq_buf_to_buffer() */
1665 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1669 if (trace_seq_used(s) <= s->seq.readpos)
1672 len = trace_seq_used(s) - s->seq.readpos;
1675 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1677 s->seq.readpos += cnt;
1681 unsigned long __read_mostly tracing_thresh;
1682 static const struct file_operations tracing_max_lat_fops;
1684 #ifdef LATENCY_FS_NOTIFY
1686 static struct workqueue_struct *fsnotify_wq;
1688 static void latency_fsnotify_workfn(struct work_struct *work)
1690 struct trace_array *tr = container_of(work, struct trace_array,
1692 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1695 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1697 struct trace_array *tr = container_of(iwork, struct trace_array,
1699 queue_work(fsnotify_wq, &tr->fsnotify_work);
1702 static void trace_create_maxlat_file(struct trace_array *tr,
1703 struct dentry *d_tracer)
1705 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1706 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1707 tr->d_max_latency = trace_create_file("tracing_max_latency",
1709 d_tracer, &tr->max_latency,
1710 &tracing_max_lat_fops);
1713 __init static int latency_fsnotify_init(void)
1715 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1716 WQ_UNBOUND | WQ_HIGHPRI, 0);
1718 pr_err("Unable to allocate tr_max_lat_wq\n");
1724 late_initcall_sync(latency_fsnotify_init);
1726 void latency_fsnotify(struct trace_array *tr)
1731 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1732 * possible that we are called from __schedule() or do_idle(), which
1733 * could cause a deadlock.
1735 irq_work_queue(&tr->fsnotify_irqwork);
1738 #elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
1739 || defined(CONFIG_OSNOISE_TRACER)
1741 #define trace_create_maxlat_file(tr, d_tracer) \
1742 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1743 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1746 #define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
1749 #ifdef CONFIG_TRACER_MAX_TRACE
1751 * Copy the new maximum trace into the separate maximum-trace
1752 * structure. (this way the maximum trace is permanently saved,
1753 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1756 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1758 struct array_buffer *trace_buf = &tr->array_buffer;
1759 struct array_buffer *max_buf = &tr->max_buffer;
1760 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1761 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1764 max_buf->time_start = data->preempt_timestamp;
1766 max_data->saved_latency = tr->max_latency;
1767 max_data->critical_start = data->critical_start;
1768 max_data->critical_end = data->critical_end;
1770 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1771 max_data->pid = tsk->pid;
1773 * If tsk == current, then use current_uid(), as that does not use
1774 * RCU. The irq tracer can be called out of RCU scope.
1777 max_data->uid = current_uid();
1779 max_data->uid = task_uid(tsk);
1781 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1782 max_data->policy = tsk->policy;
1783 max_data->rt_priority = tsk->rt_priority;
1785 /* record this tasks comm */
1786 tracing_record_cmdline(tsk);
1787 latency_fsnotify(tr);
1791 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1793 * @tsk: the task with the latency
1794 * @cpu: The cpu that initiated the trace.
1795 * @cond_data: User data associated with a conditional snapshot
1797 * Flip the buffers between the @tr and the max_tr and record information
1798 * about which task was the cause of this latency.
1801 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1807 WARN_ON_ONCE(!irqs_disabled());
1809 if (!tr->allocated_snapshot) {
1810 /* Only the nop tracer should hit this when disabling */
1811 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1815 arch_spin_lock(&tr->max_lock);
1817 /* Inherit the recordable setting from array_buffer */
1818 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1819 ring_buffer_record_on(tr->max_buffer.buffer);
1821 ring_buffer_record_off(tr->max_buffer.buffer);
1823 #ifdef CONFIG_TRACER_SNAPSHOT
1824 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1827 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1829 __update_max_tr(tr, tsk, cpu);
1832 arch_spin_unlock(&tr->max_lock);
1836 * update_max_tr_single - only copy one trace over, and reset the rest
1838 * @tsk: task with the latency
1839 * @cpu: the cpu of the buffer to copy.
1841 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1844 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1851 WARN_ON_ONCE(!irqs_disabled());
1852 if (!tr->allocated_snapshot) {
1853 /* Only the nop tracer should hit this when disabling */
1854 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1858 arch_spin_lock(&tr->max_lock);
1860 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1862 if (ret == -EBUSY) {
1864 * We failed to swap the buffer due to a commit taking
1865 * place on this CPU. We fail to record, but we reset
1866 * the max trace buffer (no one writes directly to it)
1867 * and flag that it failed.
1869 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1870 "Failed to swap buffers due to commit in progress\n");
1873 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1875 __update_max_tr(tr, tsk, cpu);
1876 arch_spin_unlock(&tr->max_lock);
1878 #endif /* CONFIG_TRACER_MAX_TRACE */
1880 static int wait_on_pipe(struct trace_iterator *iter, int full)
1882 /* Iterators are static, they should be filled or empty */
1883 if (trace_buffer_iter(iter, iter->cpu_file))
1886 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1890 #ifdef CONFIG_FTRACE_STARTUP_TEST
1891 static bool selftests_can_run;
1893 struct trace_selftests {
1894 struct list_head list;
1895 struct tracer *type;
1898 static LIST_HEAD(postponed_selftests);
1900 static int save_selftest(struct tracer *type)
1902 struct trace_selftests *selftest;
1904 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1908 selftest->type = type;
1909 list_add(&selftest->list, &postponed_selftests);
1913 static int run_tracer_selftest(struct tracer *type)
1915 struct trace_array *tr = &global_trace;
1916 struct tracer *saved_tracer = tr->current_trace;
1919 if (!type->selftest || tracing_selftest_disabled)
1923 * If a tracer registers early in boot up (before scheduling is
1924 * initialized and such), then do not run its selftests yet.
1925 * Instead, run it a little later in the boot process.
1927 if (!selftests_can_run)
1928 return save_selftest(type);
1930 if (!tracing_is_on()) {
1931 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1937 * Run a selftest on this tracer.
1938 * Here we reset the trace buffer, and set the current
1939 * tracer to be this tracer. The tracer can then run some
1940 * internal tracing to verify that everything is in order.
1941 * If we fail, we do not register this tracer.
1943 tracing_reset_online_cpus(&tr->array_buffer);
1945 tr->current_trace = type;
1947 #ifdef CONFIG_TRACER_MAX_TRACE
1948 if (type->use_max_tr) {
1949 /* If we expanded the buffers, make sure the max is expanded too */
1950 if (ring_buffer_expanded)
1951 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1952 RING_BUFFER_ALL_CPUS);
1953 tr->allocated_snapshot = true;
1957 /* the test is responsible for initializing and enabling */
1958 pr_info("Testing tracer %s: ", type->name);
1959 ret = type->selftest(type, tr);
1960 /* the test is responsible for resetting too */
1961 tr->current_trace = saved_tracer;
1963 printk(KERN_CONT "FAILED!\n");
1964 /* Add the warning after printing 'FAILED' */
1968 /* Only reset on passing, to avoid touching corrupted buffers */
1969 tracing_reset_online_cpus(&tr->array_buffer);
1971 #ifdef CONFIG_TRACER_MAX_TRACE
1972 if (type->use_max_tr) {
1973 tr->allocated_snapshot = false;
1975 /* Shrink the max buffer again */
1976 if (ring_buffer_expanded)
1977 ring_buffer_resize(tr->max_buffer.buffer, 1,
1978 RING_BUFFER_ALL_CPUS);
1982 printk(KERN_CONT "PASSED\n");
1986 static __init int init_trace_selftests(void)
1988 struct trace_selftests *p, *n;
1989 struct tracer *t, **last;
1992 selftests_can_run = true;
1994 mutex_lock(&trace_types_lock);
1996 if (list_empty(&postponed_selftests))
1999 pr_info("Running postponed tracer tests:\n");
2001 tracing_selftest_running = true;
2002 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2003 /* This loop can take minutes when sanitizers are enabled, so
2004 * let's make sure we allow RCU processing.
2007 ret = run_tracer_selftest(p->type);
2008 /* If the test fails, then warn and remove from available_tracers */
2010 WARN(1, "tracer: %s failed selftest, disabling\n",
2012 last = &trace_types;
2013 for (t = trace_types; t; t = t->next) {
2024 tracing_selftest_running = false;
2027 mutex_unlock(&trace_types_lock);
2031 core_initcall(init_trace_selftests);
2033 static inline int run_tracer_selftest(struct tracer *type)
2037 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2039 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2041 static void __init apply_trace_boot_options(void);
2044 * register_tracer - register a tracer with the ftrace system.
2045 * @type: the plugin for the tracer
2047 * Register a new plugin tracer.
2049 int __init register_tracer(struct tracer *type)
2055 pr_info("Tracer must have a name\n");
2059 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2060 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2064 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2065 pr_warn("Can not register tracer %s due to lockdown\n",
2070 mutex_lock(&trace_types_lock);
2072 tracing_selftest_running = true;
2074 for (t = trace_types; t; t = t->next) {
2075 if (strcmp(type->name, t->name) == 0) {
2077 pr_info("Tracer %s already registered\n",
2084 if (!type->set_flag)
2085 type->set_flag = &dummy_set_flag;
2087 /* allocate a dummy tracer_flags */
2088 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2093 type->flags->val = 0;
2094 type->flags->opts = dummy_tracer_opt;
2096 if (!type->flags->opts)
2097 type->flags->opts = dummy_tracer_opt;
2099 /* store the tracer for __set_tracer_option */
2100 type->flags->trace = type;
2102 ret = run_tracer_selftest(type);
2106 type->next = trace_types;
2108 add_tracer_options(&global_trace, type);
2111 tracing_selftest_running = false;
2112 mutex_unlock(&trace_types_lock);
2114 if (ret || !default_bootup_tracer)
2117 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2120 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2121 /* Do we want this tracer to start on bootup? */
2122 tracing_set_tracer(&global_trace, type->name);
2123 default_bootup_tracer = NULL;
2125 apply_trace_boot_options();
2127 /* disable other selftests, since this will break it. */
2128 disable_tracing_selftest("running a tracer");
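/*
 * Registration sketch (illustrative; "mytracer" and mytracer_init() are
 * hypothetical). register_tracer() is __init, so only built-in tracers
 * can use it:
 *
 *	static struct tracer mytracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *	};
 *
 *	register_tracer(&mytracer);
 */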
2134 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2136 struct trace_buffer *buffer = buf->buffer;
2141 ring_buffer_record_disable(buffer);
2143 /* Make sure all commits have finished */
2145 ring_buffer_reset_cpu(buffer, cpu);
2147 ring_buffer_record_enable(buffer);
2150 void tracing_reset_online_cpus(struct array_buffer *buf)
2152 struct trace_buffer *buffer = buf->buffer;
2157 ring_buffer_record_disable(buffer);
2159 /* Make sure all commits have finished */
2162 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2164 ring_buffer_reset_online_cpus(buffer);
2166 ring_buffer_record_enable(buffer);
2169 /* Must have trace_types_lock held */
2170 void tracing_reset_all_online_cpus(void)
2172 struct trace_array *tr;
2174 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2175 if (!tr->clear_trace)
2177 tr->clear_trace = false;
2178 tracing_reset_online_cpus(&tr->array_buffer);
2179 #ifdef CONFIG_TRACER_MAX_TRACE
2180 tracing_reset_online_cpus(&tr->max_buffer);
2186 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2187 * is the tgid last observed corresponding to pid=i.
2189 static int *tgid_map;
2191 /* The maximum valid index into tgid_map. */
2192 static size_t tgid_map_max;
2194 #define SAVED_CMDLINES_DEFAULT 128
2195 #define NO_CMDLINE_MAP UINT_MAX
2197 * Preemption must be disabled before acquiring trace_cmdline_lock.
2198 * The various trace_arrays' max_lock must be acquired in a context
2199 * where interrupts are disabled.
2201 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2202 struct saved_cmdlines_buffer {
2203 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2204 unsigned *map_cmdline_to_pid;
2205 unsigned cmdline_num;
2207 char *saved_cmdlines;
2209 static struct saved_cmdlines_buffer *savedcmd;
2211 static inline char *get_saved_cmdlines(int idx)
2213 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2216 static inline void set_cmdline(int idx, const char *cmdline)
2218 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2221 static int allocate_cmdlines_buffer(unsigned int val,
2222 struct saved_cmdlines_buffer *s)
2224 s->map_cmdline_to_pid = kmalloc_array(val,
2225 sizeof(*s->map_cmdline_to_pid),
2227 if (!s->map_cmdline_to_pid)
2230 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2231 if (!s->saved_cmdlines) {
2232 kfree(s->map_cmdline_to_pid);
2237 s->cmdline_num = val;
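/*
 * NO_CMDLINE_MAP is UINT_MAX, so memset()'ing with it fills every byte
 * with 0xff, which leaves NO_CMDLINE_MAP in each unsigned slot below.
 */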
2238 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2239 sizeof(s->map_pid_to_cmdline));
2240 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2241 val * sizeof(*s->map_cmdline_to_pid));
2246 static int trace_create_savedcmd(void)
2250 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2254 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2264 int is_tracing_stopped(void)
2266 return global_trace.stop_count;
2270 * tracing_start - quick start of the tracer
2272 * If tracing is enabled but was stopped by tracing_stop,
2273 * this will start the tracer back up.
2275 void tracing_start(void)
2277 struct trace_buffer *buffer;
2278 unsigned long flags;
2280 if (tracing_disabled)
2283 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2284 if (--global_trace.stop_count) {
2285 if (global_trace.stop_count < 0) {
2286 /* Someone screwed up their debugging */
2288 global_trace.stop_count = 0;
2293 /* Prevent the buffers from switching */
2294 arch_spin_lock(&global_trace.max_lock);
2296 buffer = global_trace.array_buffer.buffer;
2298 ring_buffer_record_enable(buffer);
2300 #ifdef CONFIG_TRACER_MAX_TRACE
2301 buffer = global_trace.max_buffer.buffer;
2303 ring_buffer_record_enable(buffer);
2306 arch_spin_unlock(&global_trace.max_lock);
2309 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2312 static void tracing_start_tr(struct trace_array *tr)
2314 struct trace_buffer *buffer;
2315 unsigned long flags;
2317 if (tracing_disabled)
2320 /* If global, we need to also start the max tracer */
2321 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2322 return tracing_start();
2324 raw_spin_lock_irqsave(&tr->start_lock, flags);
2326 if (--tr->stop_count) {
2327 if (tr->stop_count < 0) {
2328 /* Someone screwed up their debugging */
2335 buffer = tr->array_buffer.buffer;
2337 ring_buffer_record_enable(buffer);
2340 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2344 * tracing_stop - quick stop of the tracer
2346 * Lightweight way to stop tracing. Use in conjunction with tracing_start().
2349 void tracing_stop(void)
2351 struct trace_buffer *buffer;
2352 unsigned long flags;
2354 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2355 if (global_trace.stop_count++)
2358 /* Prevent the buffers from switching */
2359 arch_spin_lock(&global_trace.max_lock);
2361 buffer = global_trace.array_buffer.buffer;
2363 ring_buffer_record_disable(buffer);
2365 #ifdef CONFIG_TRACER_MAX_TRACE
2366 buffer = global_trace.max_buffer.buffer;
2368 ring_buffer_record_disable(buffer);
2371 arch_spin_unlock(&global_trace.max_lock);
2374 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2377 static void tracing_stop_tr(struct trace_array *tr)
2379 struct trace_buffer *buffer;
2380 unsigned long flags;
2382 /* If global, we need to also stop the max tracer */
2383 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2384 return tracing_stop();
2386 raw_spin_lock_irqsave(&tr->start_lock, flags);
2387 if (tr->stop_count++)
2390 buffer = tr->array_buffer.buffer;
2392 ring_buffer_record_disable(buffer);
2395 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2398 static int trace_save_cmdline(struct task_struct *tsk)
2402 /* treat recording of idle task as a success */
2406 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2409 * It's not the end of the world if we don't get
2410 * the lock, but we also don't want to spin
2411 * nor do we want to disable interrupts,
2412 * so if we miss here, then better luck next time.
2414	 * This is called within the scheduler and wake up paths, so interrupts
2415	 * had better be disabled and the run queue lock held.
2417 lockdep_assert_preemption_disabled();
2418 if (!arch_spin_trylock(&trace_cmdline_lock))
2421 idx = savedcmd->map_pid_to_cmdline[tpid];
2422 if (idx == NO_CMDLINE_MAP) {
2423 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2425 savedcmd->map_pid_to_cmdline[tpid] = idx;
2426 savedcmd->cmdline_idx = idx;
2429 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2430 set_cmdline(idx, tsk->comm);
2432 arch_spin_unlock(&trace_cmdline_lock);
2437 static void __trace_find_cmdline(int pid, char comm[])
2443 strcpy(comm, "<idle>");
2447 if (WARN_ON_ONCE(pid < 0)) {
2448 strcpy(comm, "<XXX>");
2452 tpid = pid & (PID_MAX_DEFAULT - 1);
2453 map = savedcmd->map_pid_to_cmdline[tpid];
2454 if (map != NO_CMDLINE_MAP) {
2455 tpid = savedcmd->map_cmdline_to_pid[map];
2457 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2461 strcpy(comm, "<...>");
2464 void trace_find_cmdline(int pid, char comm[])
2467 arch_spin_lock(&trace_cmdline_lock);
2469 __trace_find_cmdline(pid, comm);
2471 arch_spin_unlock(&trace_cmdline_lock);
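/*
 * Illustrative sketch (not part of the original file): output code resolves
 * a pid to the most recently recorded comm via trace_find_cmdline(); the
 * destination buffer must hold at least TASK_COMM_LEN bytes. Guarded by a
 * documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static void example_print_task(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%s-%d", comm, pid);
}
#endif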
2475 static int *trace_find_tgid_ptr(int pid)
2478 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2479 * if we observe a non-NULL tgid_map then we also observe the correct
2482 int *map = smp_load_acquire(&tgid_map);
2484 if (unlikely(!map || pid > tgid_map_max))
2490 int trace_find_tgid(int pid)
2492 int *ptr = trace_find_tgid_ptr(pid);
2494 return ptr ? *ptr : 0;
2497 static int trace_save_tgid(struct task_struct *tsk)
2501 /* treat recording of idle task as a success */
2505 ptr = trace_find_tgid_ptr(tsk->pid);
2513 static bool tracing_record_taskinfo_skip(int flags)
2515 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2517 if (!__this_cpu_read(trace_taskinfo_save))
2523 * tracing_record_taskinfo - record the task info of a task
2525 * @task: task to record
2526 * @flags: TRACE_RECORD_CMDLINE for recording comm
2527 * TRACE_RECORD_TGID for recording tgid
2529 void tracing_record_taskinfo(struct task_struct *task, int flags)
2533 if (tracing_record_taskinfo_skip(flags))
2537 * Record as much task information as possible. If some fail, continue
2538 * to try to record the others.
2540 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2541 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2543 /* If recording any information failed, retry again soon. */
2547 __this_cpu_write(trace_taskinfo_save, false);
2551 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2553 * @prev: previous task during sched_switch
2554 * @next: next task during sched_switch
2555 * @flags: TRACE_RECORD_CMDLINE for recording comm
2556 * TRACE_RECORD_TGID for recording tgid
2558 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2559 struct task_struct *next, int flags)
2563 if (tracing_record_taskinfo_skip(flags))
2567 * Record as much task information as possible. If some fail, continue
2568 * to try to record the others.
2570 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2571 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2572 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2573 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2575 /* If recording any information failed, retry again soon. */
2579 __this_cpu_write(trace_taskinfo_save, false);
2582 /* Helpers to record a specific task information */
2583 void tracing_record_cmdline(struct task_struct *task)
2585 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2588 void tracing_record_tgid(struct task_struct *task)
2590 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
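/*
 * Illustrative sketch (not part of the original file): a tracer that emits
 * events on behalf of a task would typically record its comm and tgid first
 * so that later output can resolve the pid. Guarded by a documentation-only
 * macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static void example_record_current_task(void)
{
	tracing_record_cmdline(current);
	tracing_record_tgid(current);
}
#endif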
2594 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2595 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2596 * simplifies those functions and keeps them in sync.
2598 enum print_line_t trace_handle_return(struct trace_seq *s)
2600 return trace_seq_has_overflowed(s) ?
2601 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2603 EXPORT_SYMBOL_GPL(trace_handle_return);
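/*
 * Illustrative sketch (not part of the original file): a typical trace_event
 * output callback writes into iter->seq and then lets trace_handle_return()
 * collapse the overflow check into a single return value. The fields printed
 * here are just an example. Guarded by a documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static enum print_line_t example_output_line(struct trace_iterator *iter)
{
	trace_seq_printf(&iter->seq, "cpu=%d ts=%llu\n", iter->cpu, iter->ts);

	return trace_handle_return(&iter->seq);
}
#endif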
2605 static unsigned short migration_disable_value(void)
2607 #if defined(CONFIG_SMP)
2608 return current->migration_disabled;
2614 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2616 unsigned int trace_flags = irqs_status;
2619 pc = preempt_count();
2622 trace_flags |= TRACE_FLAG_NMI;
2623 if (pc & HARDIRQ_MASK)
2624 trace_flags |= TRACE_FLAG_HARDIRQ;
2625 if (in_serving_softirq())
2626 trace_flags |= TRACE_FLAG_SOFTIRQ;
2628 if (tif_need_resched())
2629 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2630 if (test_preempt_need_resched())
2631 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2632 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2633 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
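/*
 * Illustrative sketch (not part of the original file): the packed context
 * word returned above can be unpacked as follows: bits 0-3 hold the (capped)
 * preempt depth, bits 4-7 the (capped) migrate-disable depth, and bits 16 and
 * up the TRACE_FLAG_* bits. Guarded by a documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static void example_unpack_trace_ctx(unsigned int trace_ctx)
{
	unsigned int preempt_depth = trace_ctx & 0xf;
	unsigned int migrate_depth = (trace_ctx >> 4) & 0xf;
	unsigned int flags = trace_ctx >> 16;

	pr_debug("preempt=%u migrate=%u flags=0x%x\n",
		 preempt_depth, migrate_depth, flags);
}
#endif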
2636 struct ring_buffer_event *
2637 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2640 unsigned int trace_ctx)
2642 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2645 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2646 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2647 static int trace_buffered_event_ref;
2650 * trace_buffered_event_enable - enable buffering events
2652 * When events are being filtered, it is quicker to use a temporary
2653 * buffer to write the event data into if there's a likely chance
2654 * that it will not be committed. The discard of the ring buffer
2655 * is not as fast as committing, and is much slower than copying to the temp buffer.
2658 * When an event is to be filtered, allocate per cpu buffers to
2659 * write the event data into, and if the event is filtered and discarded
2660 * it is simply dropped; otherwise, the entire data is committed and not copied.
2663 void trace_buffered_event_enable(void)
2665 struct ring_buffer_event *event;
2669 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2671 if (trace_buffered_event_ref++)
2674 for_each_tracing_cpu(cpu) {
2675 page = alloc_pages_node(cpu_to_node(cpu),
2676 GFP_KERNEL | __GFP_NORETRY, 0);
2680 event = page_address(page);
2681 memset(event, 0, sizeof(*event));
2683 per_cpu(trace_buffered_event, cpu) = event;
2686 if (cpu == smp_processor_id() &&
2687 __this_cpu_read(trace_buffered_event) !=
2688 per_cpu(trace_buffered_event, cpu))
2695 trace_buffered_event_disable();
2698 static void enable_trace_buffered_event(void *data)
2700 /* Probably not needed, but do it anyway */
2702 this_cpu_dec(trace_buffered_event_cnt);
2705 static void disable_trace_buffered_event(void *data)
2707 this_cpu_inc(trace_buffered_event_cnt);
2711 * trace_buffered_event_disable - disable buffering events
2713 * When a filter is removed, it is faster to not use the buffered
2714 * events, and to commit directly into the ring buffer. Free up
2715 * the temp buffers when there are no more users. This requires
2716 * special synchronization with current events.
2718 void trace_buffered_event_disable(void)
2722 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2724 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2727 if (--trace_buffered_event_ref)
2731 /* For each CPU, set the buffer as used. */
2732 smp_call_function_many(tracing_buffer_mask,
2733 disable_trace_buffered_event, NULL, 1);
2736 /* Wait for all current users to finish */
2739 for_each_tracing_cpu(cpu) {
2740 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2741 per_cpu(trace_buffered_event, cpu) = NULL;
2744 * Make sure trace_buffered_event is NULL before clearing
2745 * trace_buffered_event_cnt.
2750 /* Do the work on each cpu */
2751 smp_call_function_many(tracing_buffer_mask,
2752 enable_trace_buffered_event, NULL, 1);
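/*
 * Illustrative sketch (not part of the original file): the buffered-event
 * helpers above are expected to be called in matched pairs while holding
 * event_mutex, typically when an event filter is installed and later removed.
 * Guarded by a documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static void example_filter_lifetime(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();		/* filter installed */
	mutex_unlock(&event_mutex);

	/* ... events are staged through the per-CPU buffers ... */

	mutex_lock(&event_mutex);
	trace_buffered_event_disable();		/* filter removed */
	mutex_unlock(&event_mutex);
}
#endif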
2756 static struct trace_buffer *temp_buffer;
2758 struct ring_buffer_event *
2759 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2760 struct trace_event_file *trace_file,
2761 int type, unsigned long len,
2762 unsigned int trace_ctx)
2764 struct ring_buffer_event *entry;
2765 struct trace_array *tr = trace_file->tr;
2768 *current_rb = tr->array_buffer.buffer;
2770 if (!tr->no_filter_buffering_ref &&
2771 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2772 (entry = this_cpu_read(trace_buffered_event))) {
2774 * Filtering is on, so try to use the per cpu buffer first.
2775 * This buffer will simulate a ring_buffer_event,
2776 * where the type_len is zero and the array[0] will
2777 * hold the full length.
2778		 * (see include/linux/ring_buffer.h for details on
2779 * how the ring_buffer_event is structured).
2781 * Using a temp buffer during filtering and copying it
2782 * on a matched filter is quicker than writing directly
2783 * into the ring buffer and then discarding it when
2784 * it doesn't match. That is because the discard
2785 * requires several atomic operations to get right.
2786		 * Copying on a match and doing nothing on a failed match
2787		 * is still quicker than not copying on a match, but then having
2788		 * to discard out of the ring buffer on a failed match.
2790 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2792 val = this_cpu_inc_return(trace_buffered_event_cnt);
2795 * Preemption is disabled, but interrupts and NMIs
2796 * can still come in now. If that happens after
2797 * the above increment, then it will have to go
2798 * back to the old method of allocating the event
2799 * on the ring buffer, and if the filter fails, it
2800 * will have to call ring_buffer_discard_commit()
2803 * Need to also check the unlikely case that the
2804 * length is bigger than the temp buffer size.
2805 * If that happens, then the reserve is pretty much
2806 * guaranteed to fail, as the ring buffer currently
2807 * only allows events less than a page. But that may
2808 * change in the future, so let the ring buffer reserve
2809 * handle the failure in that case.
2811 if (val == 1 && likely(len <= max_len)) {
2812 trace_event_setup(entry, type, trace_ctx);
2813 entry->array[0] = len;
2816 this_cpu_dec(trace_buffered_event_cnt);
2819 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2822 * If tracing is off, but we have triggers enabled
2823 * we still need to look at the event data. Use the temp_buffer
2824 * to store the trace event for the trigger to use. It's recursive
2825 * safe and will not be recorded anywhere.
2827 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2828 *current_rb = temp_buffer;
2829 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2834 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
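/*
 * Illustrative sketch (not part of the original file): generated TRACE_EVENT()
 * code pairs the reserve above with trace_event_buffer_commit(), roughly like
 * this. The event layout (example_entry) and its field are hypothetical.
 * Guarded by a documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
struct example_entry {
	struct trace_entry	ent;
	unsigned long		value;
};

static void example_emit_event(struct trace_event_file *trace_file,
			       unsigned long value)
{
	struct trace_event_buffer fbuffer;
	struct example_entry *entry;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;

	entry->value = value;
	trace_event_buffer_commit(&fbuffer);
}
#endif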
2836 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2837 static DEFINE_MUTEX(tracepoint_printk_mutex);
2839 static void output_printk(struct trace_event_buffer *fbuffer)
2841 struct trace_event_call *event_call;
2842 struct trace_event_file *file;
2843 struct trace_event *event;
2844 unsigned long flags;
2845 struct trace_iterator *iter = tracepoint_print_iter;
2847 /* We should never get here if iter is NULL */
2848 if (WARN_ON_ONCE(!iter))
2851 event_call = fbuffer->trace_file->event_call;
2852 if (!event_call || !event_call->event.funcs ||
2853 !event_call->event.funcs->trace)
2856 file = fbuffer->trace_file;
2857 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2858 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2859 !filter_match_preds(file->filter, fbuffer->entry)))
2862 event = &fbuffer->trace_file->event_call->event;
2864 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2865 trace_seq_init(&iter->seq);
2866 iter->ent = fbuffer->entry;
2867 event_call->event.funcs->trace(iter, 0, event);
2868 trace_seq_putc(&iter->seq, 0);
2869 printk("%s", iter->seq.buffer);
2871 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2874 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2875 void *buffer, size_t *lenp,
2878 int save_tracepoint_printk;
2881 mutex_lock(&tracepoint_printk_mutex);
2882 save_tracepoint_printk = tracepoint_printk;
2884 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2887 * This will force exiting early, as tracepoint_printk
2888	 * is always zero when tracepoint_print_iter is not allocated.
2890 if (!tracepoint_print_iter)
2891 tracepoint_printk = 0;
2893 if (save_tracepoint_printk == tracepoint_printk)
2896 if (tracepoint_printk)
2897 static_key_enable(&tracepoint_printk_key.key);
2899 static_key_disable(&tracepoint_printk_key.key);
2902 mutex_unlock(&tracepoint_printk_mutex);
2907 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2909 enum event_trigger_type tt = ETT_NONE;
2910 struct trace_event_file *file = fbuffer->trace_file;
2912 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2913 fbuffer->entry, &tt))
2916 if (static_key_false(&tracepoint_printk_key.key))
2917 output_printk(fbuffer);
2919 if (static_branch_unlikely(&trace_event_exports_enabled))
2920 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2922 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2923 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2927 event_triggers_post_call(file, tt);
2930 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2935 * trace_buffer_unlock_commit_regs()
2936 * trace_event_buffer_commit()
2937 * trace_event_raw_event_xxx()
2939 # define STACK_SKIP 3
2941 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2942 struct trace_buffer *buffer,
2943 struct ring_buffer_event *event,
2944 unsigned int trace_ctx,
2945 struct pt_regs *regs)
2947 __buffer_unlock_commit(buffer, event);
2950 * If regs is not set, then skip the necessary functions.
2951 * Note, we can still get here via blktrace, wakeup tracer
2952 * and mmiotrace, but that's ok if they lose a function or
2953 * two. They are not that meaningful.
2955 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2956 ftrace_trace_userstack(tr, buffer, trace_ctx);
2960 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2963 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2964 struct ring_buffer_event *event)
2966 __buffer_unlock_commit(buffer, event);
2970 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2971 parent_ip, unsigned int trace_ctx)
2973 struct trace_event_call *call = &event_function;
2974 struct trace_buffer *buffer = tr->array_buffer.buffer;
2975 struct ring_buffer_event *event;
2976 struct ftrace_entry *entry;
2978 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2982 entry = ring_buffer_event_data(event);
2984 entry->parent_ip = parent_ip;
2986 if (!call_filter_check_discard(call, entry, buffer, event)) {
2987 if (static_branch_unlikely(&trace_function_exports_enabled))
2988 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2989 __buffer_unlock_commit(buffer, event);
2993 #ifdef CONFIG_STACKTRACE
2995 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2996 #define FTRACE_KSTACK_NESTING 4
2998 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3000 struct ftrace_stack {
3001 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3005 struct ftrace_stacks {
3006 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3009 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3010 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3012 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3013 unsigned int trace_ctx,
3014 int skip, struct pt_regs *regs)
3016 struct trace_event_call *call = &event_kernel_stack;
3017 struct ring_buffer_event *event;
3018 unsigned int size, nr_entries;
3019 struct ftrace_stack *fstack;
3020 struct stack_entry *entry;
3024	 * Add one, for this function and the call to save_stack_trace().
3025 * If regs is set, then these functions will not be in the way.
3027 #ifndef CONFIG_UNWINDER_ORC
3032 preempt_disable_notrace();
3034 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3036 /* This should never happen. If it does, yell once and skip */
3037 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3041 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3042 * interrupt will either see the value pre increment or post
3043 * increment. If the interrupt happens pre increment it will have
3044 * restored the counter when it returns. We just need a barrier to
3045 * keep gcc from moving things around.
3049 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3050 size = ARRAY_SIZE(fstack->calls);
3053 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3056 nr_entries = stack_trace_save(fstack->calls, size, skip);
3059 size = nr_entries * sizeof(unsigned long);
3060 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3061 (sizeof(*entry) - sizeof(entry->caller)) + size,
3065 entry = ring_buffer_event_data(event);
3067 memcpy(&entry->caller, fstack->calls, size);
3068 entry->size = nr_entries;
3070 if (!call_filter_check_discard(call, entry, buffer, event))
3071 __buffer_unlock_commit(buffer, event);
3074 /* Again, don't let gcc optimize things here */
3076 __this_cpu_dec(ftrace_stack_reserve);
3077 preempt_enable_notrace();
3081 static inline void ftrace_trace_stack(struct trace_array *tr,
3082 struct trace_buffer *buffer,
3083 unsigned int trace_ctx,
3084 int skip, struct pt_regs *regs)
3086 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3089 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3092 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3095 struct trace_buffer *buffer = tr->array_buffer.buffer;
3097 if (rcu_is_watching()) {
3098 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3103 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3104 * but if the above rcu_is_watching() failed, then the NMI
3105 * triggered someplace critical, and rcu_irq_enter() should
3106 * not be called from NMI.
3108 if (unlikely(in_nmi()))
3111 rcu_irq_enter_irqson();
3112 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3113 rcu_irq_exit_irqson();
3117 * trace_dump_stack - record a stack back trace in the trace buffer
3118 * @skip: Number of functions to skip (helper handlers)
3120 void trace_dump_stack(int skip)
3122 if (tracing_disabled || tracing_selftest_running)
3125 #ifndef CONFIG_UNWINDER_ORC
3126 /* Skip 1 to skip this function. */
3129 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3130 tracing_gen_ctx(), skip, NULL);
3132 EXPORT_SYMBOL_GPL(trace_dump_stack);
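/*
 * Illustrative sketch (not part of the original file): a debug call site can
 * record its own backtrace in the trace buffer, skipping its helper frame so
 * the recorded stack begins at the caller. Guarded by a documentation-only
 * macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static void example_dump_stack_here(void)
{
	/* Skip this helper so the recorded stack starts at the caller. */
	trace_dump_stack(1);
}
#endif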
3134 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3135 static DEFINE_PER_CPU(int, user_stack_count);
3138 ftrace_trace_userstack(struct trace_array *tr,
3139 struct trace_buffer *buffer, unsigned int trace_ctx)
3141 struct trace_event_call *call = &event_user_stack;
3142 struct ring_buffer_event *event;
3143 struct userstack_entry *entry;
3145 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3149	 * NMIs can not handle page faults, even with fixups.
3150	 * Saving the user stack can (and often does) fault.
3152 if (unlikely(in_nmi()))
3156 * prevent recursion, since the user stack tracing may
3157 * trigger other kernel events.
3160 if (__this_cpu_read(user_stack_count))
3163 __this_cpu_inc(user_stack_count);
3165 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3166 sizeof(*entry), trace_ctx);
3168 goto out_drop_count;
3169 entry = ring_buffer_event_data(event);
3171 entry->tgid = current->tgid;
3172 memset(&entry->caller, 0, sizeof(entry->caller));
3174 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3175 if (!call_filter_check_discard(call, entry, buffer, event))
3176 __buffer_unlock_commit(buffer, event);
3179 __this_cpu_dec(user_stack_count);
3183 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3184 static void ftrace_trace_userstack(struct trace_array *tr,
3185 struct trace_buffer *buffer,
3186 unsigned int trace_ctx)
3189 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3191 #endif /* CONFIG_STACKTRACE */
3194 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3195 unsigned long long delta)
3197 entry->bottom_delta_ts = delta & U32_MAX;
3198 entry->top_delta_ts = (delta >> 32);
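/*
 * Illustrative sketch (not part of the original file): the 64-bit delta split
 * above is reassembled on the output side from the two 32-bit halves. Guarded
 * by a documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static u64 example_read_delta_ts(const struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}
#endif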
3201 void trace_last_func_repeats(struct trace_array *tr,
3202 struct trace_func_repeats *last_info,
3203 unsigned int trace_ctx)
3205 struct trace_buffer *buffer = tr->array_buffer.buffer;
3206 struct func_repeats_entry *entry;
3207 struct ring_buffer_event *event;
3210 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3211 sizeof(*entry), trace_ctx);
3215 delta = ring_buffer_event_time_stamp(buffer, event) -
3216 last_info->ts_last_call;
3218 entry = ring_buffer_event_data(event);
3219 entry->ip = last_info->ip;
3220 entry->parent_ip = last_info->parent_ip;
3221 entry->count = last_info->count;
3222 func_repeats_set_delta_ts(entry, delta);
3224 __buffer_unlock_commit(buffer, event);
3227 /* created for use with alloc_percpu */
3228 struct trace_buffer_struct {
3230 char buffer[4][TRACE_BUF_SIZE];
3233 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3236 * This allows for lockless recording. If we're nested too deeply, then
3237 * this returns NULL.
3239 static char *get_trace_buf(void)
3241 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3243 if (!trace_percpu_buffer || buffer->nesting >= 4)
3248 /* Interrupts must see nesting incremented before we use the buffer */
3250 return &buffer->buffer[buffer->nesting - 1][0];
3253 static void put_trace_buf(void)
3255 /* Don't let the decrement of nesting leak before this */
3257 this_cpu_dec(trace_percpu_buffer->nesting);
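/*
 * Illustrative sketch (not part of the original file): get_trace_buf() and
 * put_trace_buf() must be paired and called with preemption disabled, as the
 * trace_vbprintk()/__trace_array_vprintk() paths below do. Guarded by a
 * documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static void example_scratch_buffer_use(void)
{
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		snprintf(tbuffer, TRACE_BUF_SIZE, "scratch data");
		/* ... copy tbuffer into a ring buffer event ... */
		put_trace_buf();
	}
	preempt_enable_notrace();
}
#endif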
3260 static int alloc_percpu_trace_buffer(void)
3262 struct trace_buffer_struct __percpu *buffers;
3264 if (trace_percpu_buffer)
3267 buffers = alloc_percpu(struct trace_buffer_struct);
3268 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3271 trace_percpu_buffer = buffers;
3275 static int buffers_allocated;
3277 void trace_printk_init_buffers(void)
3279 if (buffers_allocated)
3282 if (alloc_percpu_trace_buffer())
3285 /* trace_printk() is for debug use only. Don't use it in production. */
3288 pr_warn("**********************************************************\n");
3289 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3291 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3293 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3294 pr_warn("** unsafe for production use. **\n");
3296 pr_warn("** If you see this message and you are not debugging **\n");
3297 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3299 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3300 pr_warn("**********************************************************\n");
3302 /* Expand the buffers to set size */
3303 tracing_update_buffers();
3305 buffers_allocated = 1;
3308 * trace_printk_init_buffers() can be called by modules.
3309 * If that happens, then we need to start cmdline recording
3310 * directly here. If the global_trace.buffer is already
3311 * allocated here, then this was called by module code.
3313 if (global_trace.array_buffer.buffer)
3314 tracing_start_cmdline_record();
3316 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3318 void trace_printk_start_comm(void)
3320 /* Start tracing comms if trace printk is set */
3321 if (!buffers_allocated)
3323 tracing_start_cmdline_record();
3326 static void trace_printk_start_stop_comm(int enabled)
3328 if (!buffers_allocated)
3332 tracing_start_cmdline_record();
3334 tracing_stop_cmdline_record();
3338 * trace_vbprintk - write binary msg to tracing buffer
3339 * @ip: The address of the caller
3340 * @fmt: The string format to write to the buffer
3341 * @args: Arguments for @fmt
3343 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3345 struct trace_event_call *call = &event_bprint;
3346 struct ring_buffer_event *event;
3347 struct trace_buffer *buffer;
3348 struct trace_array *tr = &global_trace;
3349 struct bprint_entry *entry;
3350 unsigned int trace_ctx;
3354 if (unlikely(tracing_selftest_running || tracing_disabled))
3357 /* Don't pollute graph traces with trace_vprintk internals */
3358 pause_graph_tracing();
3360 trace_ctx = tracing_gen_ctx();
3361 preempt_disable_notrace();
3363 tbuffer = get_trace_buf();
3369 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3371 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3374 size = sizeof(*entry) + sizeof(u32) * len;
3375 buffer = tr->array_buffer.buffer;
3376 ring_buffer_nest_start(buffer);
3377 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3381 entry = ring_buffer_event_data(event);
3385 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3386 if (!call_filter_check_discard(call, entry, buffer, event)) {
3387 __buffer_unlock_commit(buffer, event);
3388 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3392 ring_buffer_nest_end(buffer);
3397 preempt_enable_notrace();
3398 unpause_graph_tracing();
3402 EXPORT_SYMBOL_GPL(trace_vbprintk);
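/*
 * Illustrative sketch (not part of the original file): trace_vbprintk() is
 * normally reached through the trace_printk() macro from debugging code,
 * roughly like this (debug use only; see the boot-time banner above).
 * Guarded by a documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static void example_debug_print(int value)
{
	/* Lands in the bprintk fast path handled by trace_vbprintk(). */
	trace_printk("value is now %d\n", value);
}
#endif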
3406 __trace_array_vprintk(struct trace_buffer *buffer,
3407 unsigned long ip, const char *fmt, va_list args)
3409 struct trace_event_call *call = &event_print;
3410 struct ring_buffer_event *event;
3412 struct print_entry *entry;
3413 unsigned int trace_ctx;
3416 if (tracing_disabled || tracing_selftest_running)
3419 /* Don't pollute graph traces with trace_vprintk internals */
3420 pause_graph_tracing();
3422 trace_ctx = tracing_gen_ctx();
3423 preempt_disable_notrace();
3426 tbuffer = get_trace_buf();
3432 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3434 size = sizeof(*entry) + len + 1;
3435 ring_buffer_nest_start(buffer);
3436 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3440 entry = ring_buffer_event_data(event);
3443 memcpy(&entry->buf, tbuffer, len + 1);
3444 if (!call_filter_check_discard(call, entry, buffer, event)) {
3445 __buffer_unlock_commit(buffer, event);
3446 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3450 ring_buffer_nest_end(buffer);
3454 preempt_enable_notrace();
3455 unpause_graph_tracing();
3461 int trace_array_vprintk(struct trace_array *tr,
3462 unsigned long ip, const char *fmt, va_list args)
3464 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3468 * trace_array_printk - Print a message to a specific instance
3469 * @tr: The instance trace_array descriptor
3470 * @ip: The instruction pointer that this is called from.
3471 * @fmt: The format to print (printf format)
3473 * If a subsystem sets up its own instance, they have the right to
3474 * printk strings into their tracing instance buffer using this
3475 * function. Note, this function will not write into the top level
3476 * buffer (use trace_printk() for that), as writing into the top level
3477 * buffer should only have events that can be individually disabled.
3478 * trace_printk() is only used for debugging a kernel, and should never
3479 * be incorporated into normal use.
3481 * trace_array_printk() can be used, as it will not add noise to the
3482 * top level tracing buffer.
3484 * Note, trace_array_init_printk() must be called on @tr before this
3488 int trace_array_printk(struct trace_array *tr,
3489 unsigned long ip, const char *fmt, ...)
3497 /* This is only allowed for created instances */
3498 if (tr == &global_trace)
3501 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3505 ret = trace_array_vprintk(tr, ip, fmt, ap);
3509 EXPORT_SYMBOL_GPL(trace_array_printk);
3512 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3513 * @tr: The trace array to initialize the buffers for
3515 * As trace_array_printk() only writes into instances, they are OK to
3516 * have in the kernel (unlike trace_printk()). This needs to be called
3517 * before trace_array_printk() can be used on a trace_array.
3519 int trace_array_init_printk(struct trace_array *tr)
3524 /* This is only allowed for created instances */
3525 if (tr == &global_trace)
3528 return alloc_percpu_trace_buffer();
3530 EXPORT_SYMBOL_GPL(trace_array_init_printk);
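/*
 * Illustrative sketch (not part of the original file): a subsystem with its
 * own instance initializes the printk buffers once and then prints into that
 * instance. The instance name "my_subsys" is made up for the example.
 * Guarded by a documentation-only macro.
 */
#ifdef TRACE_DOC_EXAMPLE
static void example_instance_printk(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("my_subsys");
	if (!tr)
		return;

	if (!trace_array_init_printk(tr))
		trace_array_printk(tr, _THIS_IP_, "hello from my_subsys\n");

	trace_array_put(tr);
}
#endif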
3533 int trace_array_printk_buf(struct trace_buffer *buffer,
3534 unsigned long ip, const char *fmt, ...)
3539 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3543 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3549 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3551 return trace_array_vprintk(&global_trace, ip, fmt, args);
3553 EXPORT_SYMBOL_GPL(trace_vprintk);
3555 static void trace_iterator_increment(struct trace_iterator *iter)
3557 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3561 ring_buffer_iter_advance(buf_iter);
3564 static struct trace_entry *
3565 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3566 unsigned long *lost_events)
3568 struct ring_buffer_event *event;
3569 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3572 event = ring_buffer_iter_peek(buf_iter, ts);
3574 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3575 (unsigned long)-1 : 0;
3577 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3582 iter->ent_size = ring_buffer_event_length(event);
3583 return ring_buffer_event_data(event);
3589 static struct trace_entry *
3590 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3591 unsigned long *missing_events, u64 *ent_ts)
3593 struct trace_buffer *buffer = iter->array_buffer->buffer;
3594 struct trace_entry *ent, *next = NULL;
3595 unsigned long lost_events = 0, next_lost = 0;
3596 int cpu_file = iter->cpu_file;
3597 u64 next_ts = 0, ts;
3603	 * If we are in a per_cpu trace file, don't bother iterating over
3604	 * all CPUs; peek at that CPU directly.
3606 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3607 if (ring_buffer_empty_cpu(buffer, cpu_file))
3609 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3611 *ent_cpu = cpu_file;
3616 for_each_tracing_cpu(cpu) {
3618 if (ring_buffer_empty_cpu(buffer, cpu))
3621 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3624 * Pick the entry with the smallest timestamp:
3626 if (ent && (!next || ts < next_ts)) {
3630 next_lost = lost_events;
3631 next_size = iter->ent_size;
3635 iter->ent_size = next_size;
3638 *ent_cpu = next_cpu;
3644 *missing_events = next_lost;
3649 #define STATIC_FMT_BUF_SIZE 128
3650 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3652 static char *trace_iter_expand_format(struct trace_iterator *iter)
3657	 * iter->tr is NULL when used with tp_printk, which means this can
3658	 * get called where it is not safe to call krealloc().
3660 if (!iter->tr || iter->fmt == static_fmt_buf)
3663 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3666 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3673 /* Returns true if the string is safe to dereference from an event */
3674 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3677 unsigned long addr = (unsigned long)str;
3678 struct trace_event *trace_event;
3679 struct trace_event_call *event;
3681 /* Ignore strings with no length */
3685 /* OK if part of the event data */
3686 if ((addr >= (unsigned long)iter->ent) &&
3687 (addr < (unsigned long)iter->ent + iter->ent_size))
3690 /* OK if part of the temp seq buffer */
3691 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3692 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3695 /* Core rodata can not be freed */
3696 if (is_kernel_rodata(addr))
3699 if (trace_is_tracepoint_string(str))
3703 * Now this could be a module event, referencing core module
3704 * data, which is OK.
3709 trace_event = ftrace_find_event(iter->ent->type);
3713 event = container_of(trace_event, struct trace_event_call, event);
3714 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3717 /* Would rather have rodata, but this will suffice */
3718 if (within_module_core(addr, event->module))
3724 static const char *show_buffer(struct trace_seq *s)
3726 struct seq_buf *seq = &s->seq;
3728 seq_buf_terminate(seq);
3733 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3735 static int test_can_verify_check(const char *fmt, ...)
3742	 * The verifier depends on vsnprintf() modifying the va_list that is
3743	 * passed to it, where it is sent as a reference. Some architectures
3744	 * (like x86_32) pass it by value, which means that vsnprintf()
3745	 * does not modify the va_list passed to it, and the verifier
3746	 * would then need to be able to understand all the values that
3747	 * vsnprintf can use. If it is passed by value, then the verifier is disabled.
3751 vsnprintf(buf, 16, "%d", ap);
3752 ret = va_arg(ap, int);
3758 static void test_can_verify(void)
3760 if (!test_can_verify_check("%d %d", 0, 1)) {
3761 pr_info("trace event string verifier disabled\n");
3762 static_branch_inc(&trace_no_verify);
3767 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3768 * @iter: The iterator that holds the seq buffer and the event being printed
3769 * @fmt: The format used to print the event
3770 * @ap: The va_list holding the data to print from @fmt.
3772 * This writes the data into the @iter->seq buffer using the data from
3773 * @fmt and @ap. If the format has a %s, then the source of the string
3774 * is examined to make sure it is safe to print, otherwise it will
3775 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3778 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3781 const char *p = fmt;
3785 if (WARN_ON_ONCE(!fmt))
3788 if (static_branch_unlikely(&trace_no_verify))
3791 /* Don't bother checking when doing a ftrace_dump() */
3792 if (iter->fmt == static_fmt_buf)
3801 /* We only care about %s and variants */
3802 for (i = 0; p[i]; i++) {
3803 if (i + 1 >= iter->fmt_size) {
3805			 * If we can't expand the copy buffer, just print it.
3808 if (!trace_iter_expand_format(iter))
3812 if (p[i] == '\\' && p[i+1]) {
3817 /* Need to test cases like %08.*s */
3818 for (j = 1; p[i+j]; j++) {
3819 if (isdigit(p[i+j]) ||
3822 if (p[i+j] == '*') {
3834 /* If no %s found then just print normally */
3838 /* Copy up to the %s, and print that */
3839 strncpy(iter->fmt, p, i);
3840 iter->fmt[i] = '\0';
3841 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3844 * If iter->seq is full, the above call no longer guarantees
3845 * that ap is in sync with fmt processing, and further calls
3846 * to va_arg() can return wrong positional arguments.
3848 * Ensure that ap is no longer used in this case.
3850 if (iter->seq.full) {
3856 len = va_arg(ap, int);
3858 /* The ap now points to the string data of the %s */
3859 str = va_arg(ap, const char *);
3862 * If you hit this warning, it is likely that the
3863 * trace event in question used %s on a string that
3864 * was saved at the time of the event, but may not be
3865 * around when the trace is read. Use __string(),
3866 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3867 * instead. See samples/trace_events/trace-events-sample.h
3870 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3871 "fmt: '%s' current_buffer: '%s'",
3872 fmt, show_buffer(&iter->seq))) {
3875 /* Try to safely read the string */
3877 if (len + 1 > iter->fmt_size)
3878 len = iter->fmt_size - 1;
3881 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3885 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3889 trace_seq_printf(&iter->seq, "(0x%px)", str);
3891 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3893 str = "[UNSAFE-MEMORY]";
3894 strcpy(iter->fmt, "%s");
3896 strncpy(iter->fmt, p + i, j + 1);
3897 iter->fmt[j+1] = '\0';
3900 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3902 trace_seq_printf(&iter->seq, iter->fmt, str);
3908 trace_seq_vprintf(&iter->seq, p, ap);
3911 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3913 const char *p, *new_fmt;
3916 if (WARN_ON_ONCE(!fmt))
3919 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3923 new_fmt = q = iter->fmt;
3925 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3926 if (!trace_iter_expand_format(iter))
3929 q += iter->fmt - new_fmt;
3930 new_fmt = iter->fmt;
3935 /* Replace %p with %px */
3939 } else if (p[0] == 'p' && !isalnum(p[1])) {
3950 #define STATIC_TEMP_BUF_SIZE 128
3951 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3953 /* Find the next real entry, without updating the iterator itself */
3954 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3955 int *ent_cpu, u64 *ent_ts)
3957 /* __find_next_entry will reset ent_size */
3958 int ent_size = iter->ent_size;
3959 struct trace_entry *entry;
3962 * If called from ftrace_dump(), then the iter->temp buffer
3963 * will be the static_temp_buf and not created from kmalloc.
3964 * If the entry size is greater than the buffer, we can
3965 * not save it. Just return NULL in that case. This is only
3966 * used to add markers when two consecutive events' time
3967 * stamps have a large delta. See trace_print_lat_context()
3969 if (iter->temp == static_temp_buf &&
3970 STATIC_TEMP_BUF_SIZE < ent_size)
3974 * The __find_next_entry() may call peek_next_entry(), which may
3975 * call ring_buffer_peek() that may make the contents of iter->ent
3976 * undefined. Need to copy iter->ent now.
3978 if (iter->ent && iter->ent != iter->temp) {
3979 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3980 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3982 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3987 iter->temp_size = iter->ent_size;
3989 memcpy(iter->temp, iter->ent, iter->ent_size);
3990 iter->ent = iter->temp;
3992 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3993 /* Put back the original ent_size */
3994 iter->ent_size = ent_size;
3999 /* Find the next real entry, and increment the iterator to the next entry */
4000 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4002 iter->ent = __find_next_entry(iter, &iter->cpu,
4003 &iter->lost_events, &iter->ts);
4006 trace_iterator_increment(iter);
4008 return iter->ent ? iter : NULL;
4011 static void trace_consume(struct trace_iterator *iter)
4013 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4014 &iter->lost_events);
4017 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4019 struct trace_iterator *iter = m->private;
4023 WARN_ON_ONCE(iter->leftover);
4027 /* can't go backwards */
4032 ent = trace_find_next_entry_inc(iter);
4036 while (ent && iter->idx < i)
4037 ent = trace_find_next_entry_inc(iter);
4044 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4046 struct ring_buffer_iter *buf_iter;
4047 unsigned long entries = 0;
4050 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4052 buf_iter = trace_buffer_iter(iter, cpu);
4056 ring_buffer_iter_reset(buf_iter);
4059 * We could have the case with the max latency tracers
4060 * that a reset never took place on a cpu. This is evident
4061 * by the timestamp being before the start of the buffer.
4063 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4064 if (ts >= iter->array_buffer->time_start)
4067 ring_buffer_iter_advance(buf_iter);
4070 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4074 * The current tracer is copied to avoid using a global lock all around.
4077 static void *s_start(struct seq_file *m, loff_t *pos)
4079 struct trace_iterator *iter = m->private;
4080 struct trace_array *tr = iter->tr;
4081 int cpu_file = iter->cpu_file;
4087 * copy the tracer to avoid using a global lock all around.
4088	 * iter->trace is a copy of current_trace; the pointer to the
4089	 * name may be used instead of a strcmp(), as iter->trace->name
4090 * will point to the same string as current_trace->name.
4092 mutex_lock(&trace_types_lock);
4093 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4094 *iter->trace = *tr->current_trace;
4095 mutex_unlock(&trace_types_lock);
4097 #ifdef CONFIG_TRACER_MAX_TRACE
4098 if (iter->snapshot && iter->trace->use_max_tr)
4099 return ERR_PTR(-EBUSY);
4102 if (*pos != iter->pos) {
4107 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4108 for_each_tracing_cpu(cpu)
4109 tracing_iter_reset(iter, cpu);
4111 tracing_iter_reset(iter, cpu_file);
4114 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4119 * If we overflowed the seq_file before, then we want
4120 * to just reuse the trace_seq buffer again.
4126 p = s_next(m, p, &l);
4130 trace_event_read_lock();
4131 trace_access_lock(cpu_file);
4135 static void s_stop(struct seq_file *m, void *p)
4137 struct trace_iterator *iter = m->private;
4139 #ifdef CONFIG_TRACER_MAX_TRACE
4140 if (iter->snapshot && iter->trace->use_max_tr)
4144 trace_access_unlock(iter->cpu_file);
4145 trace_event_read_unlock();
4149 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4150 unsigned long *entries, int cpu)
4152 unsigned long count;
4154 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4156 * If this buffer has skipped entries, then we hold all
4157 * entries for the trace and we need to ignore the
4158 * ones before the time stamp.
4160 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4161 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4162 /* total is the same as the entries */
4166 ring_buffer_overrun_cpu(buf->buffer, cpu);
4171 get_total_entries(struct array_buffer *buf,
4172 unsigned long *total, unsigned long *entries)
4180 for_each_tracing_cpu(cpu) {
4181 get_total_entries_cpu(buf, &t, &e, cpu);
4187 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4189 unsigned long total, entries;
4194 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4199 unsigned long trace_total_entries(struct trace_array *tr)
4201 unsigned long total, entries;
4206 get_total_entries(&tr->array_buffer, &total, &entries);
4211 static void print_lat_help_header(struct seq_file *m)
4213 seq_puts(m, "# _------=> CPU# \n"
4214 "# / _-----=> irqs-off \n"
4215 "# | / _----=> need-resched \n"
4216 "# || / _---=> hardirq/softirq \n"
4217 "# ||| / _--=> preempt-depth \n"
4218 "# |||| / _-=> migrate-disable \n"
4219 "# ||||| / delay \n"
4220 "# cmd pid |||||| time | caller \n"
4221 "# \\ / |||||| \\ | / \n");
4224 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4226 unsigned long total;
4227 unsigned long entries;
4229 get_total_entries(buf, &total, &entries);
4230 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4231 entries, total, num_online_cpus());
4235 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4238 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4240 print_event_info(buf, m);
4242 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4243 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4246 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4249 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4250 const char *space = " ";
4251 int prec = tgid ? 12 : 2;
4253 print_event_info(buf, m);
4255 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
4256 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4257 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4258 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4259 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4260 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4261 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4262 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4266 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4268 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4269 struct array_buffer *buf = iter->array_buffer;
4270 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4271 struct tracer *type = iter->trace;
4272 unsigned long entries;
4273 unsigned long total;
4274 const char *name = "preemption";
4278 get_total_entries(buf, &total, &entries);
4280 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4282 seq_puts(m, "# -----------------------------------"
4283 "---------------------------------\n");
4284 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4285 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4286 nsecs_to_usecs(data->saved_latency),
4290 #if defined(CONFIG_PREEMPT_NONE)
4292 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
4294 #elif defined(CONFIG_PREEMPT)
4296 #elif defined(CONFIG_PREEMPT_RT)
4301 /* These are reserved for later use */
4304 seq_printf(m, " #P:%d)\n", num_online_cpus());
4308 seq_puts(m, "# -----------------\n");
4309 seq_printf(m, "# | task: %.16s-%d "
4310 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4311 data->comm, data->pid,
4312 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4313 data->policy, data->rt_priority);
4314 seq_puts(m, "# -----------------\n");
4316 if (data->critical_start) {
4317 seq_puts(m, "# => started at: ");
4318 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4319 trace_print_seq(m, &iter->seq);
4320 seq_puts(m, "\n# => ended at: ");
4321 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4322 trace_print_seq(m, &iter->seq);
4323 seq_puts(m, "\n#\n");
4329 static void test_cpu_buff_start(struct trace_iterator *iter)
4331 struct trace_seq *s = &iter->seq;
4332 struct trace_array *tr = iter->tr;
4334 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4337 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4340 if (cpumask_available(iter->started) &&
4341 cpumask_test_cpu(iter->cpu, iter->started))
4344 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4347 if (cpumask_available(iter->started))
4348 cpumask_set_cpu(iter->cpu, iter->started);
4350 /* Don't print started cpu buffer for the first entry of the trace */
4352 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4356 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4358 struct trace_array *tr = iter->tr;
4359 struct trace_seq *s = &iter->seq;
4360 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4361 struct trace_entry *entry;
4362 struct trace_event *event;
4366 test_cpu_buff_start(iter);
4368 event = ftrace_find_event(entry->type);
4370 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4371 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4372 trace_print_lat_context(iter);
4374 trace_print_context(iter);
4377 if (trace_seq_has_overflowed(s))
4378 return TRACE_TYPE_PARTIAL_LINE;
4381 return event->funcs->trace(iter, sym_flags, event);
4383 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4385 return trace_handle_return(s);
4388 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4390 struct trace_array *tr = iter->tr;
4391 struct trace_seq *s = &iter->seq;
4392 struct trace_entry *entry;
4393 struct trace_event *event;
4397 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4398 trace_seq_printf(s, "%d %d %llu ",
4399 entry->pid, iter->cpu, iter->ts);
4401 if (trace_seq_has_overflowed(s))
4402 return TRACE_TYPE_PARTIAL_LINE;
4404 event = ftrace_find_event(entry->type);
4406 return event->funcs->raw(iter, 0, event);
4408 trace_seq_printf(s, "%d ?\n", entry->type);
4410 return trace_handle_return(s);
4413 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4415 struct trace_array *tr = iter->tr;
4416 struct trace_seq *s = &iter->seq;
4417 unsigned char newline = '\n';
4418 struct trace_entry *entry;
4419 struct trace_event *event;
4423 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4424 SEQ_PUT_HEX_FIELD(s, entry->pid);
4425 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4426 SEQ_PUT_HEX_FIELD(s, iter->ts);
4427 if (trace_seq_has_overflowed(s))
4428 return TRACE_TYPE_PARTIAL_LINE;
4431 event = ftrace_find_event(entry->type);
4433 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4434 if (ret != TRACE_TYPE_HANDLED)
4438 SEQ_PUT_FIELD(s, newline);
4440 return trace_handle_return(s);
4443 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4445 struct trace_array *tr = iter->tr;
4446 struct trace_seq *s = &iter->seq;
4447 struct trace_entry *entry;
4448 struct trace_event *event;
4452 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4453 SEQ_PUT_FIELD(s, entry->pid);
4454 SEQ_PUT_FIELD(s, iter->cpu);
4455 SEQ_PUT_FIELD(s, iter->ts);
4456 if (trace_seq_has_overflowed(s))
4457 return TRACE_TYPE_PARTIAL_LINE;
4460 event = ftrace_find_event(entry->type);
4461 return event ? event->funcs->binary(iter, 0, event) :
4465 int trace_empty(struct trace_iterator *iter)
4467 struct ring_buffer_iter *buf_iter;
4470 /* If we are looking at one CPU buffer, only check that one */
4471 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4472 cpu = iter->cpu_file;
4473 buf_iter = trace_buffer_iter(iter, cpu);
4475 if (!ring_buffer_iter_empty(buf_iter))
4478 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4484 for_each_tracing_cpu(cpu) {
4485 buf_iter = trace_buffer_iter(iter, cpu);
4487 if (!ring_buffer_iter_empty(buf_iter))
4490 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4498 /* Called with trace_event_read_lock() held. */
4499 enum print_line_t print_trace_line(struct trace_iterator *iter)
4501 struct trace_array *tr = iter->tr;
4502 unsigned long trace_flags = tr->trace_flags;
4503 enum print_line_t ret;
4505 if (iter->lost_events) {
4506 if (iter->lost_events == (unsigned long)-1)
4507 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4510 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4511 iter->cpu, iter->lost_events);
4512 if (trace_seq_has_overflowed(&iter->seq))
4513 return TRACE_TYPE_PARTIAL_LINE;
4516 if (iter->trace && iter->trace->print_line) {
4517 ret = iter->trace->print_line(iter);
4518 if (ret != TRACE_TYPE_UNHANDLED)
4522 if (iter->ent->type == TRACE_BPUTS &&
4523 trace_flags & TRACE_ITER_PRINTK &&
4524 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4525 return trace_print_bputs_msg_only(iter);
4527 if (iter->ent->type == TRACE_BPRINT &&
4528 trace_flags & TRACE_ITER_PRINTK &&
4529 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4530 return trace_print_bprintk_msg_only(iter);
4532 if (iter->ent->type == TRACE_PRINT &&
4533 trace_flags & TRACE_ITER_PRINTK &&
4534 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4535 return trace_print_printk_msg_only(iter);
4537 if (trace_flags & TRACE_ITER_BIN)
4538 return print_bin_fmt(iter);
4540 if (trace_flags & TRACE_ITER_HEX)
4541 return print_hex_fmt(iter);
4543 if (trace_flags & TRACE_ITER_RAW)
4544 return print_raw_fmt(iter);
4546 return print_trace_fmt(iter);
4549 void trace_latency_header(struct seq_file *m)
4551 struct trace_iterator *iter = m->private;
4552 struct trace_array *tr = iter->tr;
4554 /* print nothing if the buffers are empty */
4555 if (trace_empty(iter))
4558 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4559 print_trace_header(m, iter);
4561 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4562 print_lat_help_header(m);
4565 void trace_default_header(struct seq_file *m)
4567 struct trace_iterator *iter = m->private;
4568 struct trace_array *tr = iter->tr;
4569 unsigned long trace_flags = tr->trace_flags;
4571 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4574 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4575 /* print nothing if the buffers are empty */
4576 if (trace_empty(iter))
4578 print_trace_header(m, iter);
4579 if (!(trace_flags & TRACE_ITER_VERBOSE))
4580 print_lat_help_header(m);
4582 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4583 if (trace_flags & TRACE_ITER_IRQ_INFO)
4584 print_func_help_header_irq(iter->array_buffer,
4587 print_func_help_header(iter->array_buffer, m,
4593 static void test_ftrace_alive(struct seq_file *m)
4595 if (!ftrace_is_dead())
4597 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4598 "# MAY BE MISSING FUNCTION EVENTS\n");
4601 #ifdef CONFIG_TRACER_MAX_TRACE
4602 static void show_snapshot_main_help(struct seq_file *m)
4604 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4605 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4606 "# Takes a snapshot of the main buffer.\n"
4607 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4608 "# (Doesn't have to be '2' works with any number that\n"
4609 "# is not a '0' or '1')\n");
4612 static void show_snapshot_percpu_help(struct seq_file *m)
4614 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4615 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4616 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4617 "# Takes a snapshot of the main buffer for this cpu.\n");
4619 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4620 "# Must use main snapshot file to allocate.\n");
4622 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4623 "# (Doesn't have to be '2' works with any number that\n"
4624 "# is not a '0' or '1')\n");
4627 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4629 if (iter->tr->allocated_snapshot)
4630 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4632 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4634 seq_puts(m, "# Snapshot commands:\n");
4635 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4636 show_snapshot_main_help(m);
4638 show_snapshot_percpu_help(m);
4641 /* Should never be called */
4642 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4645 static int s_show(struct seq_file *m, void *v)
4647 struct trace_iterator *iter = v;
4650 if (iter->ent == NULL) {
4652 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4654 test_ftrace_alive(m);
4656 if (iter->snapshot && trace_empty(iter))
4657 print_snapshot_help(m, iter);
4658 else if (iter->trace && iter->trace->print_header)
4659 iter->trace->print_header(m);
4661 trace_default_header(m);
4663 } else if (iter->leftover) {
4665 * If we filled the seq_file buffer earlier, we
4666 * want to just show it now.
4668 ret = trace_print_seq(m, &iter->seq);
4670 /* ret should this time be zero, but you never know */
4671 iter->leftover = ret;
4674 print_trace_line(iter);
4675 ret = trace_print_seq(m, &iter->seq);
4677 * If we overflow the seq_file buffer, then it will
4678 * ask us for this data again at start up.
4680 * ret is 0 if seq_file write succeeded.
4683 iter->leftover = ret;
4690 * Should be used after trace_array_get(), trace_types_lock
4691 * ensures that i_cdev was already initialized.
4693 static inline int tracing_get_cpu(struct inode *inode)
4695 if (inode->i_cdev) /* See trace_create_cpu_file() */
4696 return (long)inode->i_cdev - 1;
4697 return RING_BUFFER_ALL_CPUS;
4700 static const struct seq_operations tracer_seq_ops = {
4707 static struct trace_iterator *
4708 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4710 struct trace_array *tr = inode->i_private;
4711 struct trace_iterator *iter;
4714 if (tracing_disabled)
4715 return ERR_PTR(-ENODEV);
4717 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4719 return ERR_PTR(-ENOMEM);
4721 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4723 if (!iter->buffer_iter)
4727 * trace_find_next_entry() may need to save off iter->ent.
4728 * It will place it into the iter->temp buffer. As most
4729 * events are less than 128, allocate a buffer of that size.
4730 * If one is greater, then trace_find_next_entry() will
4731 * allocate a new buffer to adjust for the bigger iter->ent.
4732 * It's not critical if it fails to get allocated here.
4734 iter->temp = kmalloc(128, GFP_KERNEL);
4736 iter->temp_size = 128;
4739 * trace_event_printf() may need to modify given format
4740 * string to replace %p with %px so that it shows real address
4741	 * instead of a hash value. However, that is only needed for event
4742	 * tracing; other tracers may not need it. Defer the allocation
4743 * until it is needed.
4749 * We make a copy of the current tracer to avoid concurrent
4750 * changes on it while we are reading.
4752 mutex_lock(&trace_types_lock);
4753 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4757 *iter->trace = *tr->current_trace;
4759 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4764 #ifdef CONFIG_TRACER_MAX_TRACE
4765 /* Currently only the top directory has a snapshot */
4766 if (tr->current_trace->print_max || snapshot)
4767 iter->array_buffer = &tr->max_buffer;
4770 iter->array_buffer = &tr->array_buffer;
4771 iter->snapshot = snapshot;
4773 iter->cpu_file = tracing_get_cpu(inode);
4774 mutex_init(&iter->mutex);
4776 /* Notify the tracer early; before we stop tracing. */
4777 if (iter->trace->open)
4778 iter->trace->open(iter);
4780 /* Annotate start of buffers if we had overruns */
4781 if (ring_buffer_overruns(iter->array_buffer->buffer))
4782 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4784 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4785 if (trace_clocks[tr->clock_id].in_ns)
4786 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4789 * If pause-on-trace is enabled, then stop the trace while
4790 * dumping, unless this is the "snapshot" file
4792 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4793 tracing_stop_tr(tr);
4795 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4796 for_each_tracing_cpu(cpu) {
4797 iter->buffer_iter[cpu] =
4798 ring_buffer_read_prepare(iter->array_buffer->buffer,
4801 ring_buffer_read_prepare_sync();
4802 for_each_tracing_cpu(cpu) {
4803 ring_buffer_read_start(iter->buffer_iter[cpu]);
4804 tracing_iter_reset(iter, cpu);
4807 cpu = iter->cpu_file;
4808 iter->buffer_iter[cpu] =
4809 ring_buffer_read_prepare(iter->array_buffer->buffer,
4811 ring_buffer_read_prepare_sync();
4812 ring_buffer_read_start(iter->buffer_iter[cpu]);
4813 tracing_iter_reset(iter, cpu);
4816 mutex_unlock(&trace_types_lock);
4821 mutex_unlock(&trace_types_lock);
4824 kfree(iter->buffer_iter);
4826 seq_release_private(inode, file);
4827 return ERR_PTR(-ENOMEM);
4830 int tracing_open_generic(struct inode *inode, struct file *filp)
4834 ret = tracing_check_open_get_tr(NULL);
4838 filp->private_data = inode->i_private;
4842 bool tracing_is_disabled(void)
4844 return (tracing_disabled) ? true : false;
4848 * Open and update trace_array ref count.
4849 * Must have the current trace_array passed to it.
4851 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4853 struct trace_array *tr = inode->i_private;
4856 ret = tracing_check_open_get_tr(tr);
4860 filp->private_data = inode->i_private;
4865 static int tracing_release(struct inode *inode, struct file *file)
4867 struct trace_array *tr = inode->i_private;
4868 struct seq_file *m = file->private_data;
4869 struct trace_iterator *iter;
4872 if (!(file->f_mode & FMODE_READ)) {
4873 trace_array_put(tr);
4877 /* Writes do not use seq_file */
4879 mutex_lock(&trace_types_lock);
4881 for_each_tracing_cpu(cpu) {
4882 if (iter->buffer_iter[cpu])
4883 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4886 if (iter->trace && iter->trace->close)
4887 iter->trace->close(iter);
4889 if (!iter->snapshot && tr->stop_count)
4890 /* reenable tracing if it was previously enabled */
4891 tracing_start_tr(tr);
4893 __trace_array_put(tr);
4895 mutex_unlock(&trace_types_lock);
4897 mutex_destroy(&iter->mutex);
4898 free_cpumask_var(iter->started);
4902 kfree(iter->buffer_iter);
4903 seq_release_private(inode, file);
4908 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4910 struct trace_array *tr = inode->i_private;
4912 trace_array_put(tr);
4916 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4918 struct trace_array *tr = inode->i_private;
4920 trace_array_put(tr);
4922 return single_release(inode, file);
4925 static int tracing_open(struct inode *inode, struct file *file)
4927 struct trace_array *tr = inode->i_private;
4928 struct trace_iterator *iter;
4931 ret = tracing_check_open_get_tr(tr);
4935 /* If this file was open for write, then erase contents */
4936 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4937 int cpu = tracing_get_cpu(inode);
4938 struct array_buffer *trace_buf = &tr->array_buffer;
4940 #ifdef CONFIG_TRACER_MAX_TRACE
4941 if (tr->current_trace->print_max)
4942 trace_buf = &tr->max_buffer;
4945 if (cpu == RING_BUFFER_ALL_CPUS)
4946 tracing_reset_online_cpus(trace_buf);
4948 tracing_reset_cpu(trace_buf, cpu);
4951 if (file->f_mode & FMODE_READ) {
4952 iter = __tracing_open(inode, file, false);
4954 ret = PTR_ERR(iter);
4955 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4956 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4960 trace_array_put(tr);
4966 * Some tracers are not suitable for instance buffers.
4967 * A tracer is always available for the global array (toplevel)
4968 * or if it explicitly states that it is.
4971 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4973 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
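/*
 * Minimal sketch, for illustration only: a hypothetical tracer makes
 * itself usable in instance buffers by setting ->allow_instances in its
 * struct tracer; without it, trace_ok_for_array() accepts the tracer
 * only on the top-level (global) trace array:
 *
 *	static struct tracer example_tracer = {
 *		.name			= "example",
 *		.allow_instances	= true,
 *	};
 */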
4976 /* Find the next tracer that this trace array may use */
4977 static struct tracer *
4978 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4980 while (t && !trace_ok_for_array(t, tr))
4987 t_next(struct seq_file *m, void *v, loff_t *pos)
4989 struct trace_array *tr = m->private;
4990 struct tracer *t = v;
4995 t = get_tracer_for_array(tr, t->next);
5000 static void *t_start(struct seq_file *m, loff_t *pos)
5002 struct trace_array *tr = m->private;
5006 mutex_lock(&trace_types_lock);
5008 t = get_tracer_for_array(tr, trace_types);
5009 for (; t && l < *pos; t = t_next(m, t, &l))
5015 static void t_stop(struct seq_file *m, void *p)
5017 mutex_unlock(&trace_types_lock);
5020 static int t_show(struct seq_file *m, void *v)
5022 struct tracer *t = v;
5027 seq_puts(m, t->name);
5036 static const struct seq_operations show_traces_seq_ops = {
5043 static int show_traces_open(struct inode *inode, struct file *file)
5045 struct trace_array *tr = inode->i_private;
5049 ret = tracing_check_open_get_tr(tr);
5053 ret = seq_open(file, &show_traces_seq_ops);
5055 trace_array_put(tr);
5059 m = file->private_data;
5065 static int show_traces_release(struct inode *inode, struct file *file)
5067 struct trace_array *tr = inode->i_private;
5069 trace_array_put(tr);
5070 return seq_release(inode, file);
5074 tracing_write_stub(struct file *filp, const char __user *ubuf,
5075 size_t count, loff_t *ppos)
5080 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5084 if (file->f_mode & FMODE_READ)
5085 ret = seq_lseek(file, offset, whence);
5087 file->f_pos = ret = 0;
5092 static const struct file_operations tracing_fops = {
5093 .open = tracing_open,
5095 .write = tracing_write_stub,
5096 .llseek = tracing_lseek,
5097 .release = tracing_release,
5100 static const struct file_operations show_traces_fops = {
5101 .open = show_traces_open,
5103 .llseek = seq_lseek,
5104 .release = show_traces_release,
5108 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5109 size_t count, loff_t *ppos)
5111 struct trace_array *tr = file_inode(filp)->i_private;
5115 len = snprintf(NULL, 0, "%*pb\n",
5116 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5117 mask_str = kmalloc(len, GFP_KERNEL);
5121 len = snprintf(mask_str, len, "%*pb\n",
5122 cpumask_pr_args(tr->tracing_cpumask));
5127 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
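/*
 * The snprintf(NULL, 0, ...) call above only measures how long the
 * rendered cpumask will be; the buffer is then allocated and filled in a
 * second pass.  A stand-alone user-space sketch of the same two-pass
 * sizing idiom (hypothetical example, not kernel code):
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		int val = 42;
 *		int len = snprintf(NULL, 0, "value=%d\n", val) + 1;
 *		char *buf = malloc(len);
 *
 *		if (!buf)
 *			return 1;
 *		snprintf(buf, len, "value=%d\n", val);
 *		fputs(buf, stdout);
 *		free(buf);
 *		return 0;
 *	}
 */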
5135 int tracing_set_cpumask(struct trace_array *tr,
5136 cpumask_var_t tracing_cpumask_new)
5143 local_irq_disable();
5144 arch_spin_lock(&tr->max_lock);
5145 for_each_tracing_cpu(cpu) {
5147 * Increase/decrease the disabled counter if we are
5148 * about to flip a bit in the cpumask:
5150 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5151 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5152 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5153 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5155 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5156 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5157 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5158 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5161 arch_spin_unlock(&tr->max_lock);
5164 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5170 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5171 size_t count, loff_t *ppos)
5173 struct trace_array *tr = file_inode(filp)->i_private;
5174 cpumask_var_t tracing_cpumask_new;
5177 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5180 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5184 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5188 free_cpumask_var(tracing_cpumask_new);
5193 free_cpumask_var(tracing_cpumask_new);
5198 static const struct file_operations tracing_cpumask_fops = {
5199 .open = tracing_open_generic_tr,
5200 .read = tracing_cpumask_read,
5201 .write = tracing_cpumask_write,
5202 .release = tracing_release_generic_tr,
5203 .llseek = generic_file_llseek,
5206 static int tracing_trace_options_show(struct seq_file *m, void *v)
5208 struct tracer_opt *trace_opts;
5209 struct trace_array *tr = m->private;
5213 mutex_lock(&trace_types_lock);
5214 tracer_flags = tr->current_trace->flags->val;
5215 trace_opts = tr->current_trace->flags->opts;
5217 for (i = 0; trace_options[i]; i++) {
5218 if (tr->trace_flags & (1 << i))
5219 seq_printf(m, "%s\n", trace_options[i]);
5221 seq_printf(m, "no%s\n", trace_options[i]);
5224 for (i = 0; trace_opts[i].name; i++) {
5225 if (tracer_flags & trace_opts[i].bit)
5226 seq_printf(m, "%s\n", trace_opts[i].name);
5228 seq_printf(m, "no%s\n", trace_opts[i].name);
5230 mutex_unlock(&trace_types_lock);
5235 static int __set_tracer_option(struct trace_array *tr,
5236 struct tracer_flags *tracer_flags,
5237 struct tracer_opt *opts, int neg)
5239 struct tracer *trace = tracer_flags->trace;
5242 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5247 tracer_flags->val &= ~opts->bit;
5249 tracer_flags->val |= opts->bit;
5253 /* Try to assign a tracer specific option */
5254 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5256 struct tracer *trace = tr->current_trace;
5257 struct tracer_flags *tracer_flags = trace->flags;
5258 struct tracer_opt *opts = NULL;
5261 for (i = 0; tracer_flags->opts[i].name; i++) {
5262 opts = &tracer_flags->opts[i];
5264 if (strcmp(cmp, opts->name) == 0)
5265 return __set_tracer_option(tr, trace->flags, opts, neg);
5271 /* Some tracers require overwrite to stay enabled */
5272 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5274 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5280 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5284 if ((mask == TRACE_ITER_RECORD_TGID) ||
5285 (mask == TRACE_ITER_RECORD_CMD))
5286 lockdep_assert_held(&event_mutex);
5288 /* do nothing if flag is already set */
5289 if (!!(tr->trace_flags & mask) == !!enabled)
5292 /* Give the tracer a chance to approve the change */
5293 if (tr->current_trace->flag_changed)
5294 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5298 tr->trace_flags |= mask;
5300 tr->trace_flags &= ~mask;
5302 if (mask == TRACE_ITER_RECORD_CMD)
5303 trace_event_enable_cmd_record(enabled);
5305 if (mask == TRACE_ITER_RECORD_TGID) {
5307 tgid_map_max = pid_max;
5308 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5312 * Pairs with smp_load_acquire() in
5313 * trace_find_tgid_ptr() to ensure that if it observes
5314 * the tgid_map we just allocated then it also observes
5315 * the corresponding tgid_map_max value.
5317 smp_store_release(&tgid_map, map);
5320 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5324 trace_event_enable_tgid_record(enabled);
5327 if (mask == TRACE_ITER_EVENT_FORK)
5328 trace_event_follow_fork(tr, enabled);
5330 if (mask == TRACE_ITER_FUNC_FORK)
5331 ftrace_pid_follow_fork(tr, enabled);
5333 if (mask == TRACE_ITER_OVERWRITE) {
5334 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5335 #ifdef CONFIG_TRACER_MAX_TRACE
5336 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5340 if (mask == TRACE_ITER_PRINTK) {
5341 trace_printk_start_stop_comm(enabled);
5342 trace_printk_control(enabled);
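/*
 * Reader-side sketch of the publish/consume pairing used for tgid_map in
 * set_tracer_flag() above (the real counterpart lives in
 * trace_find_tgid_ptr(); this is only an illustration): the
 * smp_load_acquire() guarantees that a reader who sees the newly
 * published tgid_map pointer also sees the tgid_map_max value written
 * before the smp_store_release():
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (unlikely(!map || pid > tgid_map_max))
 *		return NULL;
 *	return &map[pid];
 */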
5348 int trace_set_options(struct trace_array *tr, char *option)
5353 size_t orig_len = strlen(option);
5356 cmp = strstrip(option);
5358 len = str_has_prefix(cmp, "no");
5364 mutex_lock(&event_mutex);
5365 mutex_lock(&trace_types_lock);
5367 ret = match_string(trace_options, -1, cmp);
5368 /* If no option could be set, test the specific tracer options */
5370 ret = set_tracer_option(tr, cmp, neg);
5372 ret = set_tracer_flag(tr, 1 << ret, !neg);
5374 mutex_unlock(&trace_types_lock);
5375 mutex_unlock(&event_mutex);
5378 * If the first trailing whitespace is replaced with '\0' by strstrip,
5379 * turn it back into a space.
5381 if (orig_len > strlen(option))
5382 option[strlen(option)] = ' ';
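/*
 * User-space sketch (illustration only, assuming the usual tracefs mount
 * point) of the string format parsed by trace_set_options() above: the
 * bare option name sets a flag, and a "no" prefix clears it.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "nooverwrite", 11);
 *		close(fd);
 *		return 0;
 *	}
 */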
5387 static void __init apply_trace_boot_options(void)
5389 char *buf = trace_boot_options_buf;
5393 option = strsep(&buf, ",");
5399 trace_set_options(&global_trace, option);
5401 /* Put back the comma to allow this to be called again */
5408 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5409 size_t cnt, loff_t *ppos)
5411 struct seq_file *m = filp->private_data;
5412 struct trace_array *tr = m->private;
5416 if (cnt >= sizeof(buf))
5419 if (copy_from_user(buf, ubuf, cnt))
5424 ret = trace_set_options(tr, buf);
5433 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5435 struct trace_array *tr = inode->i_private;
5438 ret = tracing_check_open_get_tr(tr);
5442 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5444 trace_array_put(tr);
5449 static const struct file_operations tracing_iter_fops = {
5450 .open = tracing_trace_options_open,
5452 .llseek = seq_lseek,
5453 .release = tracing_single_release_tr,
5454 .write = tracing_trace_options_write,
5457 static const char readme_msg[] =
5458 "tracing mini-HOWTO:\n\n"
5459 "# echo 0 > tracing_on : quick way to disable tracing\n"
5460 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5461 " Important files:\n"
5462 " trace\t\t\t- The static contents of the buffer\n"
5463 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5464 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5465 " current_tracer\t- function and latency tracers\n"
5466 " available_tracers\t- list of configured tracers for current_tracer\n"
5467 " error_log\t- error log for failed commands (that support it)\n"
5468 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5469 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5470 " trace_clock\t\t- change the clock used to order events\n"
5471 " local: Per cpu clock but may not be synced across CPUs\n"
5472 " global: Synced across CPUs but slows tracing down.\n"
5473 " counter: Not a clock, but just an increment\n"
5474 " uptime: Jiffy counter from time of boot\n"
5475 " perf: Same clock that perf events use\n"
5476 #ifdef CONFIG_X86_64
5477 " x86-tsc: TSC cycle counter\n"
5479 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5480 " delta: Delta difference against a buffer-wide timestamp\n"
5481 " absolute: Absolute (standalone) timestamp\n"
5482 "\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
5483 "\n trace_marker_raw\t\t- Writes into this file are inserted as binary data into the kernel buffer\n"
5484 " tracing_cpumask\t- Limit which CPUs to trace\n"
5485 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5486 "\t\t\t Remove sub-buffer with rmdir\n"
5487 " trace_options\t\t- Set format or modify how tracing happens\n"
5488 "\t\t\t Disable an option by prefixing 'no' to the\n"
5489 "\t\t\t option name\n"
5490 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5491 #ifdef CONFIG_DYNAMIC_FTRACE
5492 "\n available_filter_functions - list of functions that can be filtered on\n"
5493 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5494 "\t\t\t functions\n"
5495 "\t accepts: func_full_name or glob-matching-pattern\n"
5496 "\t modules: Can select a group via module\n"
5497 "\t Format: :mod:<module-name>\n"
5498 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5499 "\t triggers: a command to perform when function is hit\n"
5500 "\t Format: <function>:<trigger>[:count]\n"
5501 "\t trigger: traceon, traceoff\n"
5502 "\t\t enable_event:<system>:<event>\n"
5503 "\t\t disable_event:<system>:<event>\n"
5504 #ifdef CONFIG_STACKTRACE
5507 #ifdef CONFIG_TRACER_SNAPSHOT
5512 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5513 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5514 "\t The first one will disable tracing every time do_fault is hit\n"
5515 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5516 "\t The first time do_trap is hit and it disables tracing, the\n"
5517 "\t counter will decrement to 2. If tracing is already disabled,\n"
5518 "\t the counter will not decrement. It only decrements when the\n"
5519 "\t trigger did work\n"
5520 "\t To remove a trigger without a count:\n"
5521 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
5522 "\t To remove a trigger with a count:\n"
5523 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
5524 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5525 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5526 "\t modules: Can select a group via module command :mod:\n"
5527 "\t Does not accept triggers\n"
5528 #endif /* CONFIG_DYNAMIC_FTRACE */
5529 #ifdef CONFIG_FUNCTION_TRACER
5530 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5532 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5535 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5536 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5537 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5538 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5540 #ifdef CONFIG_TRACER_SNAPSHOT
5541 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5542 "\t\t\t snapshot buffer. Read the contents for more\n"
5543 "\t\t\t information\n"
5545 #ifdef CONFIG_STACK_TRACER
5546 " stack_trace\t\t- Shows the max stack trace when active\n"
5547 " stack_max_size\t- Shows current max stack size that was traced\n"
5548 "\t\t\t Write into this file to reset the max size (trigger a\n"
5549 "\t\t\t new trace)\n"
5550 #ifdef CONFIG_DYNAMIC_FTRACE
5551 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5554 #endif /* CONFIG_STACK_TRACER */
5555 #ifdef CONFIG_DYNAMIC_EVENTS
5556 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5557 "\t\t\t Write into this file to define/undefine new trace events.\n"
5559 #ifdef CONFIG_KPROBE_EVENTS
5560 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5561 "\t\t\t Write into this file to define/undefine new trace events.\n"
5563 #ifdef CONFIG_UPROBE_EVENTS
5564 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5565 "\t\t\t Write into this file to define/undefine new trace events.\n"
5567 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5568 "\t accepts: event-definitions (one definition per line)\n"
5569 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5570 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5571 #ifdef CONFIG_HIST_TRIGGERS
5572 "\t s:[synthetic/]<event> <field> [<field>]\n"
5574 "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
5575 "\t -:[<group>/]<event>\n"
5576 #ifdef CONFIG_KPROBE_EVENTS
5577 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5578 "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5580 #ifdef CONFIG_UPROBE_EVENTS
5581 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5583 "\t args: <name>=fetcharg[:type]\n"
5584 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5585 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5586 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5588 "\t $stack<index>, $stack, $retval, $comm,\n"
5590 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5591 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5592 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5593 "\t <type>\\[<array-size>\\]\n"
5594 #ifdef CONFIG_HIST_TRIGGERS
5595 "\t field: <stype> <name>;\n"
5596 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5597 "\t [unsigned] char/int/long\n"
5599 "\t efield: For event probes ('e' types), the field is one of the fields\n"
5600 "\t of the <attached-group>/<attached-event>.\n"
5602 " events/\t\t- Directory containing all trace event subsystems:\n"
5603 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5604 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5605 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5607 " filter\t\t- If set, only events passing filter are traced\n"
5608 " events/<system>/<event>/\t- Directory containing control files for\n"
5610 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5611 " filter\t\t- If set, only events passing filter are traced\n"
5612 " trigger\t\t- If set, a command to perform when event is hit\n"
5613 "\t Format: <trigger>[:count][if <filter>]\n"
5614 "\t trigger: traceon, traceoff\n"
5615 "\t enable_event:<system>:<event>\n"
5616 "\t disable_event:<system>:<event>\n"
5617 #ifdef CONFIG_HIST_TRIGGERS
5618 "\t enable_hist:<system>:<event>\n"
5619 "\t disable_hist:<system>:<event>\n"
5621 #ifdef CONFIG_STACKTRACE
5624 #ifdef CONFIG_TRACER_SNAPSHOT
5627 #ifdef CONFIG_HIST_TRIGGERS
5628 "\t\t hist (see below)\n"
5630 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5631 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5632 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5633 "\t events/block/block_unplug/trigger\n"
5634 "\t The first disables tracing every time block_unplug is hit.\n"
5635 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5636 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5637 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5638 "\t Like function triggers, the counter is only decremented if it\n"
5639 "\t enabled or disabled tracing.\n"
5640 "\t To remove a trigger without a count:\n"
5641 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
5642 "\t To remove a trigger with a count:\n"
5643 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
5644 "\t Filters can be ignored when removing a trigger.\n"
5645 #ifdef CONFIG_HIST_TRIGGERS
5646 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5647 "\t Format: hist:keys=<field1[,field2,...]>\n"
5648 "\t [:values=<field1[,field2,...]>]\n"
5649 "\t [:sort=<field1[,field2,...]>]\n"
5650 "\t [:size=#entries]\n"
5651 "\t [:pause][:continue][:clear]\n"
5652 "\t [:name=histname1]\n"
5653 "\t [:<handler>.<action>]\n"
5654 "\t [if <filter>]\n\n"
5655 "\t Note, special fields can be used as well:\n"
5656 "\t common_timestamp - to record current timestamp\n"
5657 "\t common_cpu - to record the CPU the event happened on\n"
5659 "\t When a matching event is hit, an entry is added to a hash\n"
5660 "\t table using the key(s) and value(s) named, and the value of a\n"
5661 "\t sum called 'hitcount' is incremented. Keys and values\n"
5662 "\t correspond to fields in the event's format description. Keys\n"
5663 "\t can be any field, or the special string 'stacktrace'.\n"
5664 "\t Compound keys consisting of up to two fields can be specified\n"
5665 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5666 "\t fields. Sort keys consisting of up to two fields can be\n"
5667 "\t specified using the 'sort' keyword. The sort direction can\n"
5668 "\t be modified by appending '.descending' or '.ascending' to a\n"
5669 "\t sort field. The 'size' parameter can be used to specify more\n"
5670 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5671 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5672 "\t its histogram data will be shared with other triggers of the\n"
5673 "\t same name, and trigger hits will update this common data.\n\n"
5674 "\t Reading the 'hist' file for the event will dump the hash\n"
5675 "\t table in its entirety to stdout. If there are multiple hist\n"
5676 "\t triggers attached to an event, there will be a table for each\n"
5677 "\t trigger in the output. The table displayed for a named\n"
5678 "\t trigger will be the same as any other instance having the\n"
5679 "\t same name. The default format used to display a given field\n"
5680 "\t can be modified by appending any of the following modifiers\n"
5681 "\t to the field name, as applicable:\n\n"
5682 "\t .hex display a number as a hex value\n"
5683 "\t .sym display an address as a symbol\n"
5684 "\t .sym-offset display an address as a symbol and offset\n"
5685 "\t .execname display a common_pid as a program name\n"
5686 "\t .syscall display a syscall id as a syscall name\n"
5687 "\t .log2 display log2 value rather than raw number\n"
5688 "\t .buckets=size display values in groups of size rather than raw number\n"
5689 "\t .usecs display a common_timestamp in microseconds\n\n"
5690 "\t The 'pause' parameter can be used to pause an existing hist\n"
5691 "\t trigger or to start a hist trigger but not log any events\n"
5692 "\t until told to do so. 'continue' can be used to start or\n"
5693 "\t restart a paused hist trigger.\n\n"
5694 "\t The 'clear' parameter will clear the contents of a running\n"
5695 "\t hist trigger and leave its current paused/active state\n"
5697 "\t The enable_hist and disable_hist triggers can be used to\n"
5698 "\t have one event conditionally start and stop another event's\n"
5699 "\t already-attached hist trigger. The syntax is analogous to\n"
5700 "\t the enable_event and disable_event triggers.\n\n"
5701 "\t Hist trigger handlers and actions are executed whenever a\n"
5702 "\t histogram entry is added or updated. They take the form:\n\n"
5703 "\t <handler>.<action>\n\n"
5704 "\t The available handlers are:\n\n"
5705 "\t onmatch(matching.event) - invoke on addition or update\n"
5706 "\t onmax(var) - invoke if var exceeds current max\n"
5707 "\t onchange(var) - invoke action if var changes\n\n"
5708 "\t The available actions are:\n\n"
5709 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5710 "\t save(field,...) - save current event fields\n"
5711 #ifdef CONFIG_TRACER_SNAPSHOT
5712 "\t snapshot() - snapshot the trace buffer\n\n"
5714 #ifdef CONFIG_SYNTH_EVENTS
5715 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5716 "\t Write into this file to define/undefine new synthetic events.\n"
5717 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5723 tracing_readme_read(struct file *filp, char __user *ubuf,
5724 size_t cnt, loff_t *ppos)
5726 return simple_read_from_buffer(ubuf, cnt, ppos,
5727 readme_msg, strlen(readme_msg));
5730 static const struct file_operations tracing_readme_fops = {
5731 .open = tracing_open_generic,
5732 .read = tracing_readme_read,
5733 .llseek = generic_file_llseek,
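/*
 * User-space version of the trigger example given in the mini-HOWTO
 * above (illustration only; assumes the usual tracefs mount point and
 * that the block:block_unplug event exists on this kernel).  Writing
 * "traceoff:3" disables tracing the first three times the event is hit:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		const char *cmd = "traceoff:3";
 *		int fd = open("/sys/kernel/tracing/events/block/block_unplug/trigger",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, cmd, strlen(cmd));
 *		close(fd);
 *		return 0;
 *	}
 */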
5736 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5740 return trace_find_tgid_ptr(pid);
5743 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5747 return trace_find_tgid_ptr(pid);
5750 static void saved_tgids_stop(struct seq_file *m, void *v)
5754 static int saved_tgids_show(struct seq_file *m, void *v)
5756 int *entry = (int *)v;
5757 int pid = entry - tgid_map;
5763 seq_printf(m, "%d %d\n", pid, tgid);
5767 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5768 .start = saved_tgids_start,
5769 .stop = saved_tgids_stop,
5770 .next = saved_tgids_next,
5771 .show = saved_tgids_show,
5774 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5778 ret = tracing_check_open_get_tr(NULL);
5782 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5786 static const struct file_operations tracing_saved_tgids_fops = {
5787 .open = tracing_saved_tgids_open,
5789 .llseek = seq_lseek,
5790 .release = seq_release,
5793 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5795 unsigned int *ptr = v;
5797 if (*pos || m->count)
5802 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5804 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5813 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5819 arch_spin_lock(&trace_cmdline_lock);
5821 v = &savedcmd->map_cmdline_to_pid[0];
5823 v = saved_cmdlines_next(m, v, &l);
5831 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5833 arch_spin_unlock(&trace_cmdline_lock);
5837 static int saved_cmdlines_show(struct seq_file *m, void *v)
5839 char buf[TASK_COMM_LEN];
5840 unsigned int *pid = v;
5842 __trace_find_cmdline(*pid, buf);
5843 seq_printf(m, "%d %s\n", *pid, buf);
5847 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5848 .start = saved_cmdlines_start,
5849 .next = saved_cmdlines_next,
5850 .stop = saved_cmdlines_stop,
5851 .show = saved_cmdlines_show,
5854 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5858 ret = tracing_check_open_get_tr(NULL);
5862 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5865 static const struct file_operations tracing_saved_cmdlines_fops = {
5866 .open = tracing_saved_cmdlines_open,
5868 .llseek = seq_lseek,
5869 .release = seq_release,
5873 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5874 size_t cnt, loff_t *ppos)
5880 arch_spin_lock(&trace_cmdline_lock);
5881 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5882 arch_spin_unlock(&trace_cmdline_lock);
5885 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5888 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5890 kfree(s->saved_cmdlines);
5891 kfree(s->map_cmdline_to_pid);
5895 static int tracing_resize_saved_cmdlines(unsigned int val)
5897 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5899 s = kmalloc(sizeof(*s), GFP_KERNEL);
5903 if (allocate_cmdlines_buffer(val, s) < 0) {
5909 arch_spin_lock(&trace_cmdline_lock);
5910 savedcmd_temp = savedcmd;
5912 arch_spin_unlock(&trace_cmdline_lock);
5914 free_saved_cmdlines_buffer(savedcmd_temp);
5920 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5921 size_t cnt, loff_t *ppos)
5926 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5930 /* must have at least 1 entry and no more than PID_MAX_DEFAULT entries */
5931 if (!val || val > PID_MAX_DEFAULT)
5934 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5943 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5944 .open = tracing_open_generic,
5945 .read = tracing_saved_cmdlines_size_read,
5946 .write = tracing_saved_cmdlines_size_write,
5949 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5950 static union trace_eval_map_item *
5951 update_eval_map(union trace_eval_map_item *ptr)
5953 if (!ptr->map.eval_string) {
5954 if (ptr->tail.next) {
5955 ptr = ptr->tail.next;
5956 /* Set ptr to the next real item (skip head) */
5964 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5966 union trace_eval_map_item *ptr = v;
5969 * Paranoid! If ptr points to end, we don't want to increment past it.
5970 * This really should never happen.
5973 ptr = update_eval_map(ptr);
5974 if (WARN_ON_ONCE(!ptr))
5978 ptr = update_eval_map(ptr);
5983 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5985 union trace_eval_map_item *v;
5988 mutex_lock(&trace_eval_mutex);
5990 v = trace_eval_maps;
5994 while (v && l < *pos) {
5995 v = eval_map_next(m, v, &l);
6001 static void eval_map_stop(struct seq_file *m, void *v)
6003 mutex_unlock(&trace_eval_mutex);
6006 static int eval_map_show(struct seq_file *m, void *v)
6008 union trace_eval_map_item *ptr = v;
6010 seq_printf(m, "%s %ld (%s)\n",
6011 ptr->map.eval_string, ptr->map.eval_value,
6017 static const struct seq_operations tracing_eval_map_seq_ops = {
6018 .start = eval_map_start,
6019 .next = eval_map_next,
6020 .stop = eval_map_stop,
6021 .show = eval_map_show,
6024 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6028 ret = tracing_check_open_get_tr(NULL);
6032 return seq_open(filp, &tracing_eval_map_seq_ops);
6035 static const struct file_operations tracing_eval_map_fops = {
6036 .open = tracing_eval_map_open,
6038 .llseek = seq_lseek,
6039 .release = seq_release,
6042 static inline union trace_eval_map_item *
6043 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6045 /* Return tail of array given the head */
6046 return ptr + ptr->head.length + 1;
6050 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6053 struct trace_eval_map **stop;
6054 struct trace_eval_map **map;
6055 union trace_eval_map_item *map_array;
6056 union trace_eval_map_item *ptr;
6061 * The trace_eval_maps contains the map plus a head and tail item,
6062 * where the head holds the module and length of array, and the
6063 * tail holds a pointer to the next list.
6065 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6067 pr_warn("Unable to allocate trace eval mapping\n");
6071 mutex_lock(&trace_eval_mutex);
6073 if (!trace_eval_maps)
6074 trace_eval_maps = map_array;
6076 ptr = trace_eval_maps;
6078 ptr = trace_eval_jmp_to_tail(ptr);
6079 if (!ptr->tail.next)
6081 ptr = ptr->tail.next;
6084 ptr->tail.next = map_array;
6086 map_array->head.mod = mod;
6087 map_array->head.length = len;
6090 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6091 map_array->map = **map;
6094 memset(map_array, 0, sizeof(*map_array));
6096 mutex_unlock(&trace_eval_mutex);
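/*
 * Layout sketch of one chunk built above (illustration only): a head
 * item carrying the module and entry count, then 'len' real map items,
 * then a tail item whose ->tail.next links to the following chunk:
 *
 *	[ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 */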
6099 static void trace_create_eval_file(struct dentry *d_tracer)
6101 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6102 NULL, &tracing_eval_map_fops);
6105 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6106 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6107 static inline void trace_insert_eval_map_file(struct module *mod,
6108 struct trace_eval_map **start, int len) { }
6109 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6111 static void trace_insert_eval_map(struct module *mod,
6112 struct trace_eval_map **start, int len)
6114 struct trace_eval_map **map;
6121 trace_event_eval_update(map, len);
6123 trace_insert_eval_map_file(mod, start, len);
6127 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6128 size_t cnt, loff_t *ppos)
6130 struct trace_array *tr = filp->private_data;
6131 char buf[MAX_TRACER_SIZE+2];
6134 mutex_lock(&trace_types_lock);
6135 r = sprintf(buf, "%s\n", tr->current_trace->name);
6136 mutex_unlock(&trace_types_lock);
6138 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6141 int tracer_init(struct tracer *t, struct trace_array *tr)
6143 tracing_reset_online_cpus(&tr->array_buffer);
6147 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6151 for_each_tracing_cpu(cpu)
6152 per_cpu_ptr(buf->data, cpu)->entries = val;
6155 #ifdef CONFIG_TRACER_MAX_TRACE
6156 /* resize @tr's buffer to the size of @size_tr's entries */
6157 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6158 struct array_buffer *size_buf, int cpu_id)
6162 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6163 for_each_tracing_cpu(cpu) {
6164 ret = ring_buffer_resize(trace_buf->buffer,
6165 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6168 per_cpu_ptr(trace_buf->data, cpu)->entries =
6169 per_cpu_ptr(size_buf->data, cpu)->entries;
6172 ret = ring_buffer_resize(trace_buf->buffer,
6173 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6175 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6176 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6181 #endif /* CONFIG_TRACER_MAX_TRACE */
6183 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6184 unsigned long size, int cpu)
6189 * If the kernel or the user changes the size of the ring buffer,
6190 * we use the size that was given, and we can forget about
6191 * expanding it later.
6193 ring_buffer_expanded = true;
6195 /* May be called before buffers are initialized */
6196 if (!tr->array_buffer.buffer)
6199 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6203 #ifdef CONFIG_TRACER_MAX_TRACE
6204 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6205 !tr->current_trace->use_max_tr)
6208 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6210 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6211 &tr->array_buffer, cpu);
6214 * AARGH! We are left with different
6215 * size max buffer!!!!
6216 * The max buffer is our "snapshot" buffer.
6217 * When a tracer needs a snapshot (one of the
6218 * latency tracers), it swaps the max buffer
6219 * with the saved snapshot. We succeeded in updating
6220 * the size of the main buffer, but failed to
6221 * update the size of the max buffer. But when we tried
6222 * to reset the main buffer to the original size, we
6223 * failed there too. This is very unlikely to
6224 * happen, but if it does, warn and kill all
6228 tracing_disabled = 1;
6233 if (cpu == RING_BUFFER_ALL_CPUS)
6234 set_buffer_entries(&tr->max_buffer, size);
6236 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6239 #endif /* CONFIG_TRACER_MAX_TRACE */
6241 if (cpu == RING_BUFFER_ALL_CPUS)
6242 set_buffer_entries(&tr->array_buffer, size);
6244 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6249 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6250 unsigned long size, int cpu_id)
6254 mutex_lock(&trace_types_lock);
6256 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6257 /* make sure, this cpu is enabled in the mask */
6258 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6264 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6269 mutex_unlock(&trace_types_lock);
6276 * tracing_update_buffers - used by tracing facility to expand ring buffers
6278 * To save memory when tracing is never used on a system that has it
6279 * configured in, the ring buffers are initially set to a minimum size.
6280 * Once a user starts to use the tracing facility, they need to grow
6281 * to their default size.
6283 * This function is to be called when a tracer is about to be used.
6285 int tracing_update_buffers(void)
6289 mutex_lock(&trace_types_lock);
6290 if (!ring_buffer_expanded)
6291 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6292 RING_BUFFER_ALL_CPUS);
6293 mutex_unlock(&trace_types_lock);
6298 struct trace_option_dentry;
6301 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6304 * Used to clear out the tracer before deletion of an instance.
6305 * Must have trace_types_lock held.
6307 static void tracing_set_nop(struct trace_array *tr)
6309 if (tr->current_trace == &nop_trace)
6312 tr->current_trace->enabled--;
6314 if (tr->current_trace->reset)
6315 tr->current_trace->reset(tr);
6317 tr->current_trace = &nop_trace;
6320 static bool tracer_options_updated;
6322 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6324 /* Only enable if the directory has been created already. */
6328 /* Only create trace option files after update_tracer_options finish */
6329 if (!tracer_options_updated)
6332 create_trace_option_files(tr, t);
6335 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6338 #ifdef CONFIG_TRACER_MAX_TRACE
6343 mutex_lock(&trace_types_lock);
6345 if (!ring_buffer_expanded) {
6346 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6347 RING_BUFFER_ALL_CPUS);
6353 for (t = trace_types; t; t = t->next) {
6354 if (strcmp(t->name, buf) == 0)
6361 if (t == tr->current_trace)
6364 #ifdef CONFIG_TRACER_SNAPSHOT
6365 if (t->use_max_tr) {
6366 local_irq_disable();
6367 arch_spin_lock(&tr->max_lock);
6368 if (tr->cond_snapshot)
6370 arch_spin_unlock(&tr->max_lock);
6376 /* Some tracers won't work on kernel command line */
6377 if (system_state < SYSTEM_RUNNING && t->noboot) {
6378 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6383 /* Some tracers are only allowed for the top level buffer */
6384 if (!trace_ok_for_array(t, tr)) {
6389 /* If trace pipe files are being read, we can't change the tracer */
6390 if (tr->trace_ref) {
6395 trace_branch_disable();
6397 tr->current_trace->enabled--;
6399 if (tr->current_trace->reset)
6400 tr->current_trace->reset(tr);
6402 #ifdef CONFIG_TRACER_MAX_TRACE
6403 had_max_tr = tr->current_trace->use_max_tr;
6405 /* Current trace needs to be nop_trace before synchronize_rcu */
6406 tr->current_trace = &nop_trace;
6408 if (had_max_tr && !t->use_max_tr) {
6410 * We need to make sure that the update_max_tr sees that
6411 * current_trace changed to nop_trace to keep it from
6412 * swapping the buffers after we resize it.
6413 * The update_max_tr is called with interrupts disabled,
6414 * so an RCU synchronization is sufficient.
6420 if (t->use_max_tr && !tr->allocated_snapshot) {
6421 ret = tracing_alloc_snapshot_instance(tr);
6426 tr->current_trace = &nop_trace;
6430 ret = tracer_init(t, tr);
6435 tr->current_trace = t;
6436 tr->current_trace->enabled++;
6437 trace_branch_enable(tr);
6439 mutex_unlock(&trace_types_lock);
6445 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6446 size_t cnt, loff_t *ppos)
6448 struct trace_array *tr = filp->private_data;
6449 char buf[MAX_TRACER_SIZE+1];
6456 if (cnt > MAX_TRACER_SIZE)
6457 cnt = MAX_TRACER_SIZE;
6459 if (copy_from_user(buf, ubuf, cnt))
6464 /* strip ending whitespace. */
6465 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6468 err = tracing_set_tracer(tr, buf);
6478 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6479 size_t cnt, loff_t *ppos)
6484 r = snprintf(buf, sizeof(buf), "%ld\n",
6485 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6486 if (r > sizeof(buf))
6488 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6492 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6493 size_t cnt, loff_t *ppos)
6498 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6508 tracing_thresh_read(struct file *filp, char __user *ubuf,
6509 size_t cnt, loff_t *ppos)
6511 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6515 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6516 size_t cnt, loff_t *ppos)
6518 struct trace_array *tr = filp->private_data;
6521 mutex_lock(&trace_types_lock);
6522 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6526 if (tr->current_trace->update_thresh) {
6527 ret = tr->current_trace->update_thresh(tr);
6534 mutex_unlock(&trace_types_lock);
6539 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6542 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6543 size_t cnt, loff_t *ppos)
6545 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6549 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6550 size_t cnt, loff_t *ppos)
6552 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6557 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6559 struct trace_array *tr = inode->i_private;
6560 struct trace_iterator *iter;
6563 ret = tracing_check_open_get_tr(tr);
6567 mutex_lock(&trace_types_lock);
6569 /* create a buffer to store the information to pass to userspace */
6570 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6573 __trace_array_put(tr);
6577 trace_seq_init(&iter->seq);
6578 iter->trace = tr->current_trace;
6580 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6585 /* trace pipe does not show start of buffer */
6586 cpumask_setall(iter->started);
6588 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6589 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6591 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6592 if (trace_clocks[tr->clock_id].in_ns)
6593 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6596 iter->array_buffer = &tr->array_buffer;
6597 iter->cpu_file = tracing_get_cpu(inode);
6598 mutex_init(&iter->mutex);
6599 filp->private_data = iter;
6601 if (iter->trace->pipe_open)
6602 iter->trace->pipe_open(iter);
6604 nonseekable_open(inode, filp);
6608 mutex_unlock(&trace_types_lock);
6613 __trace_array_put(tr);
6614 mutex_unlock(&trace_types_lock);
6618 static int tracing_release_pipe(struct inode *inode, struct file *file)
6620 struct trace_iterator *iter = file->private_data;
6621 struct trace_array *tr = inode->i_private;
6623 mutex_lock(&trace_types_lock);
6627 if (iter->trace->pipe_close)
6628 iter->trace->pipe_close(iter);
6630 mutex_unlock(&trace_types_lock);
6632 free_cpumask_var(iter->started);
6633 mutex_destroy(&iter->mutex);
6636 trace_array_put(tr);
6642 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6644 struct trace_array *tr = iter->tr;
6646 /* Iterators are static; they should be filled or empty */
6647 if (trace_buffer_iter(iter, iter->cpu_file))
6648 return EPOLLIN | EPOLLRDNORM;
6650 if (tr->trace_flags & TRACE_ITER_BLOCK)
6652 * Always select as readable when in blocking mode
6654 return EPOLLIN | EPOLLRDNORM;
6656 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6661 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6663 struct trace_iterator *iter = filp->private_data;
6665 return trace_poll(iter, filp, poll_table);
6668 /* Must be called with iter->mutex held. */
6669 static int tracing_wait_pipe(struct file *filp)
6671 struct trace_iterator *iter = filp->private_data;
6674 while (trace_empty(iter)) {
6676 if ((filp->f_flags & O_NONBLOCK)) {
6681 * We block until we read something and tracing is disabled.
6682 * We still block if tracing is disabled, but we have never
6683 * read anything. This allows a user to cat this file, and
6684 * then enable tracing. But after we have read something,
6685 * we give an EOF when tracing is again disabled.
6687 * iter->pos will be 0 if we haven't read anything.
6689 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6692 mutex_unlock(&iter->mutex);
6694 ret = wait_on_pipe(iter, 0);
6696 mutex_lock(&iter->mutex);
6709 tracing_read_pipe(struct file *filp, char __user *ubuf,
6710 size_t cnt, loff_t *ppos)
6712 struct trace_iterator *iter = filp->private_data;
6716 * Avoid more than one consumer on a single file descriptor.
6717 * This is just a matter of trace coherency; the ring buffer itself
6720 mutex_lock(&iter->mutex);
6722 /* return any leftover data */
6723 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6727 trace_seq_init(&iter->seq);
6729 if (iter->trace->read) {
6730 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6736 sret = tracing_wait_pipe(filp);
6740 /* stop when tracing is finished */
6741 if (trace_empty(iter)) {
6746 if (cnt >= PAGE_SIZE)
6747 cnt = PAGE_SIZE - 1;
6749 /* reset all but tr, trace, and overruns */
6750 memset(&iter->seq, 0,
6751 sizeof(struct trace_iterator) -
6752 offsetof(struct trace_iterator, seq));
6753 cpumask_clear(iter->started);
6754 trace_seq_init(&iter->seq);
6757 trace_event_read_lock();
6758 trace_access_lock(iter->cpu_file);
6759 while (trace_find_next_entry_inc(iter) != NULL) {
6760 enum print_line_t ret;
6761 int save_len = iter->seq.seq.len;
6763 ret = print_trace_line(iter);
6764 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6765 /* don't print partial lines */
6766 iter->seq.seq.len = save_len;
6769 if (ret != TRACE_TYPE_NO_CONSUME)
6770 trace_consume(iter);
6772 if (trace_seq_used(&iter->seq) >= cnt)
6776 * Setting the full flag means we reached the trace_seq buffer
6777 * size, so we should have left via the partial-output condition above.
6778 * One of the trace_seq_* functions is not being used properly.
6780 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6783 trace_access_unlock(iter->cpu_file);
6784 trace_event_read_unlock();
6786 /* Now copy what we have to the user */
6787 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6788 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6789 trace_seq_init(&iter->seq);
6792 * If there was nothing to send to user, in spite of consuming trace
6793 * entries, go back to wait for more entries.
6799 mutex_unlock(&iter->mutex);
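/*
 * User-space sketch of a consuming trace_pipe reader (illustration only,
 * assuming the usual tracefs mount point): reads block while the buffer
 * is empty, as described in tracing_wait_pipe() above, and every line
 * returned is consumed from the ring buffer.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[4096];
 *		FILE *fp = fopen("/sys/kernel/tracing/trace_pipe", "r");
 *
 *		if (!fp)
 *			return 1;
 *		while (fgets(line, sizeof(line), fp))
 *			fputs(line, stdout);
 *		fclose(fp);
 *		return 0;
 *	}
 */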
6804 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6807 __free_page(spd->pages[idx]);
6811 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6817 /* Seq buffer is page-sized, exactly what we need. */
6819 save_len = iter->seq.seq.len;
6820 ret = print_trace_line(iter);
6822 if (trace_seq_has_overflowed(&iter->seq)) {
6823 iter->seq.seq.len = save_len;
6828 * This should not be hit, because it should only
6829 * be set if the iter->seq overflowed. But check it
6830 * anyway to be safe.
6832 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6833 iter->seq.seq.len = save_len;
6837 count = trace_seq_used(&iter->seq) - save_len;
6840 iter->seq.seq.len = save_len;
6844 if (ret != TRACE_TYPE_NO_CONSUME)
6845 trace_consume(iter);
6847 if (!trace_find_next_entry_inc(iter)) {
6857 static ssize_t tracing_splice_read_pipe(struct file *filp,
6859 struct pipe_inode_info *pipe,
6863 struct page *pages_def[PIPE_DEF_BUFFERS];
6864 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6865 struct trace_iterator *iter = filp->private_data;
6866 struct splice_pipe_desc spd = {
6868 .partial = partial_def,
6869 .nr_pages = 0, /* This gets updated below. */
6870 .nr_pages_max = PIPE_DEF_BUFFERS,
6871 .ops = &default_pipe_buf_ops,
6872 .spd_release = tracing_spd_release_pipe,
6878 if (splice_grow_spd(pipe, &spd))
6881 mutex_lock(&iter->mutex);
6883 if (iter->trace->splice_read) {
6884 ret = iter->trace->splice_read(iter, filp,
6885 ppos, pipe, len, flags);
6890 ret = tracing_wait_pipe(filp);
6894 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6899 trace_event_read_lock();
6900 trace_access_lock(iter->cpu_file);
6902 /* Fill as many pages as possible. */
6903 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6904 spd.pages[i] = alloc_page(GFP_KERNEL);
6908 rem = tracing_fill_pipe_page(rem, iter);
6910 /* Copy the data into the page, so we can start over. */
6911 ret = trace_seq_to_buffer(&iter->seq,
6912 page_address(spd.pages[i]),
6913 trace_seq_used(&iter->seq));
6915 __free_page(spd.pages[i]);
6918 spd.partial[i].offset = 0;
6919 spd.partial[i].len = trace_seq_used(&iter->seq);
6921 trace_seq_init(&iter->seq);
6924 trace_access_unlock(iter->cpu_file);
6925 trace_event_read_unlock();
6926 mutex_unlock(&iter->mutex);
6931 ret = splice_to_pipe(pipe, &spd);
6935 splice_shrink_spd(&spd);
6939 mutex_unlock(&iter->mutex);
6944 tracing_entries_read(struct file *filp, char __user *ubuf,
6945 size_t cnt, loff_t *ppos)
6947 struct inode *inode = file_inode(filp);
6948 struct trace_array *tr = inode->i_private;
6949 int cpu = tracing_get_cpu(inode);
6954 mutex_lock(&trace_types_lock);
6956 if (cpu == RING_BUFFER_ALL_CPUS) {
6957 int cpu, buf_size_same;
6962 /* check if all cpu sizes are same */
6963 for_each_tracing_cpu(cpu) {
6964 /* fill in the size from first enabled cpu */
6966 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6967 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6973 if (buf_size_same) {
6974 if (!ring_buffer_expanded)
6975 r = sprintf(buf, "%lu (expanded: %lu)\n",
6977 trace_buf_size >> 10);
6979 r = sprintf(buf, "%lu\n", size >> 10);
6981 r = sprintf(buf, "X\n");
6983 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6985 mutex_unlock(&trace_types_lock);
6987 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6992 tracing_entries_write(struct file *filp, const char __user *ubuf,
6993 size_t cnt, loff_t *ppos)
6995 struct inode *inode = file_inode(filp);
6996 struct trace_array *tr = inode->i_private;
7000 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7004 /* must have at least 1 entry */
7008 /* value is in KB */
7010 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
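/*
 * User-space sketch of resizing through this file (illustration only,
 * assuming the usual tracefs mount point): the value written is in KB,
 * so the example below asks for roughly 4 MB per CPU; writing to a
 * per_cpu/cpuN/buffer_size_kb file instead resizes only that CPU.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "4096", 4);
 *		close(fd);
 *		return 0;
 *	}
 */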
7020 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7021 size_t cnt, loff_t *ppos)
7023 struct trace_array *tr = filp->private_data;
7026 unsigned long size = 0, expanded_size = 0;
7028 mutex_lock(&trace_types_lock);
7029 for_each_tracing_cpu(cpu) {
7030 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7031 if (!ring_buffer_expanded)
7032 expanded_size += trace_buf_size >> 10;
7034 if (ring_buffer_expanded)
7035 r = sprintf(buf, "%lu\n", size);
7037 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7038 mutex_unlock(&trace_types_lock);
7040 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7044 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7045 size_t cnt, loff_t *ppos)
7048 * There is no need to read what the user has written; this function
7049 * exists just to make sure that there is no error when "echo" is used
7058 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7060 struct trace_array *tr = inode->i_private;
7062 /* disable tracing ? */
7063 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7064 tracer_tracing_off(tr);
7065 /* resize the ring buffer to 0 */
7066 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7068 trace_array_put(tr);
7074 tracing_mark_write(struct file *filp, const char __user *ubuf,
7075 size_t cnt, loff_t *fpos)
7077 struct trace_array *tr = filp->private_data;
7078 struct ring_buffer_event *event;
7079 enum event_trigger_type tt = ETT_NONE;
7080 struct trace_buffer *buffer;
7081 struct print_entry *entry;
7086 /* Used in tracing_mark_raw_write() as well */
7087 #define FAULTED_STR "<faulted>"
7088 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7090 if (tracing_disabled)
7093 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7096 if (cnt > TRACE_BUF_SIZE)
7097 cnt = TRACE_BUF_SIZE;
7099 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7101 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7103 /* If less than "<faulted>", then make sure we can still add that */
7104 if (cnt < FAULTED_SIZE)
7105 size += FAULTED_SIZE - cnt;
7107 buffer = tr->array_buffer.buffer;
7108 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7110 if (unlikely(!event))
7111 /* Ring buffer disabled, return as if not open for write */
7114 entry = ring_buffer_event_data(event);
7115 entry->ip = _THIS_IP_;
7117 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7119 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7125 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7126 /* do not add \n before testing triggers, but add \0 */
7127 entry->buf[cnt] = '\0';
7128 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7131 if (entry->buf[cnt - 1] != '\n') {
7132 entry->buf[cnt] = '\n';
7133 entry->buf[cnt + 1] = '\0';
7135 entry->buf[cnt] = '\0';
7137 if (static_branch_unlikely(&trace_marker_exports_enabled))
7138 ftrace_exports(event, TRACE_EXPORT_MARKER);
7139 __buffer_unlock_commit(buffer, event);
7142 event_triggers_post_call(tr->trace_marker_file, tt);
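/*
 * User-space sketch of the write path above (illustration only, assuming
 * the usual tracefs mount point): a plain write to trace_marker shows up
 * as a print entry in the trace, with a newline appended if the message
 * did not end in one.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		const char *msg = "hello from user space";
 *		int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, msg, strlen(msg));
 *		close(fd);
 *		return 0;
 *	}
 */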
7150 /* Limit it for now to 3K (including tag) */
7151 #define RAW_DATA_MAX_SIZE (1024*3)
7154 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7155 size_t cnt, loff_t *fpos)
7157 struct trace_array *tr = filp->private_data;
7158 struct ring_buffer_event *event;
7159 struct trace_buffer *buffer;
7160 struct raw_data_entry *entry;
7165 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7167 if (tracing_disabled)
7170 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7173 /* The marker must at least have a tag id */
7174 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7177 if (cnt > TRACE_BUF_SIZE)
7178 cnt = TRACE_BUF_SIZE;
7180 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7182 size = sizeof(*entry) + cnt;
7183 if (cnt < FAULT_SIZE_ID)
7184 size += FAULT_SIZE_ID - cnt;
7186 buffer = tr->array_buffer.buffer;
7187 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7190 /* Ring buffer disabled, return as if not open for write */
7193 entry = ring_buffer_event_data(event);
7195 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7198 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7203 __buffer_unlock_commit(buffer, event);
7211 static int tracing_clock_show(struct seq_file *m, void *v)
7213 struct trace_array *tr = m->private;
7216 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7218 "%s%s%s%s", i ? " " : "",
7219 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7220 i == tr->clock_id ? "]" : "");
7226 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7230 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7231 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7234 if (i == ARRAY_SIZE(trace_clocks))
7237 mutex_lock(&trace_types_lock);
7241 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7244 * New clock may not be consistent with the previous clock.
7245 * Reset the buffer so that it doesn't have incomparable timestamps.
7247 tracing_reset_online_cpus(&tr->array_buffer);
7249 #ifdef CONFIG_TRACER_MAX_TRACE
7250 if (tr->max_buffer.buffer)
7251 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7252 tracing_reset_online_cpus(&tr->max_buffer);
7255 mutex_unlock(&trace_types_lock);
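/*
 * User-space sketch of switching clocks (illustration only, assuming the
 * usual tracefs mount point): writing one of the names listed by
 * tracing_clock_show() selects that clock, and the buffers are reset as
 * described above so old and new timestamps never get mixed.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "global", 6);
 *		close(fd);
 *		return 0;
 *	}
 */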
7260 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7261 size_t cnt, loff_t *fpos)
7263 struct seq_file *m = filp->private_data;
7264 struct trace_array *tr = m->private;
7266 const char *clockstr;
7269 if (cnt >= sizeof(buf))
7272 if (copy_from_user(buf, ubuf, cnt))
7277 clockstr = strstrip(buf);
7279 ret = tracing_set_clock(tr, clockstr);
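/*
 * Illustrative sketch (not part of this file): the trace clock can be
 * changed from user space by writing one of the names listed in the
 * trace_clock file, or from kernel code via tracing_set_clock().  As the
 * comment in tracing_set_clock() notes, switching clocks resets the
 * buffers because timestamps from different clocks are not comparable.
 *
 *	# echo mono > /sys/kernel/tracing/trace_clock
 *	# cat /sys/kernel/tracing/trace_clock	  # active clock shown in [brackets]
 *
 * or, for an in-kernel caller holding a trace_array pointer (the "tr"
 * instance below is an assumption):
 *
 *	if (tracing_set_clock(tr, "mono") < 0)
 *		pr_warn("could not switch the trace clock\n");
 */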
7288 static int tracing_clock_open(struct inode *inode, struct file *file)
7290 struct trace_array *tr = inode->i_private;
7293 ret = tracing_check_open_get_tr(tr);
7297 ret = single_open(file, tracing_clock_show, inode->i_private);
7299 trace_array_put(tr);
7304 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7306 struct trace_array *tr = m->private;
7308 mutex_lock(&trace_types_lock);
7310 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7311 seq_puts(m, "delta [absolute]\n");
7313 seq_puts(m, "[delta] absolute\n");
7315 mutex_unlock(&trace_types_lock);
7320 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7322 struct trace_array *tr = inode->i_private;
7325 ret = tracing_check_open_get_tr(tr);
7329 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7331 trace_array_put(tr);
7336 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7338 if (rbe == this_cpu_read(trace_buffered_event))
7339 return ring_buffer_time_stamp(buffer);
7341 return ring_buffer_event_time_stamp(buffer, rbe);
7345 * Set or disable using the per CPU trace_buffered_event when possible.
7347 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7351 mutex_lock(&trace_types_lock);
7353 if (set && tr->no_filter_buffering_ref++)
7357 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7362 --tr->no_filter_buffering_ref;
7365 mutex_unlock(&trace_types_lock);
7370 struct ftrace_buffer_info {
7371 struct trace_iterator iter;
7373 unsigned int spare_cpu;
7377 #ifdef CONFIG_TRACER_SNAPSHOT
7378 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7380 struct trace_array *tr = inode->i_private;
7381 struct trace_iterator *iter;
7385 ret = tracing_check_open_get_tr(tr);
7389 if (file->f_mode & FMODE_READ) {
7390 iter = __tracing_open(inode, file, true);
7392 ret = PTR_ERR(iter);
7394 /* Writes still need the seq_file to hold the private data */
7396 m = kzalloc(sizeof(*m), GFP_KERNEL);
7399 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7407 iter->array_buffer = &tr->max_buffer;
7408 iter->cpu_file = tracing_get_cpu(inode);
7410 file->private_data = m;
7414 trace_array_put(tr);
7420 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7423 struct seq_file *m = filp->private_data;
7424 struct trace_iterator *iter = m->private;
7425 struct trace_array *tr = iter->tr;
7429 ret = tracing_update_buffers();
7433 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7437 mutex_lock(&trace_types_lock);
7439 if (tr->current_trace->use_max_tr) {
7444 local_irq_disable();
7445 arch_spin_lock(&tr->max_lock);
7446 if (tr->cond_snapshot)
7448 arch_spin_unlock(&tr->max_lock);
7455 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7459 if (tr->allocated_snapshot)
7463 /* Only allow per-cpu swap if the ring buffer supports it */
7464 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7465 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7470 if (tr->allocated_snapshot)
7471 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7472 &tr->array_buffer, iter->cpu_file);
7474 ret = tracing_alloc_snapshot_instance(tr);
7477 local_irq_disable();
7478 /* Now, we're going to swap */
7479 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7480 update_max_tr(tr, current, smp_processor_id(), NULL);
7482 update_max_tr_single(tr, current, iter->cpu_file);
7486 if (tr->allocated_snapshot) {
7487 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7488 tracing_reset_online_cpus(&tr->max_buffer);
7490 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7500 mutex_unlock(&trace_types_lock);
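/*
 * Illustrative sketch (not part of this file): the snapshot file takes a
 * small numeric command, handled by tracing_snapshot_write() above.
 * Roughly: writing 0 frees the snapshot buffer, writing 1 allocates it if
 * needed and swaps it with the live buffer, and any other value clears the
 * snapshot buffer contents.  The path is an assumption for the example.
 *
 *	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
 *
 *	write(fd, "1", 1);	// allocate (if needed) and take a snapshot
 *	// ... read the frozen copy back from the same file ...
 *	write(fd, "0", 1);	// free the snapshot buffer again
 *	close(fd);
 */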
7504 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7506 struct seq_file *m = file->private_data;
7509 ret = tracing_release(inode, file);
7511 if (file->f_mode & FMODE_READ)
7514 /* If write only, the seq_file is just a stub */
7522 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7523 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7524 size_t count, loff_t *ppos);
7525 static int tracing_buffers_release(struct inode *inode, struct file *file);
7526 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7527 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7529 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7531 struct ftrace_buffer_info *info;
7534 /* The following checks for tracefs lockdown */
7535 ret = tracing_buffers_open(inode, filp);
7539 info = filp->private_data;
7541 if (info->iter.trace->use_max_tr) {
7542 tracing_buffers_release(inode, filp);
7546 info->iter.snapshot = true;
7547 info->iter.array_buffer = &info->iter.tr->max_buffer;
7552 #endif /* CONFIG_TRACER_SNAPSHOT */
7555 static const struct file_operations tracing_thresh_fops = {
7556 .open = tracing_open_generic,
7557 .read = tracing_thresh_read,
7558 .write = tracing_thresh_write,
7559 .llseek = generic_file_llseek,
7562 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7563 static const struct file_operations tracing_max_lat_fops = {
7564 .open = tracing_open_generic,
7565 .read = tracing_max_lat_read,
7566 .write = tracing_max_lat_write,
7567 .llseek = generic_file_llseek,
7571 static const struct file_operations set_tracer_fops = {
7572 .open = tracing_open_generic,
7573 .read = tracing_set_trace_read,
7574 .write = tracing_set_trace_write,
7575 .llseek = generic_file_llseek,
7578 static const struct file_operations tracing_pipe_fops = {
7579 .open = tracing_open_pipe,
7580 .poll = tracing_poll_pipe,
7581 .read = tracing_read_pipe,
7582 .splice_read = tracing_splice_read_pipe,
7583 .release = tracing_release_pipe,
7584 .llseek = no_llseek,
7587 static const struct file_operations tracing_entries_fops = {
7588 .open = tracing_open_generic_tr,
7589 .read = tracing_entries_read,
7590 .write = tracing_entries_write,
7591 .llseek = generic_file_llseek,
7592 .release = tracing_release_generic_tr,
7595 static const struct file_operations tracing_total_entries_fops = {
7596 .open = tracing_open_generic_tr,
7597 .read = tracing_total_entries_read,
7598 .llseek = generic_file_llseek,
7599 .release = tracing_release_generic_tr,
7602 static const struct file_operations tracing_free_buffer_fops = {
7603 .open = tracing_open_generic_tr,
7604 .write = tracing_free_buffer_write,
7605 .release = tracing_free_buffer_release,
7608 static const struct file_operations tracing_mark_fops = {
7609 .open = tracing_open_generic_tr,
7610 .write = tracing_mark_write,
7611 .llseek = generic_file_llseek,
7612 .release = tracing_release_generic_tr,
7615 static const struct file_operations tracing_mark_raw_fops = {
7616 .open = tracing_open_generic_tr,
7617 .write = tracing_mark_raw_write,
7618 .llseek = generic_file_llseek,
7619 .release = tracing_release_generic_tr,
7622 static const struct file_operations trace_clock_fops = {
7623 .open = tracing_clock_open,
7625 .llseek = seq_lseek,
7626 .release = tracing_single_release_tr,
7627 .write = tracing_clock_write,
7630 static const struct file_operations trace_time_stamp_mode_fops = {
7631 .open = tracing_time_stamp_mode_open,
7633 .llseek = seq_lseek,
7634 .release = tracing_single_release_tr,
7637 #ifdef CONFIG_TRACER_SNAPSHOT
7638 static const struct file_operations snapshot_fops = {
7639 .open = tracing_snapshot_open,
7641 .write = tracing_snapshot_write,
7642 .llseek = tracing_lseek,
7643 .release = tracing_snapshot_release,
7646 static const struct file_operations snapshot_raw_fops = {
7647 .open = snapshot_raw_open,
7648 .read = tracing_buffers_read,
7649 .release = tracing_buffers_release,
7650 .splice_read = tracing_buffers_splice_read,
7651 .llseek = no_llseek,
7654 #endif /* CONFIG_TRACER_SNAPSHOT */
7657 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7658 * @filp: The active open file structure
7659 * @ubuf: The user space provided buffer holding the value to write
7660 * @cnt: The maximum number of bytes to read from the user buffer
7661 * @ppos: The current "file" position
7663 * This function implements the write interface for a struct trace_min_max_param.
7664 * The filp->private_data must point to a trace_min_max_param structure that
7665 * defines where to write the value, the min and the max acceptable values,
7666 * and a lock to protect the write.
7669 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7671 struct trace_min_max_param *param = filp->private_data;
7678 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7683 mutex_lock(param->lock);
7685 if (param->min && val < *param->min)
7688 if (param->max && val > *param->max)
7695 mutex_unlock(param->lock);
7704 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7705 * @filp: The active open file structure
7706 * @ubuf: The userspace provided buffer to read value into
7707 * @cnt: The maximum number of bytes to read
7708 * @ppos: The current "file" position
7710 * This function implements the read interface for a struct trace_min_max_param.
7711 * The filp->private_data must point to a trace_min_max_param struct with valid
7715 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7717 struct trace_min_max_param *param = filp->private_data;
7718 char buf[U64_STR_SIZE];
7727 if (cnt > sizeof(buf))
7730 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7732 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7735 const struct file_operations trace_min_max_fops = {
7736 .open = tracing_open_generic,
7737 .read = trace_min_max_read,
7738 .write = trace_min_max_write,
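/*
 * Illustrative sketch (not part of this file): a tracer can expose a
 * bounded u64 knob by pointing a struct trace_min_max_param at its value
 * and limits and creating the file with trace_min_max_fops.  The names
 * below (my_val, my_max, my_lock, "my_knob", parent) are assumptions for
 * the example, not existing kernel symbols.
 *
 *	static u64 my_val = 50;
 *	static u64 my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= NULL,		// no lower bound enforced
 *		.max	= &my_max,	// writes above 100 are rejected
 *	};
 *
 *	// in the tracer's init path, under the proper tracefs parent:
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */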
7741 #define TRACING_LOG_ERRS_MAX 8
7742 #define TRACING_LOG_LOC_MAX 128
7744 #define CMD_PREFIX " Command: "
7747 const char **errs; /* ptr to loc-specific array of err strings */
7748 u8 type; /* index into errs -> specific err string */
7749 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7753 struct tracing_log_err {
7754 struct list_head list;
7755 struct err_info info;
7756 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7757 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7760 static DEFINE_MUTEX(tracing_err_log_lock);
7762 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7764 struct tracing_log_err *err;
7766 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7767 err = kzalloc(sizeof(*err), GFP_KERNEL);
7769 err = ERR_PTR(-ENOMEM);
7771 tr->n_err_log_entries++;
7776 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7777 list_del(&err->list);
7783 * err_pos - find the position of a string within a command for error careting
7784 * @cmd: The tracing command that caused the error
7785 * @str: The string to position the caret at within @cmd
7787 * Finds the position of the first occurrence of @str within @cmd. The
7788 * return value can be passed to tracing_log_err() for caret placement
7791 * Returns the index within @cmd of the first occurrence of @str or 0
7792 * if @str was not found.
7794 unsigned int err_pos(char *cmd, const char *str)
7798 if (WARN_ON(!strlen(cmd)))
7801 found = strstr(cmd, str);
7809 * tracing_log_err - write an error to the tracing error log
7810 * @tr: The associated trace array for the error (NULL for top level array)
7811 * @loc: A string describing where the error occurred
7812 * @cmd: The tracing command that caused the error
7813 * @errs: The array of loc-specific static error strings
7814 * @type: The index into errs[], which produces the specific static err string
7815 * @pos: The position the caret should be placed in the cmd
7817 * Writes an error into tracing/error_log of the form:
7819 * <loc>: error: <text>
7823 * tracing/error_log is a small log file containing the last
7824 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7825 * unless there has been a tracing error, and the error log can be
7826 * cleared and have its memory freed by writing the empty string in
7827 * truncation mode to it i.e. echo > tracing/error_log.
7829 * NOTE: the @errs array along with the @type param are used to
7830 * produce a static error string - this string is not copied and saved
7831 * when the error is logged - only a pointer to it is saved. See
7832 * existing callers for examples of how static strings are typically
7833 * defined for use with tracing_log_err().
7835 void tracing_log_err(struct trace_array *tr,
7836 const char *loc, const char *cmd,
7837 const char **errs, u8 type, u8 pos)
7839 struct tracing_log_err *err;
7844 mutex_lock(&tracing_err_log_lock);
7845 err = get_tracing_log_err(tr);
7846 if (PTR_ERR(err) == -ENOMEM) {
7847 mutex_unlock(&tracing_err_log_lock);
7851 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7852 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7854 err->info.errs = errs;
7855 err->info.type = type;
7856 err->info.pos = pos;
7857 err->info.ts = local_clock();
7859 list_add_tail(&err->list, &tr->err_log);
7860 mutex_unlock(&tracing_err_log_lock);
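/*
 * Illustrative sketch (not part of this file): callers keep a static array
 * of error strings and pass the array plus an index, so only a pointer to
 * the static string is stored per logged error.  The names below (my_errs,
 * report_bad_keyword, "my_subsystem") are assumptions for the example.
 *
 *	static const char *my_errs[] = {
 *		"Unknown keyword",
 *		"Missing argument",
 *	};
 *
 *	static void report_bad_keyword(struct trace_array *tr,
 *				       char *cmd, const char *keyword)
 *	{
 *		tracing_log_err(tr, "my_subsystem", cmd, my_errs,
 *				0, err_pos(cmd, keyword));
 *	}
 *
 * which shows up in tracing/error_log roughly as:
 *
 *	[   12.345678] my_subsystem: error: Unknown keyword
 *	  Command: <the offending command>
 *	                ^
 */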
7863 static void clear_tracing_err_log(struct trace_array *tr)
7865 struct tracing_log_err *err, *next;
7867 mutex_lock(&tracing_err_log_lock);
7868 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7869 list_del(&err->list);
7873 tr->n_err_log_entries = 0;
7874 mutex_unlock(&tracing_err_log_lock);
7877 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7879 struct trace_array *tr = m->private;
7881 mutex_lock(&tracing_err_log_lock);
7883 return seq_list_start(&tr->err_log, *pos);
7886 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7888 struct trace_array *tr = m->private;
7890 return seq_list_next(v, &tr->err_log, pos);
7893 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7895 mutex_unlock(&tracing_err_log_lock);
7898 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7902 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7904 for (i = 0; i < pos; i++)
7909 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7911 struct tracing_log_err *err = v;
7914 const char *err_text = err->info.errs[err->info.type];
7915 u64 sec = err->info.ts;
7918 nsec = do_div(sec, NSEC_PER_SEC);
7919 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7920 err->loc, err_text);
7921 seq_printf(m, "%s", err->cmd);
7922 tracing_err_log_show_pos(m, err->info.pos);
7928 static const struct seq_operations tracing_err_log_seq_ops = {
7929 .start = tracing_err_log_seq_start,
7930 .next = tracing_err_log_seq_next,
7931 .stop = tracing_err_log_seq_stop,
7932 .show = tracing_err_log_seq_show
7935 static int tracing_err_log_open(struct inode *inode, struct file *file)
7937 struct trace_array *tr = inode->i_private;
7940 ret = tracing_check_open_get_tr(tr);
7944 /* If this file was opened for write, then erase contents */
7945 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7946 clear_tracing_err_log(tr);
7948 if (file->f_mode & FMODE_READ) {
7949 ret = seq_open(file, &tracing_err_log_seq_ops);
7951 struct seq_file *m = file->private_data;
7954 trace_array_put(tr);
7960 static ssize_t tracing_err_log_write(struct file *file,
7961 const char __user *buffer,
7962 size_t count, loff_t *ppos)
7967 static int tracing_err_log_release(struct inode *inode, struct file *file)
7969 struct trace_array *tr = inode->i_private;
7971 trace_array_put(tr);
7973 if (file->f_mode & FMODE_READ)
7974 seq_release(inode, file);
7979 static const struct file_operations tracing_err_log_fops = {
7980 .open = tracing_err_log_open,
7981 .write = tracing_err_log_write,
7983 .llseek = seq_lseek,
7984 .release = tracing_err_log_release,
7987 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7989 struct trace_array *tr = inode->i_private;
7990 struct ftrace_buffer_info *info;
7993 ret = tracing_check_open_get_tr(tr);
7997 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7999 trace_array_put(tr);
8003 mutex_lock(&trace_types_lock);
8006 info->iter.cpu_file = tracing_get_cpu(inode);
8007 info->iter.trace = tr->current_trace;
8008 info->iter.array_buffer = &tr->array_buffer;
8010 /* Force reading ring buffer for first read */
8011 info->read = (unsigned int)-1;
8013 filp->private_data = info;
8017 mutex_unlock(&trace_types_lock);
8019 ret = nonseekable_open(inode, filp);
8021 trace_array_put(tr);
8027 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8029 struct ftrace_buffer_info *info = filp->private_data;
8030 struct trace_iterator *iter = &info->iter;
8032 return trace_poll(iter, filp, poll_table);
8036 tracing_buffers_read(struct file *filp, char __user *ubuf,
8037 size_t count, loff_t *ppos)
8039 struct ftrace_buffer_info *info = filp->private_data;
8040 struct trace_iterator *iter = &info->iter;
8047 #ifdef CONFIG_TRACER_MAX_TRACE
8048 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8053 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8055 if (IS_ERR(info->spare)) {
8056 ret = PTR_ERR(info->spare);
8059 info->spare_cpu = iter->cpu_file;
8065 /* Do we have previous read data to read? */
8066 if (info->read < PAGE_SIZE)
8070 trace_access_lock(iter->cpu_file);
8071 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8075 trace_access_unlock(iter->cpu_file);
8078 if (trace_empty(iter)) {
8079 if ((filp->f_flags & O_NONBLOCK))
8082 ret = wait_on_pipe(iter, 0);
8093 size = PAGE_SIZE - info->read;
8097 ret = copy_to_user(ubuf, info->spare + info->read, size);
8109 static int tracing_buffers_release(struct inode *inode, struct file *file)
8111 struct ftrace_buffer_info *info = file->private_data;
8112 struct trace_iterator *iter = &info->iter;
8114 mutex_lock(&trace_types_lock);
8116 iter->tr->trace_ref--;
8118 __trace_array_put(iter->tr);
8121 /* Make sure the waiters see the new wait_index */
8124 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8127 ring_buffer_free_read_page(iter->array_buffer->buffer,
8128 info->spare_cpu, info->spare);
8131 mutex_unlock(&trace_types_lock);
8137 struct trace_buffer *buffer;
8140 refcount_t refcount;
8143 static void buffer_ref_release(struct buffer_ref *ref)
8145 if (!refcount_dec_and_test(&ref->refcount))
8147 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8151 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8152 struct pipe_buffer *buf)
8154 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8156 buffer_ref_release(ref);
8160 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8161 struct pipe_buffer *buf)
8163 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8165 if (refcount_read(&ref->refcount) > INT_MAX/2)
8168 refcount_inc(&ref->refcount);
8172 /* Pipe buffer operations for a buffer. */
8173 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8174 .release = buffer_pipe_buf_release,
8175 .get = buffer_pipe_buf_get,
8179 * Callback from splice_to_pipe(), if we need to release some pages
8180 * at the end of the spd in case we errored out while filling the pipe.
8182 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8184 struct buffer_ref *ref =
8185 (struct buffer_ref *)spd->partial[i].private;
8187 buffer_ref_release(ref);
8188 spd->partial[i].private = 0;
8192 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8193 struct pipe_inode_info *pipe, size_t len,
8196 struct ftrace_buffer_info *info = file->private_data;
8197 struct trace_iterator *iter = &info->iter;
8198 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8199 struct page *pages_def[PIPE_DEF_BUFFERS];
8200 struct splice_pipe_desc spd = {
8202 .partial = partial_def,
8203 .nr_pages_max = PIPE_DEF_BUFFERS,
8204 .ops = &buffer_pipe_buf_ops,
8205 .spd_release = buffer_spd_release,
8207 struct buffer_ref *ref;
8211 #ifdef CONFIG_TRACER_MAX_TRACE
8212 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8216 if (*ppos & (PAGE_SIZE - 1))
8219 if (len & (PAGE_SIZE - 1)) {
8220 if (len < PAGE_SIZE)
8225 if (splice_grow_spd(pipe, &spd))
8229 trace_access_lock(iter->cpu_file);
8230 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8232 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8236 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8242 refcount_set(&ref->refcount, 1);
8243 ref->buffer = iter->array_buffer->buffer;
8244 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8245 if (IS_ERR(ref->page)) {
8246 ret = PTR_ERR(ref->page);
8251 ref->cpu = iter->cpu_file;
8253 r = ring_buffer_read_page(ref->buffer, &ref->page,
8254 len, iter->cpu_file, 1);
8256 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8262 page = virt_to_page(ref->page);
8264 spd.pages[i] = page;
8265 spd.partial[i].len = PAGE_SIZE;
8266 spd.partial[i].offset = 0;
8267 spd.partial[i].private = (unsigned long)ref;
8271 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8274 trace_access_unlock(iter->cpu_file);
8277 /* did we read anything? */
8278 if (!spd.nr_pages) {
8285 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8288 wait_index = READ_ONCE(iter->wait_index);
8290 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8294 /* No need to wait after waking up when tracing is off */
8295 if (!tracer_tracing_is_on(iter->tr))
8298 /* Make sure we see the new wait_index */
8300 if (wait_index != iter->wait_index)
8306 ret = splice_to_pipe(pipe, &spd);
8308 splice_shrink_spd(&spd);
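/*
 * Illustrative sketch (not part of this file): trace_pipe_raw is consumed
 * in whole pages, typically with splice(2) so ring buffer pages reach a
 * pipe (and then a file or socket) without copying, which is what the
 * splice handler above serves.  The paths below are assumptions and a 4K
 * page size is assumed for the length.
 *
 *	int raw = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int out = open("trace.cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	for (;;) {
 *		ssize_t n = splice(raw, NULL, pfd[1], NULL, 4096,
 *				   SPLICE_F_MOVE);
 *		if (n <= 0)
 *			break;
 *		splice(pfd[0], NULL, out, NULL, n, SPLICE_F_MOVE);
 *	}
 */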
8313 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8314 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8316 struct ftrace_buffer_info *info = file->private_data;
8317 struct trace_iterator *iter = &info->iter;
8320 return -ENOIOCTLCMD;
8322 mutex_lock(&trace_types_lock);
8325 /* Make sure the waiters see the new wait_index */
8328 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8330 mutex_unlock(&trace_types_lock);
8334 static const struct file_operations tracing_buffers_fops = {
8335 .open = tracing_buffers_open,
8336 .read = tracing_buffers_read,
8337 .poll = tracing_buffers_poll,
8338 .release = tracing_buffers_release,
8339 .splice_read = tracing_buffers_splice_read,
8340 .unlocked_ioctl = tracing_buffers_ioctl,
8341 .llseek = no_llseek,
8345 tracing_stats_read(struct file *filp, char __user *ubuf,
8346 size_t count, loff_t *ppos)
8348 struct inode *inode = file_inode(filp);
8349 struct trace_array *tr = inode->i_private;
8350 struct array_buffer *trace_buf = &tr->array_buffer;
8351 int cpu = tracing_get_cpu(inode);
8352 struct trace_seq *s;
8354 unsigned long long t;
8355 unsigned long usec_rem;
8357 s = kmalloc(sizeof(*s), GFP_KERNEL);
8363 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8364 trace_seq_printf(s, "entries: %ld\n", cnt);
8366 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8367 trace_seq_printf(s, "overrun: %ld\n", cnt);
8369 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8370 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8372 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8373 trace_seq_printf(s, "bytes: %ld\n", cnt);
8375 if (trace_clocks[tr->clock_id].in_ns) {
8376 /* local or global for trace_clock */
8377 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8378 usec_rem = do_div(t, USEC_PER_SEC);
8379 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8382 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8383 usec_rem = do_div(t, USEC_PER_SEC);
8384 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8386 /* counter or tsc mode for trace_clock */
8387 trace_seq_printf(s, "oldest event ts: %llu\n",
8388 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8390 trace_seq_printf(s, "now ts: %llu\n",
8391 ring_buffer_time_stamp(trace_buf->buffer));
8394 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8395 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8397 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8398 trace_seq_printf(s, "read events: %ld\n", cnt);
8400 count = simple_read_from_buffer(ubuf, count, ppos,
8401 s->buffer, trace_seq_used(s));
8408 static const struct file_operations tracing_stats_fops = {
8409 .open = tracing_open_generic_tr,
8410 .read = tracing_stats_read,
8411 .llseek = generic_file_llseek,
8412 .release = tracing_release_generic_tr,
8415 #ifdef CONFIG_DYNAMIC_FTRACE
8418 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8419 size_t cnt, loff_t *ppos)
8425 /* 256 should be plenty to hold the amount needed */
8426 buf = kmalloc(256, GFP_KERNEL);
8430 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8431 ftrace_update_tot_cnt,
8432 ftrace_number_of_pages,
8433 ftrace_number_of_groups);
8435 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8440 static const struct file_operations tracing_dyn_info_fops = {
8441 .open = tracing_open_generic,
8442 .read = tracing_read_dyn_info,
8443 .llseek = generic_file_llseek,
8445 #endif /* CONFIG_DYNAMIC_FTRACE */
8447 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8449 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8450 struct trace_array *tr, struct ftrace_probe_ops *ops,
8453 tracing_snapshot_instance(tr);
8457 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8458 struct trace_array *tr, struct ftrace_probe_ops *ops,
8461 struct ftrace_func_mapper *mapper = data;
8465 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8475 tracing_snapshot_instance(tr);
8479 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8480 struct ftrace_probe_ops *ops, void *data)
8482 struct ftrace_func_mapper *mapper = data;
8485 seq_printf(m, "%ps:", (void *)ip);
8487 seq_puts(m, "snapshot");
8490 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8493 seq_printf(m, ":count=%ld\n", *count);
8495 seq_puts(m, ":unlimited\n");
8501 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8502 unsigned long ip, void *init_data, void **data)
8504 struct ftrace_func_mapper *mapper = *data;
8507 mapper = allocate_ftrace_func_mapper();
8513 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8517 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8518 unsigned long ip, void *data)
8520 struct ftrace_func_mapper *mapper = data;
8525 free_ftrace_func_mapper(mapper, NULL);
8529 ftrace_func_mapper_remove_ip(mapper, ip);
8532 static struct ftrace_probe_ops snapshot_probe_ops = {
8533 .func = ftrace_snapshot,
8534 .print = ftrace_snapshot_print,
8537 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8538 .func = ftrace_count_snapshot,
8539 .print = ftrace_snapshot_print,
8540 .init = ftrace_snapshot_init,
8541 .free = ftrace_snapshot_free,
8545 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8546 char *glob, char *cmd, char *param, int enable)
8548 struct ftrace_probe_ops *ops;
8549 void *count = (void *)-1;
8556 /* hash funcs only work with set_ftrace_filter */
8560 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8563 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8568 number = strsep(&param, ":");
8570 if (!strlen(number))
8574 * We use the callback data field (which is a pointer)
8577 ret = kstrtoul(number, 0, (unsigned long *)&count);
8582 ret = tracing_alloc_snapshot_instance(tr);
8586 ret = register_ftrace_function_probe(glob, tr, ops, count);
8589 return ret < 0 ? ret : 0;
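/*
 * Illustrative sketch (not part of this file): the probe ops above back
 * the snapshot command accepted by set_ftrace_filter (the elided
 * ftrace_snapshot_cmd.name is assumed to be "snapshot").  A count
 * parameter selects snapshot_count_probe_ops so only the first N hits
 * trigger a snapshot, and a leading '!' unregisters the probe:
 *
 *	# echo 'schedule:snapshot'   >> /sys/kernel/tracing/set_ftrace_filter
 *	# echo 'kfree:snapshot:3'    >> /sys/kernel/tracing/set_ftrace_filter
 *	# echo '!schedule:snapshot'  >> /sys/kernel/tracing/set_ftrace_filter
 */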
8592 static struct ftrace_func_command ftrace_snapshot_cmd = {
8594 .func = ftrace_trace_snapshot_callback,
8597 static __init int register_snapshot_cmd(void)
8599 return register_ftrace_command(&ftrace_snapshot_cmd);
8602 static inline __init int register_snapshot_cmd(void) { return 0; }
8603 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8605 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8607 if (WARN_ON(!tr->dir))
8608 return ERR_PTR(-ENODEV);
8610 /* Top directory uses NULL as the parent */
8611 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8614 /* All sub buffers have a descriptor */
8618 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8620 struct dentry *d_tracer;
8623 return tr->percpu_dir;
8625 d_tracer = tracing_get_dentry(tr);
8626 if (IS_ERR(d_tracer))
8629 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8631 MEM_FAIL(!tr->percpu_dir,
8632 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8634 return tr->percpu_dir;
8637 static struct dentry *
8638 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8639 void *data, long cpu, const struct file_operations *fops)
8641 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8643 if (ret) /* See tracing_get_cpu() */
8644 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8649 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8651 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8652 struct dentry *d_cpu;
8653 char cpu_dir[30]; /* 30 characters should be more than enough */
8658 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8659 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8661 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8665 /* per cpu trace_pipe */
8666 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8667 tr, cpu, &tracing_pipe_fops);
8670 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8671 tr, cpu, &tracing_fops);
8673 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8674 tr, cpu, &tracing_buffers_fops);
8676 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8677 tr, cpu, &tracing_stats_fops);
8679 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8680 tr, cpu, &tracing_entries_fops);
8682 #ifdef CONFIG_TRACER_SNAPSHOT
8683 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8684 tr, cpu, &snapshot_fops);
8686 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8687 tr, cpu, &snapshot_raw_fops);
8691 #ifdef CONFIG_FTRACE_SELFTEST
8692 /* Let selftest have access to static functions in this file */
8693 #include "trace_selftest.c"
8697 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8700 struct trace_option_dentry *topt = filp->private_data;
8703 if (topt->flags->val & topt->opt->bit)
8708 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8712 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8715 struct trace_option_dentry *topt = filp->private_data;
8719 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8723 if (val != 0 && val != 1)
8726 if (!!(topt->flags->val & topt->opt->bit) != val) {
8727 mutex_lock(&trace_types_lock);
8728 ret = __set_tracer_option(topt->tr, topt->flags,
8730 mutex_unlock(&trace_types_lock);
8741 static const struct file_operations trace_options_fops = {
8742 .open = tracing_open_generic,
8743 .read = trace_options_read,
8744 .write = trace_options_write,
8745 .llseek = generic_file_llseek,
8749 * In order to pass in both the trace_array descriptor as well as the index
8750 * to the flag that the trace option file represents, the trace_array
8751 * has a character array of trace_flags_index[], which holds the index
8752 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8753 * The address of this character array is passed to the flag option file
8754 * read/write callbacks.
8756 * In order to extract both the index and the trace_array descriptor,
8757 * get_tr_index() uses the following algorithm.
8761 * As the pointer itself contains the address of the index (remember
8764 * Then to get the trace_array descriptor, by subtracting that index
8765 * from the ptr, we get to the start of the index itself.
8767 * ptr - idx == &index[0]
8769 * Then a simple container_of() from that pointer gets us to the
8770 * trace_array descriptor.
8772 static void get_tr_index(void *data, struct trace_array **ptr,
8773 unsigned int *pindex)
8775 *pindex = *(unsigned char *)data;
8777 *ptr = container_of(data - *pindex, struct trace_array,
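/*
 * Worked example (illustrative): an option core file created for flag 3
 * gets data = &tr->trace_flags_index[3], so with the comment above:
 *
 *	*pindex        == tr->trace_flags_index[3] == 3
 *	data - *pindex == &tr->trace_flags_index[0]
 *	container_of(data - *pindex, struct trace_array, trace_flags_index)
 *		       == tr
 */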
8782 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8785 void *tr_index = filp->private_data;
8786 struct trace_array *tr;
8790 get_tr_index(tr_index, &tr, &index);
8792 if (tr->trace_flags & (1 << index))
8797 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8801 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8804 void *tr_index = filp->private_data;
8805 struct trace_array *tr;
8810 get_tr_index(tr_index, &tr, &index);
8812 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8816 if (val != 0 && val != 1)
8819 mutex_lock(&event_mutex);
8820 mutex_lock(&trace_types_lock);
8821 ret = set_tracer_flag(tr, 1 << index, val);
8822 mutex_unlock(&trace_types_lock);
8823 mutex_unlock(&event_mutex);
8833 static const struct file_operations trace_options_core_fops = {
8834 .open = tracing_open_generic,
8835 .read = trace_options_core_read,
8836 .write = trace_options_core_write,
8837 .llseek = generic_file_llseek,
8840 struct dentry *trace_create_file(const char *name,
8842 struct dentry *parent,
8844 const struct file_operations *fops)
8848 ret = tracefs_create_file(name, mode, parent, data, fops);
8850 pr_warn("Could not create tracefs '%s' entry\n", name);
8856 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8858 struct dentry *d_tracer;
8863 d_tracer = tracing_get_dentry(tr);
8864 if (IS_ERR(d_tracer))
8867 tr->options = tracefs_create_dir("options", d_tracer);
8869 pr_warn("Could not create tracefs directory 'options'\n");
8877 create_trace_option_file(struct trace_array *tr,
8878 struct trace_option_dentry *topt,
8879 struct tracer_flags *flags,
8880 struct tracer_opt *opt)
8882 struct dentry *t_options;
8884 t_options = trace_options_init_dentry(tr);
8888 topt->flags = flags;
8892 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8893 t_options, topt, &trace_options_fops);
8898 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8900 struct trace_option_dentry *topts;
8901 struct trace_options *tr_topts;
8902 struct tracer_flags *flags;
8903 struct tracer_opt *opts;
8910 flags = tracer->flags;
8912 if (!flags || !flags->opts)
8916 * If this is an instance, only create flags for tracers
8917 * the instance may have.
8919 if (!trace_ok_for_array(tracer, tr))
8922 for (i = 0; i < tr->nr_topts; i++) {
8923 /* Make sure there are no duplicate flags. */
8924 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8930 for (cnt = 0; opts[cnt].name; cnt++)
8933 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8937 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8944 tr->topts = tr_topts;
8945 tr->topts[tr->nr_topts].tracer = tracer;
8946 tr->topts[tr->nr_topts].topts = topts;
8949 for (cnt = 0; opts[cnt].name; cnt++) {
8950 create_trace_option_file(tr, &topts[cnt], flags,
8952 MEM_FAIL(topts[cnt].entry == NULL,
8953 "Failed to create trace option: %s",
8958 static struct dentry *
8959 create_trace_option_core_file(struct trace_array *tr,
8960 const char *option, long index)
8962 struct dentry *t_options;
8964 t_options = trace_options_init_dentry(tr);
8968 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8969 (void *)&tr->trace_flags_index[index],
8970 &trace_options_core_fops);
8973 static void create_trace_options_dir(struct trace_array *tr)
8975 struct dentry *t_options;
8976 bool top_level = tr == &global_trace;
8979 t_options = trace_options_init_dentry(tr);
8983 for (i = 0; trace_options[i]; i++) {
8985 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8986 create_trace_option_core_file(tr, trace_options[i], i);
8991 rb_simple_read(struct file *filp, char __user *ubuf,
8992 size_t cnt, loff_t *ppos)
8994 struct trace_array *tr = filp->private_data;
8998 r = tracer_tracing_is_on(tr);
8999 r = sprintf(buf, "%d\n", r);
9001 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9005 rb_simple_write(struct file *filp, const char __user *ubuf,
9006 size_t cnt, loff_t *ppos)
9008 struct trace_array *tr = filp->private_data;
9009 struct trace_buffer *buffer = tr->array_buffer.buffer;
9013 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9018 mutex_lock(&trace_types_lock);
9019 if (!!val == tracer_tracing_is_on(tr)) {
9020 val = 0; /* do nothing */
9022 tracer_tracing_on(tr);
9023 if (tr->current_trace->start)
9024 tr->current_trace->start(tr);
9026 tracer_tracing_off(tr);
9027 if (tr->current_trace->stop)
9028 tr->current_trace->stop(tr);
9029 /* Wake up any waiters */
9030 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9032 mutex_unlock(&trace_types_lock);
9040 static const struct file_operations rb_simple_fops = {
9041 .open = tracing_open_generic_tr,
9042 .read = rb_simple_read,
9043 .write = rb_simple_write,
9044 .release = tracing_release_generic_tr,
9045 .llseek = default_llseek,
9049 buffer_percent_read(struct file *filp, char __user *ubuf,
9050 size_t cnt, loff_t *ppos)
9052 struct trace_array *tr = filp->private_data;
9056 r = tr->buffer_percent;
9057 r = sprintf(buf, "%d\n", r);
9059 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9063 buffer_percent_write(struct file *filp, const char __user *ubuf,
9064 size_t cnt, loff_t *ppos)
9066 struct trace_array *tr = filp->private_data;
9070 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9080 tr->buffer_percent = val;
9087 static const struct file_operations buffer_percent_fops = {
9088 .open = tracing_open_generic_tr,
9089 .read = buffer_percent_read,
9090 .write = buffer_percent_write,
9091 .release = tracing_release_generic_tr,
9092 .llseek = default_llseek,
9095 static struct dentry *trace_instance_dir;
9098 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9101 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9103 enum ring_buffer_flags rb_flags;
9105 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9109 buf->buffer = ring_buffer_alloc(size, rb_flags);
9113 buf->data = alloc_percpu(struct trace_array_cpu);
9115 ring_buffer_free(buf->buffer);
9120 /* Allocate the first page for all buffers */
9121 set_buffer_entries(&tr->array_buffer,
9122 ring_buffer_size(tr->array_buffer.buffer, 0));
9127 static int allocate_trace_buffers(struct trace_array *tr, int size)
9131 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9135 #ifdef CONFIG_TRACER_MAX_TRACE
9136 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9137 allocate_snapshot ? size : 1);
9138 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9139 ring_buffer_free(tr->array_buffer.buffer);
9140 tr->array_buffer.buffer = NULL;
9141 free_percpu(tr->array_buffer.data);
9142 tr->array_buffer.data = NULL;
9145 tr->allocated_snapshot = allocate_snapshot;
9148 * Only the top level trace array gets its snapshot allocated
9149 * from the kernel command line.
9151 allocate_snapshot = false;
9157 static void free_trace_buffer(struct array_buffer *buf)
9160 ring_buffer_free(buf->buffer);
9162 free_percpu(buf->data);
9167 static void free_trace_buffers(struct trace_array *tr)
9172 free_trace_buffer(&tr->array_buffer);
9174 #ifdef CONFIG_TRACER_MAX_TRACE
9175 free_trace_buffer(&tr->max_buffer);
9179 static void init_trace_flags_index(struct trace_array *tr)
9183 /* Used by the trace options files */
9184 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9185 tr->trace_flags_index[i] = i;
9188 static void __update_tracer_options(struct trace_array *tr)
9192 for (t = trace_types; t; t = t->next)
9193 add_tracer_options(tr, t);
9196 static void update_tracer_options(struct trace_array *tr)
9198 mutex_lock(&trace_types_lock);
9199 tracer_options_updated = true;
9200 __update_tracer_options(tr);
9201 mutex_unlock(&trace_types_lock);
9204 /* Must have trace_types_lock held */
9205 struct trace_array *trace_array_find(const char *instance)
9207 struct trace_array *tr, *found = NULL;
9209 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9210 if (tr->name && strcmp(tr->name, instance) == 0) {
9219 struct trace_array *trace_array_find_get(const char *instance)
9221 struct trace_array *tr;
9223 mutex_lock(&trace_types_lock);
9224 tr = trace_array_find(instance);
9227 mutex_unlock(&trace_types_lock);
9232 static int trace_array_create_dir(struct trace_array *tr)
9236 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9240 ret = event_trace_add_tracer(tr->dir, tr);
9242 tracefs_remove(tr->dir);
9246 init_tracer_tracefs(tr, tr->dir);
9247 __update_tracer_options(tr);
9252 static struct trace_array *trace_array_create(const char *name)
9254 struct trace_array *tr;
9258 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9260 return ERR_PTR(ret);
9262 tr->name = kstrdup(name, GFP_KERNEL);
9266 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9269 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9271 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9273 raw_spin_lock_init(&tr->start_lock);
9275 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9277 tr->current_trace = &nop_trace;
9279 INIT_LIST_HEAD(&tr->systems);
9280 INIT_LIST_HEAD(&tr->events);
9281 INIT_LIST_HEAD(&tr->hist_vars);
9282 INIT_LIST_HEAD(&tr->err_log);
9284 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9287 if (ftrace_allocate_ftrace_ops(tr) < 0)
9290 ftrace_init_trace_array(tr);
9292 init_trace_flags_index(tr);
9294 if (trace_instance_dir) {
9295 ret = trace_array_create_dir(tr);
9299 __trace_early_add_events(tr);
9301 list_add(&tr->list, &ftrace_trace_arrays);
9308 ftrace_free_ftrace_ops(tr);
9309 free_trace_buffers(tr);
9310 free_cpumask_var(tr->tracing_cpumask);
9314 return ERR_PTR(ret);
9317 static int instance_mkdir(const char *name)
9319 struct trace_array *tr;
9322 mutex_lock(&event_mutex);
9323 mutex_lock(&trace_types_lock);
9326 if (trace_array_find(name))
9329 tr = trace_array_create(name);
9331 ret = PTR_ERR_OR_ZERO(tr);
9334 mutex_unlock(&trace_types_lock);
9335 mutex_unlock(&event_mutex);
9340 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9341 * @name: The name of the trace array to be looked up/created.
9343 * Returns a pointer to the trace array with the given name, or
9344 * NULL if it cannot be created.
9346 * NOTE: This function increments the reference counter associated with the
9347 * trace array returned. This makes sure it cannot be freed while in use.
9348 * Use trace_array_put() once the trace array is no longer needed.
9349 * If the trace_array is to be freed, trace_array_destroy() needs to
9350 * be called after the trace_array_put(), or simply let user space delete
9351 * it from the tracefs instances directory. But until the
9352 * trace_array_put() is called, user space can not delete it.
9355 struct trace_array *trace_array_get_by_name(const char *name)
9357 struct trace_array *tr;
9359 mutex_lock(&event_mutex);
9360 mutex_lock(&trace_types_lock);
9362 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9363 if (tr->name && strcmp(tr->name, name) == 0)
9367 tr = trace_array_create(name);
9375 mutex_unlock(&trace_types_lock);
9376 mutex_unlock(&event_mutex);
9379 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
9381 static int __remove_instance(struct trace_array *tr)
9385 /* Reference counter for a newly created trace array = 1. */
9386 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9389 list_del(&tr->list);
9391 /* Disable all the flags that were enabled coming in */
9392 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9393 if ((1 << i) & ZEROED_TRACE_FLAGS)
9394 set_tracer_flag(tr, 1 << i, 0);
9397 tracing_set_nop(tr);
9398 clear_ftrace_function_probes(tr);
9399 event_trace_del_tracer(tr);
9400 ftrace_clear_pids(tr);
9401 ftrace_destroy_function_files(tr);
9402 tracefs_remove(tr->dir);
9403 free_percpu(tr->last_func_repeats);
9404 free_trace_buffers(tr);
9406 for (i = 0; i < tr->nr_topts; i++) {
9407 kfree(tr->topts[i].topts);
9411 free_cpumask_var(tr->tracing_cpumask);
9418 int trace_array_destroy(struct trace_array *this_tr)
9420 struct trace_array *tr;
9426 mutex_lock(&event_mutex);
9427 mutex_lock(&trace_types_lock);
9431 /* Making sure trace array exists before destroying it. */
9432 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9433 if (tr == this_tr) {
9434 ret = __remove_instance(tr);
9439 mutex_unlock(&trace_types_lock);
9440 mutex_unlock(&event_mutex);
9444 EXPORT_SYMBOL_GPL(trace_array_destroy);
9446 static int instance_rmdir(const char *name)
9448 struct trace_array *tr;
9451 mutex_lock(&event_mutex);
9452 mutex_lock(&trace_types_lock);
9455 tr = trace_array_find(name);
9457 ret = __remove_instance(tr);
9459 mutex_unlock(&trace_types_lock);
9460 mutex_unlock(&event_mutex);
9465 static __init void create_trace_instances(struct dentry *d_tracer)
9467 struct trace_array *tr;
9469 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9472 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9475 mutex_lock(&event_mutex);
9476 mutex_lock(&trace_types_lock);
9478 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9481 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9482 "Failed to create instance directory\n"))
9486 mutex_unlock(&trace_types_lock);
9487 mutex_unlock(&event_mutex);
9491 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9493 struct trace_event_file *file;
9496 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9497 tr, &show_traces_fops);
9499 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9500 tr, &set_tracer_fops);
9502 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9503 tr, &tracing_cpumask_fops);
9505 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9506 tr, &tracing_iter_fops);
9508 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9511 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9512 tr, &tracing_pipe_fops);
9514 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9515 tr, &tracing_entries_fops);
9517 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9518 tr, &tracing_total_entries_fops);
9520 trace_create_file("free_buffer", 0200, d_tracer,
9521 tr, &tracing_free_buffer_fops);
9523 trace_create_file("trace_marker", 0220, d_tracer,
9524 tr, &tracing_mark_fops);
9526 file = __find_event_file(tr, "ftrace", "print");
9527 if (file && file->dir)
9528 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9529 file, &event_trigger_fops);
9530 tr->trace_marker_file = file;
9532 trace_create_file("trace_marker_raw", 0220, d_tracer,
9533 tr, &tracing_mark_raw_fops);
9535 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9538 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9539 tr, &rb_simple_fops);
9541 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9542 &trace_time_stamp_mode_fops);
9544 tr->buffer_percent = 50;
9546 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9547 tr, &buffer_percent_fops);
9549 create_trace_options_dir(tr);
9551 trace_create_maxlat_file(tr, d_tracer);
9553 if (ftrace_create_function_files(tr, d_tracer))
9554 MEM_FAIL(1, "Could not allocate function filter files");
9556 #ifdef CONFIG_TRACER_SNAPSHOT
9557 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9558 tr, &snapshot_fops);
9561 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9562 tr, &tracing_err_log_fops);
9564 for_each_tracing_cpu(cpu)
9565 tracing_init_tracefs_percpu(tr, cpu);
9567 ftrace_init_tracefs(tr, d_tracer);
9570 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9572 struct vfsmount *mnt;
9573 struct file_system_type *type;
9576 * To maintain backward compatibility for tools that mount
9577 * debugfs to get to the tracing facility, tracefs is automatically
9578 * mounted to the debugfs/tracing directory.
9580 type = get_fs_type("tracefs");
9583 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9584 put_filesystem(type);
9593 * tracing_init_dentry - initialize top level trace array
9595 * This is called when creating files or directories in the tracing
9596 * directory. It is called via fs_initcall() by any of the boot up code
9597 * and returns 0 once the top level tracing directory has been set up.
9599 int tracing_init_dentry(void)
9601 struct trace_array *tr = &global_trace;
9603 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9604 pr_warn("Tracing disabled due to lockdown\n");
9608 /* The top level trace array uses NULL as parent */
9612 if (WARN_ON(!tracefs_initialized()))
9616 * As there may still be users that expect the tracing
9617 * files to exist in debugfs/tracing, we must automount
9618 * the tracefs file system there, so older tools still
9619 * work with the newer kernel.
9621 tr->dir = debugfs_create_automount("tracing", NULL,
9622 trace_automount, NULL);
9627 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9628 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9630 static struct workqueue_struct *eval_map_wq __initdata;
9631 static struct work_struct eval_map_work __initdata;
9633 static void __init eval_map_work_func(struct work_struct *work)
9637 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9638 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9641 static int __init trace_eval_init(void)
9643 INIT_WORK(&eval_map_work, eval_map_work_func);
9645 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9647 pr_err("Unable to allocate eval_map_wq\n");
9649 eval_map_work_func(&eval_map_work);
9653 queue_work(eval_map_wq, &eval_map_work);
9657 static int __init trace_eval_sync(void)
9659 /* Make sure the eval map updates are finished */
9661 destroy_workqueue(eval_map_wq);
9665 late_initcall_sync(trace_eval_sync);
9668 #ifdef CONFIG_MODULES
9669 static void trace_module_add_evals(struct module *mod)
9671 if (!mod->num_trace_evals)
9675 * Modules with bad taint do not have events created, do
9676 * not bother with enums either.
9678 if (trace_module_has_bad_taint(mod))
9681 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9684 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9685 static void trace_module_remove_evals(struct module *mod)
9687 union trace_eval_map_item *map;
9688 union trace_eval_map_item **last = &trace_eval_maps;
9690 if (!mod->num_trace_evals)
9693 mutex_lock(&trace_eval_mutex);
9695 map = trace_eval_maps;
9698 if (map->head.mod == mod)
9700 map = trace_eval_jmp_to_tail(map);
9701 last = &map->tail.next;
9702 map = map->tail.next;
9707 *last = trace_eval_jmp_to_tail(map)->tail.next;
9710 mutex_unlock(&trace_eval_mutex);
9713 static inline void trace_module_remove_evals(struct module *mod) { }
9714 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9716 static int trace_module_notify(struct notifier_block *self,
9717 unsigned long val, void *data)
9719 struct module *mod = data;
9722 case MODULE_STATE_COMING:
9723 trace_module_add_evals(mod);
9725 case MODULE_STATE_GOING:
9726 trace_module_remove_evals(mod);
9733 static struct notifier_block trace_module_nb = {
9734 .notifier_call = trace_module_notify,
9737 #endif /* CONFIG_MODULES */
9739 static __init int tracer_init_tracefs(void)
9743 trace_access_lock_init();
9745 ret = tracing_init_dentry();
9751 init_tracer_tracefs(&global_trace, NULL);
9752 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9754 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9755 &global_trace, &tracing_thresh_fops);
9757 trace_create_file("README", TRACE_MODE_READ, NULL,
9758 NULL, &tracing_readme_fops);
9760 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9761 NULL, &tracing_saved_cmdlines_fops);
9763 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9764 NULL, &tracing_saved_cmdlines_size_fops);
9766 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9767 NULL, &tracing_saved_tgids_fops);
9771 trace_create_eval_file(NULL);
9773 #ifdef CONFIG_MODULES
9774 register_module_notifier(&trace_module_nb);
9777 #ifdef CONFIG_DYNAMIC_FTRACE
9778 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9779 NULL, &tracing_dyn_info_fops);
9782 create_trace_instances(NULL);
9784 update_tracer_options(&global_trace);
9789 fs_initcall(tracer_init_tracefs);
9791 static int trace_panic_handler(struct notifier_block *this,
9792 unsigned long event, void *unused)
9794 if (ftrace_dump_on_oops)
9795 ftrace_dump(ftrace_dump_on_oops);
9799 static struct notifier_block trace_panic_notifier = {
9800 .notifier_call = trace_panic_handler,
9802 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9805 static int trace_die_handler(struct notifier_block *self,
9811 if (ftrace_dump_on_oops)
9812 ftrace_dump(ftrace_dump_on_oops);
9820 static struct notifier_block trace_die_notifier = {
9821 .notifier_call = trace_die_handler,
9826 * printk is limited to a max of 1024 characters; we really don't need it that big.
9827 * Nothing should be printing 1000 characters anyway.
9829 #define TRACE_MAX_PRINT 1000
9832 * Define here KERN_TRACE so that we have one place to modify
9833 * it if we decide to change what log level the ftrace dump
9836 #define KERN_TRACE KERN_EMERG
9839 trace_printk_seq(struct trace_seq *s)
9841 /* Probably should print a warning here. */
9842 if (s->seq.len >= TRACE_MAX_PRINT)
9843 s->seq.len = TRACE_MAX_PRINT;
9846 * More paranoid code. Although the buffer size is set to
9847 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9848 * an extra layer of protection.
9850 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9851 s->seq.len = s->seq.size - 1;
9854 /* should be zero terminated, but we are paranoid. */
9854 s->buffer[s->seq.len] = 0;
9856 printk(KERN_TRACE "%s", s->buffer);
9861 void trace_init_global_iter(struct trace_iterator *iter)
9863 iter->tr = &global_trace;
9864 iter->trace = iter->tr->current_trace;
9865 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9866 iter->array_buffer = &global_trace.array_buffer;
9868 if (iter->trace && iter->trace->open)
9869 iter->trace->open(iter);
9871 /* Annotate start of buffers if we had overruns */
9872 if (ring_buffer_overruns(iter->array_buffer->buffer))
9873 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9875 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9876 if (trace_clocks[iter->tr->clock_id].in_ns)
9877 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9879 /* Can not use kmalloc for iter.temp and iter.fmt */
9880 iter->temp = static_temp_buf;
9881 iter->temp_size = STATIC_TEMP_BUF_SIZE;
9882 iter->fmt = static_fmt_buf;
9883 iter->fmt_size = STATIC_FMT_BUF_SIZE;
9886 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9888 /* use static because iter can be a bit big for the stack */
9889 static struct trace_iterator iter;
9890 static atomic_t dump_running;
9891 struct trace_array *tr = &global_trace;
9892 unsigned int old_userobj;
9893 unsigned long flags;
9896 /* Only allow one dump user at a time. */
9897 if (atomic_inc_return(&dump_running) != 1) {
9898 atomic_dec(&dump_running);
9903 * Always turn off tracing when we dump.
9904 * We don't need to show trace output of what happens
9905 * between multiple crashes.
9907 * If the user does a sysrq-z, then they can re-enable
9908 * tracing with echo 1 > tracing_on.
9912 local_irq_save(flags);
9914 /* Simulate the iterator */
9915 trace_init_global_iter(&iter);
9917 for_each_tracing_cpu(cpu) {
9918 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9921 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9923 /* don't look at user memory in panic mode */
9924 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9926 switch (oops_dump_mode) {
9928 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9931 iter.cpu_file = raw_smp_processor_id();
9936 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9937 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9940 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9942 /* Did function tracer already get disabled? */
9943 if (ftrace_is_dead()) {
9944 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9945 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9949 * We need to stop all tracing on all CPUs to read
9950 * the next buffer. This is a bit expensive, but is
9951 * not done often. We read everything we can,
9952 * and then release the locks again.
9955 while (!trace_empty(&iter)) {
9958 printk(KERN_TRACE "---------------------------------\n");
9962 trace_iterator_reset(&iter);
9963 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9965 if (trace_find_next_entry_inc(&iter) != NULL) {
9968 ret = print_trace_line(&iter);
9969 if (ret != TRACE_TYPE_NO_CONSUME)
9970 trace_consume(&iter);
9972 touch_nmi_watchdog();
9974 trace_printk_seq(&iter.seq);
9978 printk(KERN_TRACE " (ftrace buffer empty)\n");
9980 printk(KERN_TRACE "---------------------------------\n");
9983 tr->trace_flags |= old_userobj;
9985 for_each_tracing_cpu(cpu) {
9986 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9988 atomic_dec(&dump_running);
9989 local_irq_restore(flags);
9991 EXPORT_SYMBOL_GPL(ftrace_dump);
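/*
 * Since ftrace_dump() is exported, a module can force a dump of the ring
 * buffer from its own fatal-error path.  A minimal sketch of such a caller
 * (my_driver_fatal_error() is hypothetical, not part of this file):
 *
 *	static void my_driver_fatal_error(void)
 *	{
 *		// DUMP_ALL dumps every CPU; DUMP_ORIG only the current CPU.
 *		ftrace_dump(DUMP_ALL);
 *	}
 *
 * The same dump happens automatically on an oops or panic when
 * ftrace_dump_on_oops is set, and can be triggered by hand with sysrq-z.
 */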
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);
	return ret;
}
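/*
 * trace_parse_run_command() is the shared helper behind command-style
 * tracefs files such as kprobe_events: it copies user data in
 * WRITE_BUFSIZE chunks, splits it into '\n'-terminated lines, strips
 * '#' comments and hands each remaining line to createfn().  A sketch
 * of a hypothetical user (my_create_cmd() and my_file_write() are
 * illustration only, not part of this file):
 *
 *	static int my_create_cmd(const char *raw_command)
 *	{
 *		pr_info("got command: %s\n", raw_command);
 *		return 0;	// non-zero aborts the remaining lines
 *	}
 *
 *	static ssize_t my_file_write(struct file *file,
 *				     const char __user *buffer,
 *				     size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       my_create_cmd);
 *	}
 */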
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
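/*
 * Note that the error labels above unwind in reverse allocation order:
 * a failure at any step jumps to the label that releases what was set up
 * before it and then falls through the later labels down to "out".
 */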
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
void __init trace_init(void)
{
	trace_event_init();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
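/*
 * The default chosen above can also be overridden explicitly, either at
 * boot with the "trace_clock=" kernel command line option or at run time
 * through tracefs, e.g.:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo global > /sys/kernel/tracing/trace_clock
 *
 * (The exact list of clocks offered depends on the architecture and
 * kernel version.)
 */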
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);