1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
54 #include "trace_output.h"
57 * On boot up, the ring buffer is set to the minimum size, so that
58 * we do not waste memory on systems that are not using tracing.
60 bool ring_buffer_expanded;
63 * We need to change this state when a selftest is running.
64 * A selftest will look into the ring buffer to count the
65 * entries inserted during the selftest, although concurrent
66 * insertions into the ring buffer, such as trace_printk(), could occur
67 * at the same time, giving false positive or negative results.
69 static bool __read_mostly tracing_selftest_running;
72 * If boot-time tracing including tracers/events via kernel cmdline
73 * is running, we do not want to run SELFTEST.
75 bool __read_mostly tracing_selftest_disabled;
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
80 if (!tracing_selftest_disabled) {
81 tracing_selftest_disabled = true;
82 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
105 * To prevent the comm cache from being overwritten when no
106 * tracing is active, only save the comm when a trace event
109 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
112 * Kill all tracing for good (never come back).
113 * It is initialized to 1 but will turn to zero if the initialization
114 * of the tracer is successful. But that is the only place that sets
117 static int tracing_disabled = 1;
119 cpumask_var_t __read_mostly tracing_buffer_mask;
122 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
124 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
125 * is set, then ftrace_dump is called. This will output the contents
126 * of the ftrace buffers to the console. This is very useful for
127 * capturing traces that lead to crashes and outputting them to a
130 * It is off by default, but you can enable it either by specifying
131 * "ftrace_dump_on_oops" on the kernel command line, or by setting
132 * /proc/sys/kernel/ftrace_dump_on_oops
133 * Set it to 1 if you want to dump the buffers of all CPUs
134 * Set it to 2 if you want to dump the buffer of the CPU that triggered the oops
137 enum ftrace_dump_mode ftrace_dump_on_oops;
139 /* When set, tracing will stop when a WARN*() is hit */
140 int __disable_trace_on_warning;
142 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
143 /* Map of enums to their values, for "eval_map" file */
144 struct trace_eval_map_head {
146 unsigned long length;
149 union trace_eval_map_item;
151 struct trace_eval_map_tail {
153 * "end" is first and points to NULL as it must be different
154 * than "mod" or "eval_string"
156 union trace_eval_map_item *next;
157 const char *end; /* points to NULL */
160 static DEFINE_MUTEX(trace_eval_mutex);
163 * The trace_eval_maps are saved in an array with two extra elements,
164 * one at the beginning, and one at the end. The beginning item contains
165 * the count of the saved maps (head.length), and the module they
166 * belong to if not built in (head.mod). The ending item contains a
167 * pointer to the next array of saved eval_map items.
169 union trace_eval_map_item {
170 struct trace_eval_map map;
171 struct trace_eval_map_head head;
172 struct trace_eval_map_tail tail;
175 static union trace_eval_map_item *trace_eval_maps;
176 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
178 int tracing_set_tracer(struct trace_array *tr, const char *buf);
179 static void ftrace_trace_userstack(struct trace_array *tr,
180 struct trace_buffer *buffer,
181 unsigned int trace_ctx);
183 #define MAX_TRACER_SIZE 100
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
187 static bool allocate_snapshot;
188 static bool snapshot_at_boot;
190 static int __init set_cmdline_ftrace(char *str)
192 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
193 default_bootup_tracer = bootup_tracer_buf;
194 /* We are using ftrace early, expand it */
195 ring_buffer_expanded = true;
198 __setup("ftrace=", set_cmdline_ftrace);
200 static int __init set_ftrace_dump_on_oops(char *str)
202 if (*str++ != '=' || !*str || !strcmp("1", str)) {
203 ftrace_dump_on_oops = DUMP_ALL;
207 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
208 ftrace_dump_on_oops = DUMP_ORIG;
214 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
216 static int __init stop_trace_on_warning(char *str)
218 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
219 __disable_trace_on_warning = 1;
222 __setup("traceoff_on_warning", stop_trace_on_warning);
224 static int __init boot_alloc_snapshot(char *str)
226 allocate_snapshot = true;
227 /* We also need the main ring buffer expanded */
228 ring_buffer_expanded = true;
231 __setup("alloc_snapshot", boot_alloc_snapshot);
234 static int __init boot_snapshot(char *str)
236 snapshot_at_boot = true;
237 boot_alloc_snapshot(str);
240 __setup("ftrace_boot_snapshot", boot_snapshot);
243 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
245 static int __init set_trace_boot_options(char *str)
247 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
250 __setup("trace_options=", set_trace_boot_options);
252 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
253 static char *trace_boot_clock __initdata;
255 static int __init set_trace_boot_clock(char *str)
257 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
258 trace_boot_clock = trace_boot_clock_buf;
261 __setup("trace_clock=", set_trace_boot_clock);
263 static int __init set_tracepoint_printk(char *str)
265 /* Ignore the "tp_printk_stop_on_boot" param */
269 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
270 tracepoint_printk = 1;
273 __setup("tp_printk", set_tracepoint_printk);
275 static int __init set_tracepoint_printk_stop(char *str)
277 tracepoint_printk_stop_on_boot = true;
280 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
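/*
 * Illustrative note (not from the original file): the boot parameters handled
 * by the __setup() calls above can be combined on the kernel command line.
 * The values below only sketch the accepted syntax and are not a
 * recommendation:
 *
 *   ftrace=function_graph trace_buf_size=16M trace_options=sym-addr
 *   traceoff_on_warning ftrace_dump_on_oops tp_printk
 */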
282 unsigned long long ns2usecs(u64 nsec)
290 trace_process_export(struct trace_export *export,
291 struct ring_buffer_event *event, int flag)
293 struct trace_entry *entry;
294 unsigned int size = 0;
296 if (export->flags & flag) {
297 entry = ring_buffer_event_data(event);
298 size = ring_buffer_event_length(event);
299 export->write(export, entry, size);
303 static DEFINE_MUTEX(ftrace_export_lock);
305 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
307 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
308 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
309 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
311 static inline void ftrace_exports_enable(struct trace_export *export)
313 if (export->flags & TRACE_EXPORT_FUNCTION)
314 static_branch_inc(&trace_function_exports_enabled);
316 if (export->flags & TRACE_EXPORT_EVENT)
317 static_branch_inc(&trace_event_exports_enabled);
319 if (export->flags & TRACE_EXPORT_MARKER)
320 static_branch_inc(&trace_marker_exports_enabled);
323 static inline void ftrace_exports_disable(struct trace_export *export)
325 if (export->flags & TRACE_EXPORT_FUNCTION)
326 static_branch_dec(&trace_function_exports_enabled);
328 if (export->flags & TRACE_EXPORT_EVENT)
329 static_branch_dec(&trace_event_exports_enabled);
331 if (export->flags & TRACE_EXPORT_MARKER)
332 static_branch_dec(&trace_marker_exports_enabled);
335 static void ftrace_exports(struct ring_buffer_event *event, int flag)
337 struct trace_export *export;
339 preempt_disable_notrace();
341 export = rcu_dereference_raw_check(ftrace_exports_list);
343 trace_process_export(export, event, flag);
344 export = rcu_dereference_raw_check(export->next);
347 preempt_enable_notrace();
351 add_trace_export(struct trace_export **list, struct trace_export *export)
353 rcu_assign_pointer(export->next, *list);
355 * We are adding export to the list, but another
356 * CPU might be walking that list. We need to make sure
357 * the export->next pointer is valid before another CPU sees
358 * the export pointer included in the list.
360 rcu_assign_pointer(*list, export);
364 rm_trace_export(struct trace_export **list, struct trace_export *export)
366 struct trace_export **p;
368 for (p = list; *p != NULL; p = &(*p)->next)
375 rcu_assign_pointer(*p, (*p)->next);
381 add_ftrace_export(struct trace_export **list, struct trace_export *export)
383 ftrace_exports_enable(export);
385 add_trace_export(list, export);
389 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
393 ret = rm_trace_export(list, export);
394 ftrace_exports_disable(export);
399 int register_ftrace_export(struct trace_export *export)
401 if (WARN_ON_ONCE(!export->write))
404 mutex_lock(&ftrace_export_lock);
406 add_ftrace_export(&ftrace_exports_list, export);
408 mutex_unlock(&ftrace_export_lock);
412 EXPORT_SYMBOL_GPL(register_ftrace_export);
414 int unregister_ftrace_export(struct trace_export *export)
418 mutex_lock(&ftrace_export_lock);
420 ret = rm_ftrace_export(&ftrace_exports_list, export);
422 mutex_unlock(&ftrace_export_lock);
426 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
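/*
 * Illustrative sketch (compiled out, not part of the original file): a
 * minimal trace_export that forwards function trace entries to an external
 * sink.  The struct layout follows <linux/trace.h>; my_sink_write() and the
 * module hooks are hypothetical names.
 */
#if 0
static void my_sink_write(struct trace_export *export, const void *entry,
			  unsigned int len)
{
	/* Push 'len' bytes of the binary trace entry to the external sink. */
}

static struct trace_export my_export = {
	.write	= my_sink_write,
	.flags	= TRACE_EXPORT_FUNCTION,
};

static int __init my_export_init(void)
{
	/* register_ftrace_export() warns and fails if ->write is not set. */
	return register_ftrace_export(&my_export);
}

static void __exit my_export_exit(void)
{
	unregister_ftrace_export(&my_export);
}
#endif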
428 /* trace_flags holds trace_options default values */
429 #define TRACE_DEFAULT_FLAGS \
430 (FUNCTION_DEFAULT_FLAGS | \
431 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
432 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
433 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
434 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
437 /* trace_options that are only supported by global_trace */
438 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
439 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
441 /* trace_flags that are default zero for instances */
442 #define ZEROED_TRACE_FLAGS \
443 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
446 * The global_trace is the descriptor that holds the top-level tracing
447 * buffers for the live tracing.
449 static struct trace_array global_trace = {
450 .trace_flags = TRACE_DEFAULT_FLAGS,
453 LIST_HEAD(ftrace_trace_arrays);
455 int trace_array_get(struct trace_array *this_tr)
457 struct trace_array *tr;
460 mutex_lock(&trace_types_lock);
461 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
468 mutex_unlock(&trace_types_lock);
473 static void __trace_array_put(struct trace_array *this_tr)
475 WARN_ON(!this_tr->ref);
480 * trace_array_put - Decrement the reference counter for this trace array.
481 * @this_tr : pointer to the trace array
483 * NOTE: Use this when we no longer need the trace array returned by
484 * trace_array_get_by_name(). This ensures the trace array can be later
488 void trace_array_put(struct trace_array *this_tr)
493 mutex_lock(&trace_types_lock);
494 __trace_array_put(this_tr);
495 mutex_unlock(&trace_types_lock);
497 EXPORT_SYMBOL_GPL(trace_array_put);
499 int tracing_check_open_get_tr(struct trace_array *tr)
503 ret = security_locked_down(LOCKDOWN_TRACEFS);
507 if (tracing_disabled)
510 if (tr && trace_array_get(tr) < 0)
516 int call_filter_check_discard(struct trace_event_call *call, void *rec,
517 struct trace_buffer *buffer,
518 struct ring_buffer_event *event)
520 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
521 !filter_match_preds(call->filter, rec)) {
522 __trace_event_discard_commit(buffer, event);
530 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
531 * @filtered_pids: The list of pids to check
532 * @search_pid: The PID to find in @filtered_pids
534 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
537 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
539 return trace_pid_list_is_set(filtered_pids, search_pid);
543 * trace_ignore_this_task - should a task be ignored for tracing
544 * @filtered_pids: The list of pids to check
545 * @filtered_no_pids: The list of pids not to be traced
546 * @task: The task that should be ignored if not filtered
548 * Checks if @task should be traced or not from @filtered_pids.
549 * Returns true if @task should *NOT* be traced.
550 * Returns false if @task should be traced.
553 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
554 struct trace_pid_list *filtered_no_pids,
555 struct task_struct *task)
558 * If filtered_no_pids is not empty, and the task's pid is listed
559 * in filtered_no_pids, then return true.
560 * Otherwise, if filtered_pids is empty, that means we can
561 * trace all tasks. If it has content, then only trace pids
562 * within filtered_pids.
565 return (filtered_pids &&
566 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
568 trace_find_filtered_pid(filtered_no_pids, task->pid));
572 * trace_filter_add_remove_task - Add or remove a task from a pid_list
573 * @pid_list: The list to modify
574 * @self: The current task for fork or NULL for exit
575 * @task: The task to add or remove
577 * When adding a task, if @self is defined, the task is only added if @self
578 * is also included in @pid_list. This happens on fork, and tasks should
579 * only be added when the parent is listed. If @self is NULL, then the
580 * @task pid will be removed from the list, which would happen on exit
583 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
584 struct task_struct *self,
585 struct task_struct *task)
590 /* For forks, we only add if the forking task is listed */
592 if (!trace_find_filtered_pid(pid_list, self->pid))
596 /* "self" is set for forks, and NULL for exits */
598 trace_pid_list_set(pid_list, task->pid);
600 trace_pid_list_clear(pid_list, task->pid);
604 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
605 * @pid_list: The pid list to show
606 * @v: The last pid that was shown (+1 of the actual pid, to let zero be displayed)
607 * @pos: The position of the file
609 * This is used by the seq_file "next" operation to iterate the pids
610 * listed in a trace_pid_list structure.
612 * Returns the pid+1 as we want to display pid of zero, but NULL would
613 * stop the iteration.
615 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
617 long pid = (unsigned long)v;
622 /* pid already is +1 of the actual previous bit */
623 if (trace_pid_list_next(pid_list, pid, &next) < 0)
628 /* Return pid + 1 to allow zero to be represented */
629 return (void *)(pid + 1);
633 * trace_pid_start - Used for seq_file to start reading pid lists
634 * @pid_list: The pid list to show
635 * @pos: The position of the file
637 * This is used by seq_file "start" operation to start the iteration
640 * Returns the pid+1 as we want to display pid of zero, but NULL would
641 * stop the iteration.
643 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
649 if (trace_pid_list_first(pid_list, &first) < 0)
654 /* Return pid + 1 so that zero can be the exit value */
655 for (pid++; pid && l < *pos;
656 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
662 * trace_pid_show - show the current pid in seq_file processing
663 * @m: The seq_file structure to write into
664 * @v: A void pointer of the pid (+1) value to display
666 * Can be directly used by seq_file operations to display the current
669 int trace_pid_show(struct seq_file *m, void *v)
671 unsigned long pid = (unsigned long)v - 1;
673 seq_printf(m, "%lu\n", pid);
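/*
 * Illustrative sketch (compiled out, not part of the original file): the
 * trace_pid_start/next/show helpers above are meant to be wired into a
 * seq_file.  "my_pid_list" and the my_pids_* wrappers are hypothetical;
 * real users (e.g. the event pid filters) follow the same shape.
 */
#if 0
static struct trace_pid_list *my_pid_list;

static void *my_pids_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(my_pid_list, pos);
}

static void *my_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(my_pid_list, v, pos);
}

static void my_pids_stop(struct seq_file *m, void *v)
{
	/* Nothing to clean up; the pid list itself is managed elsewhere. */
}

static const struct seq_operations my_pids_seq_ops = {
	.start	= my_pids_start,
	.next	= my_pids_next,
	.stop	= my_pids_stop,
	.show	= trace_pid_show,	/* prints the (pid + 1) cookie minus one */
};
#endif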
677 /* 128 should be much more than enough */
678 #define PID_BUF_SIZE 127
680 int trace_pid_write(struct trace_pid_list *filtered_pids,
681 struct trace_pid_list **new_pid_list,
682 const char __user *ubuf, size_t cnt)
684 struct trace_pid_list *pid_list;
685 struct trace_parser parser;
693 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
697 * Always recreate the array. The write is an all-or-nothing
698 * operation: a new array is always created when the user adds
699 * new pids. If the operation fails, the current list is
702 pid_list = trace_pid_list_alloc();
704 trace_parser_put(&parser);
709 /* copy the current bits to the new max */
710 ret = trace_pid_list_first(filtered_pids, &pid);
712 trace_pid_list_set(pid_list, pid);
713 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
723 ret = trace_get_user(&parser, ubuf, cnt, &pos);
731 if (!trace_parser_loaded(&parser))
735 if (kstrtoul(parser.buffer, 0, &val))
740 if (trace_pid_list_set(pid_list, pid) < 0) {
746 trace_parser_clear(&parser);
749 trace_parser_put(&parser);
752 trace_pid_list_free(pid_list);
757 /* Cleared the list of pids */
758 trace_pid_list_free(pid_list);
762 *new_pid_list = pid_list;
767 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
771 /* Early boot up does not have a buffer yet */
773 return trace_clock_local();
775 ts = ring_buffer_time_stamp(buf->buffer);
776 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
781 u64 ftrace_now(int cpu)
783 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
787 * tracing_is_enabled - Show if global_trace has been enabled
789 * Shows whether the global trace has been enabled or not. It uses the
790 * mirror flag "buffer_disabled" so that it can be checked in fast paths
791 * such as the irqsoff tracer. But it may be inaccurate due to races. If you
792 * need to know the accurate state, use tracing_is_on(), which is a little
793 * slower, but accurate.
795 int tracing_is_enabled(void)
798 * For quick access (irqsoff uses this in fast path), just
799 * return the mirror variable of the state of the ring buffer.
800 * It's a little racy, but we don't really care.
803 return !global_trace.buffer_disabled;
807 * trace_buf_size is the size in bytes that is allocated
808 * for a buffer. Note, the number of bytes is always rounded
811 * This number is purposely set to a low number of 16384.
812 * If the dump on oops happens, not having to wait for all that
813 * output will be much appreciated. Anyway, this is configurable
814 * both at boot time and at run time.
816 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
818 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
820 /* trace_types holds a link list of available tracers. */
821 static struct tracer *trace_types __read_mostly;
824 * trace_types_lock is used to protect the trace_types list.
826 DEFINE_MUTEX(trace_types_lock);
829 * serialize the access of the ring buffer
831 * The ring buffer serializes readers, but that is only low-level protection.
832 * The validity of the events (returned by ring_buffer_peek() etc.)
833 * is not protected by the ring buffer.
835 * The content of events may become garbage if we allow other processes to
836 * consume these events concurrently:
837 * A) the page of the consumed events may become a normal page
838 * (not a reader page) in the ring buffer, and this page will be rewritten
839 * by the events producer.
840 * B) The page of the consumed events may become a page for splice_read,
841 * and this page will be returned to the system.
843 * These primitives allow multi-process access to different CPU ring buffers
846 * These primitives don't distinguish read-only from read-consume access.
847 * Multiple read-only accesses are also serialized.
851 static DECLARE_RWSEM(all_cpu_access_lock);
852 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
854 static inline void trace_access_lock(int cpu)
856 if (cpu == RING_BUFFER_ALL_CPUS) {
857 /* gain it for accessing the whole ring buffer. */
858 down_write(&all_cpu_access_lock);
860 /* gain it for accessing a cpu ring buffer. */
862 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
863 down_read(&all_cpu_access_lock);
865 /* Secondly block other access to this @cpu ring buffer. */
866 mutex_lock(&per_cpu(cpu_access_lock, cpu));
870 static inline void trace_access_unlock(int cpu)
872 if (cpu == RING_BUFFER_ALL_CPUS) {
873 up_write(&all_cpu_access_lock);
875 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
876 up_read(&all_cpu_access_lock);
880 static inline void trace_access_lock_init(void)
884 for_each_possible_cpu(cpu)
885 mutex_init(&per_cpu(cpu_access_lock, cpu));
890 static DEFINE_MUTEX(access_lock);
892 static inline void trace_access_lock(int cpu)
895 mutex_lock(&access_lock);
898 static inline void trace_access_unlock(int cpu)
901 mutex_unlock(&access_lock);
904 static inline void trace_access_lock_init(void)
910 #ifdef CONFIG_STACKTRACE
911 static void __ftrace_trace_stack(struct trace_buffer *buffer,
912 unsigned int trace_ctx,
913 int skip, struct pt_regs *regs);
914 static inline void ftrace_trace_stack(struct trace_array *tr,
915 struct trace_buffer *buffer,
916 unsigned int trace_ctx,
917 int skip, struct pt_regs *regs);
920 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
921 unsigned int trace_ctx,
922 int skip, struct pt_regs *regs)
925 static inline void ftrace_trace_stack(struct trace_array *tr,
926 struct trace_buffer *buffer,
927 unsigned long trace_ctx,
928 int skip, struct pt_regs *regs)
934 static __always_inline void
935 trace_event_setup(struct ring_buffer_event *event,
936 int type, unsigned int trace_ctx)
938 struct trace_entry *ent = ring_buffer_event_data(event);
940 tracing_generic_entry_update(ent, type, trace_ctx);
943 static __always_inline struct ring_buffer_event *
944 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
947 unsigned int trace_ctx)
949 struct ring_buffer_event *event;
951 event = ring_buffer_lock_reserve(buffer, len);
953 trace_event_setup(event, type, trace_ctx);
958 void tracer_tracing_on(struct trace_array *tr)
960 if (tr->array_buffer.buffer)
961 ring_buffer_record_on(tr->array_buffer.buffer);
963 * This flag is looked at when buffers haven't been allocated
964 * yet, or by some tracers (like irqsoff) that just want to
965 * know if the ring buffer has been disabled, but can handle
966 * races where it gets disabled while we still do a record.
967 * As the check is in the fast path of the tracers, it is more
968 * important to be fast than accurate.
970 tr->buffer_disabled = 0;
971 /* Make the flag seen by readers */
976 * tracing_on - enable tracing buffers
978 * This function enables tracing buffers that may have been
979 * disabled with tracing_off.
981 void tracing_on(void)
983 tracer_tracing_on(&global_trace);
985 EXPORT_SYMBOL_GPL(tracing_on);
988 static __always_inline void
989 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
991 __this_cpu_write(trace_taskinfo_save, true);
993 /* If this is the temp buffer, we need to commit fully */
994 if (this_cpu_read(trace_buffered_event) == event) {
995 /* Length is in event->array[0] */
996 ring_buffer_write(buffer, event->array[0], &event->array[1]);
997 /* Release the temp buffer */
998 this_cpu_dec(trace_buffered_event_cnt);
999 /* ring_buffer_unlock_commit() enables preemption */
1000 preempt_enable_notrace();
1002 ring_buffer_unlock_commit(buffer, event);
1005 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1006 const char *str, int size)
1008 struct ring_buffer_event *event;
1009 struct trace_buffer *buffer;
1010 struct print_entry *entry;
1011 unsigned int trace_ctx;
1014 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1017 if (unlikely(tracing_selftest_running || tracing_disabled))
1020 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1022 trace_ctx = tracing_gen_ctx();
1023 buffer = tr->array_buffer.buffer;
1024 ring_buffer_nest_start(buffer);
1025 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1032 entry = ring_buffer_event_data(event);
1035 memcpy(&entry->buf, str, size);
1037 /* Add a newline if necessary */
1038 if (entry->buf[size - 1] != '\n') {
1039 entry->buf[size] = '\n';
1040 entry->buf[size + 1] = '\0';
1042 entry->buf[size] = '\0';
1044 __buffer_unlock_commit(buffer, event);
1045 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1047 ring_buffer_nest_end(buffer);
1050 EXPORT_SYMBOL_GPL(__trace_array_puts);
1053 * __trace_puts - write a constant string into the trace buffer.
1054 * @ip: The address of the caller
1055 * @str: The constant string to write
1056 * @size: The size of the string.
1058 int __trace_puts(unsigned long ip, const char *str, int size)
1060 return __trace_array_puts(&global_trace, ip, str, size);
1062 EXPORT_SYMBOL_GPL(__trace_puts);
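/*
 * Illustrative sketch (compiled out, not part of the original file): callers
 * normally reach __trace_puts() through the trace_puts() macro rather than
 * calling it directly; my_debug_hook() is a hypothetical caller.
 */
#if 0
static void my_debug_hook(void)
{
	/* Drops a constant string into the top-level trace buffer. */
	trace_puts("my_driver: reached the slow path\n");
}
#endif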
1065 * __trace_bputs - write the pointer to a constant string into trace buffer
1066 * @ip: The address of the caller
1067 * @str: The constant string to write to the buffer to
1069 int __trace_bputs(unsigned long ip, const char *str)
1071 struct ring_buffer_event *event;
1072 struct trace_buffer *buffer;
1073 struct bputs_entry *entry;
1074 unsigned int trace_ctx;
1075 int size = sizeof(struct bputs_entry);
1078 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1081 if (unlikely(tracing_selftest_running || tracing_disabled))
1084 trace_ctx = tracing_gen_ctx();
1085 buffer = global_trace.array_buffer.buffer;
1087 ring_buffer_nest_start(buffer);
1088 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1093 entry = ring_buffer_event_data(event);
1097 __buffer_unlock_commit(buffer, event);
1098 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1102 ring_buffer_nest_end(buffer);
1105 EXPORT_SYMBOL_GPL(__trace_bputs);
1107 #ifdef CONFIG_TRACER_SNAPSHOT
1108 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1111 struct tracer *tracer = tr->current_trace;
1112 unsigned long flags;
1115 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1116 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1120 if (!tr->allocated_snapshot) {
1121 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1122 trace_array_puts(tr, "*** stopping trace here! ***\n");
1123 tracer_tracing_off(tr);
1127 /* Note, the snapshot cannot be used while the tracer itself uses it */
1128 if (tracer->use_max_tr) {
1129 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1130 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1134 local_irq_save(flags);
1135 update_max_tr(tr, current, smp_processor_id(), cond_data);
1136 local_irq_restore(flags);
1139 void tracing_snapshot_instance(struct trace_array *tr)
1141 tracing_snapshot_instance_cond(tr, NULL);
1145 * tracing_snapshot - take a snapshot of the current buffer.
1147 * This causes a swap between the snapshot buffer and the current live
1148 * tracing buffer. You can use this to take snapshots of the live
1149 * trace when some condition is triggered, but continue to trace.
1151 * Note, make sure to allocate the snapshot either with
1152 * tracing_snapshot_alloc(), or manually with:
1153 * echo 1 > /sys/kernel/debug/tracing/snapshot
1155 * If the snapshot buffer is not allocated, this will stop tracing,
1156 * basically making a permanent snapshot.
1158 void tracing_snapshot(void)
1160 struct trace_array *tr = &global_trace;
1162 tracing_snapshot_instance(tr);
1164 EXPORT_SYMBOL_GPL(tracing_snapshot);
1167 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1168 * @tr: The tracing instance to snapshot
1169 * @cond_data: The data to be tested conditionally, and possibly saved
1171 * This is the same as tracing_snapshot() except that the snapshot is
1172 * conditional - the snapshot will only happen if the
1173 * cond_snapshot.update() implementation receiving the cond_data
1174 * returns true, which means that the trace array's cond_snapshot
1175 * update() operation used the cond_data to determine whether the
1176 * snapshot should be taken, and if it was, presumably saved it along
1177 * with the snapshot.
1179 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1181 tracing_snapshot_instance_cond(tr, cond_data);
1183 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1186 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1187 * @tr: The tracing instance
1189 * When the user enables a conditional snapshot using
1190 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1191 * with the snapshot. This accessor is used to retrieve it.
1193 * Should not be called from cond_snapshot.update(), since it takes
1194 * the tr->max_lock lock, which the code calling
1195 * cond_snapshot.update() has already taken.
1197 * Returns the cond_data associated with the trace array's snapshot.
1199 void *tracing_cond_snapshot_data(struct trace_array *tr)
1201 void *cond_data = NULL;
1203 local_irq_disable();
1204 arch_spin_lock(&tr->max_lock);
1206 if (tr->cond_snapshot)
1207 cond_data = tr->cond_snapshot->cond_data;
1209 arch_spin_unlock(&tr->max_lock);
1214 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1216 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1217 struct array_buffer *size_buf, int cpu_id);
1218 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1220 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1224 if (!tr->allocated_snapshot) {
1226 /* allocate spare buffer */
1227 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1228 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1232 tr->allocated_snapshot = true;
1238 static void free_snapshot(struct trace_array *tr)
1241 * We don't free the ring buffer; instead, we resize it because
1242 * the max_tr ring buffer has some state (e.g. ring->clock) and
1243 * we want to preserve it.
1245 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1246 set_buffer_entries(&tr->max_buffer, 1);
1247 tracing_reset_online_cpus(&tr->max_buffer);
1248 tr->allocated_snapshot = false;
1252 * tracing_alloc_snapshot - allocate snapshot buffer.
1254 * This only allocates the snapshot buffer if it isn't already
1255 * allocated - it doesn't also take a snapshot.
1257 * This is meant to be used in cases where the snapshot buffer needs
1258 * to be set up for events that can't sleep but need to be able to
1259 * trigger a snapshot.
1261 int tracing_alloc_snapshot(void)
1263 struct trace_array *tr = &global_trace;
1266 ret = tracing_alloc_snapshot_instance(tr);
1271 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1274 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1276 * This is similar to tracing_snapshot(), but it will allocate the
1277 * snapshot buffer if it isn't already allocated. Use this only
1278 * where it is safe to sleep, as the allocation may sleep.
1280 * This causes a swap between the snapshot buffer and the current live
1281 * tracing buffer. You can use this to take snapshots of the live
1282 * trace when some condition is triggered, but continue to trace.
1284 void tracing_snapshot_alloc(void)
1288 ret = tracing_alloc_snapshot();
1294 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
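/*
 * Illustrative sketch (compiled out, not part of the original file): the
 * usual pattern is to allocate the snapshot buffer once from a context that
 * may sleep, then take snapshots from hot or atomic paths when a condition
 * of interest is seen.  my_setup() and my_fault_detected() are hypothetical.
 */
#if 0
static int __init my_setup(void)
{
	/* May sleep; only allocates the spare buffer, takes no snapshot. */
	return tracing_alloc_snapshot();
}

static void my_fault_detected(void)
{
	/* Swaps the live buffer with the snapshot buffer, preserving it. */
	tracing_snapshot();
}
#endif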
1297 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1298 * @tr: The tracing instance
1299 * @cond_data: User data to associate with the snapshot
1300 * @update: Implementation of the cond_snapshot update function
1302 * Check whether the conditional snapshot for the given instance has
1303 * already been enabled, or if the current tracer is already using a
1304 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1305 * save the cond_data and update function inside.
1307 * Returns 0 if successful, error otherwise.
1309 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1310 cond_update_fn_t update)
1312 struct cond_snapshot *cond_snapshot;
1315 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1319 cond_snapshot->cond_data = cond_data;
1320 cond_snapshot->update = update;
1322 mutex_lock(&trace_types_lock);
1324 ret = tracing_alloc_snapshot_instance(tr);
1328 if (tr->current_trace->use_max_tr) {
1334 * The cond_snapshot can only change to NULL without the
1335 * trace_types_lock. We don't care if we race with it going
1336 * to NULL, but we want to make sure that it's not set to
1337 * something other than NULL when we get here, which we can
1338 * do safely with only holding the trace_types_lock and not
1339 * having to take the max_lock.
1341 if (tr->cond_snapshot) {
1346 local_irq_disable();
1347 arch_spin_lock(&tr->max_lock);
1348 tr->cond_snapshot = cond_snapshot;
1349 arch_spin_unlock(&tr->max_lock);
1352 mutex_unlock(&trace_types_lock);
1357 mutex_unlock(&trace_types_lock);
1358 kfree(cond_snapshot);
1361 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
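/*
 * Illustrative sketch (compiled out, not part of the original file): enabling
 * a conditional snapshot with a user-supplied update() callback.  The
 * callback sees the cond_data passed at snapshot time and returns true to
 * let the snapshot proceed; my_update() and my_enable() are hypothetical.
 */
#if 0
static bool my_update(struct trace_array *tr, void *cond_data)
{
	/* Only snapshot when the caller handed us a non-NULL cookie. */
	return cond_data != NULL;
}

static int my_enable(struct trace_array *tr)
{
	/* No enable-time data is associated with the snapshot here. */
	return tracing_snapshot_cond_enable(tr, NULL, my_update);
}
#endif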
1364 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1365 * @tr: The tracing instance
1367 * Check whether the conditional snapshot for the given instance is
1368 * enabled; if so, free the cond_snapshot associated with it,
1369 * otherwise return -EINVAL.
1371 * Returns 0 if successful, error otherwise.
1373 int tracing_snapshot_cond_disable(struct trace_array *tr)
1377 local_irq_disable();
1378 arch_spin_lock(&tr->max_lock);
1380 if (!tr->cond_snapshot)
1383 kfree(tr->cond_snapshot);
1384 tr->cond_snapshot = NULL;
1387 arch_spin_unlock(&tr->max_lock);
1392 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1394 void tracing_snapshot(void)
1396 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1398 EXPORT_SYMBOL_GPL(tracing_snapshot);
1399 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1401 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1403 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1404 int tracing_alloc_snapshot(void)
1406 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1409 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1410 void tracing_snapshot_alloc(void)
1415 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1416 void *tracing_cond_snapshot_data(struct trace_array *tr)
1420 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1421 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1425 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1426 int tracing_snapshot_cond_disable(struct trace_array *tr)
1430 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1431 #define free_snapshot(tr) do { } while (0)
1432 #endif /* CONFIG_TRACER_SNAPSHOT */
1434 void tracer_tracing_off(struct trace_array *tr)
1436 if (tr->array_buffer.buffer)
1437 ring_buffer_record_off(tr->array_buffer.buffer);
1439 * This flag is looked at when buffers haven't been allocated
1440 * yet, or by some tracers (like irqsoff) that just want to
1441 * know if the ring buffer has been disabled, but can handle
1442 * races where it gets disabled while we still do a record.
1443 * As the check is in the fast path of the tracers, it is more
1444 * important to be fast than accurate.
1446 tr->buffer_disabled = 1;
1447 /* Make the flag seen by readers */
1452 * tracing_off - turn off tracing buffers
1454 * This function stops the tracing buffers from recording data.
1455 * It does not disable any overhead the tracers themselves may
1456 * be causing. This function simply causes all recording to
1457 * the ring buffers to fail.
1459 void tracing_off(void)
1461 tracer_tracing_off(&global_trace);
1463 EXPORT_SYMBOL_GPL(tracing_off);
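/*
 * Illustrative sketch (compiled out, not part of the original file): a common
 * debugging pattern is to stop the ring buffer right where a problem is
 * detected so it still holds the events leading up to it.  my_check() is a
 * hypothetical caller; tracing_on() would re-arm recording later.
 */
#if 0
static void my_check(int status)
{
	if (status < 0) {
		trace_printk("bad status %d, freezing trace\n", status);
		tracing_off();
	}
}
#endif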
1465 void disable_trace_on_warning(void)
1467 if (__disable_trace_on_warning) {
1468 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1469 "Disabling tracing due to warning\n");
1475 * tracer_tracing_is_on - show the real state of the ring buffer
1476 * @tr : the trace array to check whether the ring buffer is enabled
1478 * Shows the real state of the ring buffer: whether it is enabled or not.
1480 bool tracer_tracing_is_on(struct trace_array *tr)
1482 if (tr->array_buffer.buffer)
1483 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1484 return !tr->buffer_disabled;
1488 * tracing_is_on - show state of ring buffers enabled
1490 int tracing_is_on(void)
1492 return tracer_tracing_is_on(&global_trace);
1494 EXPORT_SYMBOL_GPL(tracing_is_on);
1496 static int __init set_buf_size(char *str)
1498 unsigned long buf_size;
1502 buf_size = memparse(str, &str);
1504 * nr_entries cannot be zero, and the startup
1505 * tests require some buffer space. Therefore
1506 * ensure we have at least 4096 bytes of buffer.
1508 trace_buf_size = max(4096UL, buf_size);
1511 __setup("trace_buf_size=", set_buf_size);
1513 static int __init set_tracing_thresh(char *str)
1515 unsigned long threshold;
1520 ret = kstrtoul(str, 0, &threshold);
1523 tracing_thresh = threshold * 1000;
1526 __setup("tracing_thresh=", set_tracing_thresh);
1528 unsigned long nsecs_to_usecs(unsigned long nsecs)
1530 return nsecs / 1000;
1534 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1535 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1536 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1537 * of strings in the order that the evals (enum) were defined.
1542 /* These must match the bit positions in trace_iterator_flags */
1543 static const char *trace_options[] = {
1551 int in_ns; /* is this clock in nanoseconds? */
1552 } trace_clocks[] = {
1553 { trace_clock_local, "local", 1 },
1554 { trace_clock_global, "global", 1 },
1555 { trace_clock_counter, "counter", 0 },
1556 { trace_clock_jiffies, "uptime", 0 },
1557 { trace_clock, "perf", 1 },
1558 { ktime_get_mono_fast_ns, "mono", 1 },
1559 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1560 { ktime_get_boot_fast_ns, "boot", 1 },
1561 { ktime_get_tai_fast_ns, "tai", 1 },
1565 bool trace_clock_in_ns(struct trace_array *tr)
1567 if (trace_clocks[tr->clock_id].in_ns)
1574 * trace_parser_get_init - gets the buffer for trace parser
1576 int trace_parser_get_init(struct trace_parser *parser, int size)
1578 memset(parser, 0, sizeof(*parser));
1580 parser->buffer = kmalloc(size, GFP_KERNEL);
1581 if (!parser->buffer)
1584 parser->size = size;
1589 * trace_parser_put - frees the buffer for trace parser
1591 void trace_parser_put(struct trace_parser *parser)
1593 kfree(parser->buffer);
1594 parser->buffer = NULL;
1598 * trace_get_user - reads the user input string separated by space
1599 * (matched by isspace(ch))
1601 * For each string found, the 'struct trace_parser' is updated,
1602 * and the function returns.
1604 * Returns number of bytes read.
1606 * See kernel/trace/trace.h for 'struct trace_parser' details.
1608 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1609 size_t cnt, loff_t *ppos)
1616 trace_parser_clear(parser);
1618 ret = get_user(ch, ubuf++);
1626 * If the parser has not finished with the last write,
1627 * continue reading the user input without skipping spaces.
1629 if (!parser->cont) {
1630 /* skip white space */
1631 while (cnt && isspace(ch)) {
1632 ret = get_user(ch, ubuf++);
1641 /* only spaces were written */
1642 if (isspace(ch) || !ch) {
1649 /* read the non-space input */
1650 while (cnt && !isspace(ch) && ch) {
1651 if (parser->idx < parser->size - 1)
1652 parser->buffer[parser->idx++] = ch;
1657 ret = get_user(ch, ubuf++);
1664 /* We either got finished input or we have to wait for another call. */
1665 if (isspace(ch) || !ch) {
1666 parser->buffer[parser->idx] = 0;
1667 parser->cont = false;
1668 } else if (parser->idx < parser->size - 1) {
1669 parser->cont = true;
1670 parser->buffer[parser->idx++] = ch;
1671 /* Make sure the parsed string always terminates with '\0'. */
1672 parser->buffer[parser->idx] = 0;
1685 /* TODO add a seq_buf_to_buffer() */
1686 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1690 if (trace_seq_used(s) <= s->seq.readpos)
1693 len = trace_seq_used(s) - s->seq.readpos;
1696 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1698 s->seq.readpos += cnt;
1702 unsigned long __read_mostly tracing_thresh;
1704 #ifdef CONFIG_TRACER_MAX_TRACE
1705 static const struct file_operations tracing_max_lat_fops;
1707 #ifdef LATENCY_FS_NOTIFY
1709 static struct workqueue_struct *fsnotify_wq;
1711 static void latency_fsnotify_workfn(struct work_struct *work)
1713 struct trace_array *tr = container_of(work, struct trace_array,
1715 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1718 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1720 struct trace_array *tr = container_of(iwork, struct trace_array,
1722 queue_work(fsnotify_wq, &tr->fsnotify_work);
1725 static void trace_create_maxlat_file(struct trace_array *tr,
1726 struct dentry *d_tracer)
1728 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1729 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1730 tr->d_max_latency = trace_create_file("tracing_max_latency",
1733 &tracing_max_lat_fops);
1736 __init static int latency_fsnotify_init(void)
1738 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1739 WQ_UNBOUND | WQ_HIGHPRI, 0);
1741 pr_err("Unable to allocate tr_max_lat_wq\n");
1747 late_initcall_sync(latency_fsnotify_init);
1749 void latency_fsnotify(struct trace_array *tr)
1754 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1755 * possible that we are called from __schedule() or do_idle(), which
1756 * could cause a deadlock.
1758 irq_work_queue(&tr->fsnotify_irqwork);
1761 #else /* !LATENCY_FS_NOTIFY */
1763 #define trace_create_maxlat_file(tr, d_tracer) \
1764 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1765 d_tracer, tr, &tracing_max_lat_fops)
1770 * Copy the new maximum trace into the separate maximum-trace
1771 * structure. (this way the maximum trace is permanently saved,
1772 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1775 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1777 struct array_buffer *trace_buf = &tr->array_buffer;
1778 struct array_buffer *max_buf = &tr->max_buffer;
1779 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1780 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1783 max_buf->time_start = data->preempt_timestamp;
1785 max_data->saved_latency = tr->max_latency;
1786 max_data->critical_start = data->critical_start;
1787 max_data->critical_end = data->critical_end;
1789 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1790 max_data->pid = tsk->pid;
1792 * If tsk == current, then use current_uid(), as that does not use
1793 * RCU. The irq tracer can be called out of RCU scope.
1796 max_data->uid = current_uid();
1798 max_data->uid = task_uid(tsk);
1800 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1801 max_data->policy = tsk->policy;
1802 max_data->rt_priority = tsk->rt_priority;
1804 /* record this tasks comm */
1805 tracing_record_cmdline(tsk);
1806 latency_fsnotify(tr);
1810 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1812 * @tsk: the task with the latency
1813 * @cpu: The cpu that initiated the trace.
1814 * @cond_data: User data associated with a conditional snapshot
1816 * Flip the buffers between the @tr and the max_tr and record information
1817 * about which task was the cause of this latency.
1820 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1826 WARN_ON_ONCE(!irqs_disabled());
1828 if (!tr->allocated_snapshot) {
1829 /* Only the nop tracer should hit this when disabling */
1830 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1834 arch_spin_lock(&tr->max_lock);
1836 /* Inherit the recordable setting from array_buffer */
1837 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1838 ring_buffer_record_on(tr->max_buffer.buffer);
1840 ring_buffer_record_off(tr->max_buffer.buffer);
1842 #ifdef CONFIG_TRACER_SNAPSHOT
1843 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1844 arch_spin_unlock(&tr->max_lock);
1848 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1850 __update_max_tr(tr, tsk, cpu);
1852 arch_spin_unlock(&tr->max_lock);
1856 * update_max_tr_single - only copy one trace over, and reset the rest
1858 * @tsk: task with the latency
1859 * @cpu: the cpu of the buffer to copy.
1861 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1864 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1871 WARN_ON_ONCE(!irqs_disabled());
1872 if (!tr->allocated_snapshot) {
1873 /* Only the nop tracer should hit this when disabling */
1874 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1878 arch_spin_lock(&tr->max_lock);
1880 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1882 if (ret == -EBUSY) {
1884 * We failed to swap the buffer due to a commit taking
1885 * place on this CPU. We fail to record, but we reset
1886 * the max trace buffer (no one writes directly to it)
1887 * and flag that it failed.
1888 * Another reason is that a resize is in progress.
1890 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1891 "Failed to swap buffers due to commit or resize in progress\n");
1894 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1896 __update_max_tr(tr, tsk, cpu);
1897 arch_spin_unlock(&tr->max_lock);
1900 #endif /* CONFIG_TRACER_MAX_TRACE */
1902 static int wait_on_pipe(struct trace_iterator *iter, int full)
1904 /* Iterators are static, they should be filled or empty */
1905 if (trace_buffer_iter(iter, iter->cpu_file))
1908 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1912 #ifdef CONFIG_FTRACE_STARTUP_TEST
1913 static bool selftests_can_run;
1915 struct trace_selftests {
1916 struct list_head list;
1917 struct tracer *type;
1920 static LIST_HEAD(postponed_selftests);
1922 static int save_selftest(struct tracer *type)
1924 struct trace_selftests *selftest;
1926 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1930 selftest->type = type;
1931 list_add(&selftest->list, &postponed_selftests);
1935 static int run_tracer_selftest(struct tracer *type)
1937 struct trace_array *tr = &global_trace;
1938 struct tracer *saved_tracer = tr->current_trace;
1941 if (!type->selftest || tracing_selftest_disabled)
1945 * If a tracer registers early in boot up (before scheduling is
1946 * initialized and such), then do not run its selftests yet.
1947 * Instead, run it a little later in the boot process.
1949 if (!selftests_can_run)
1950 return save_selftest(type);
1952 if (!tracing_is_on()) {
1953 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1959 * Run a selftest on this tracer.
1960 * Here we reset the trace buffer, and set the current
1961 * tracer to be this tracer. The tracer can then run some
1962 * internal tracing to verify that everything is in order.
1963 * If we fail, we do not register this tracer.
1965 tracing_reset_online_cpus(&tr->array_buffer);
1967 tr->current_trace = type;
1969 #ifdef CONFIG_TRACER_MAX_TRACE
1970 if (type->use_max_tr) {
1971 /* If we expanded the buffers, make sure the max is expanded too */
1972 if (ring_buffer_expanded)
1973 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1974 RING_BUFFER_ALL_CPUS);
1975 tr->allocated_snapshot = true;
1979 /* the test is responsible for initializing and enabling */
1980 pr_info("Testing tracer %s: ", type->name);
1981 ret = type->selftest(type, tr);
1982 /* the test is responsible for resetting too */
1983 tr->current_trace = saved_tracer;
1985 printk(KERN_CONT "FAILED!\n");
1986 /* Add the warning after printing 'FAILED' */
1990 /* Only reset on passing, to avoid touching corrupted buffers */
1991 tracing_reset_online_cpus(&tr->array_buffer);
1993 #ifdef CONFIG_TRACER_MAX_TRACE
1994 if (type->use_max_tr) {
1995 tr->allocated_snapshot = false;
1997 /* Shrink the max buffer again */
1998 if (ring_buffer_expanded)
1999 ring_buffer_resize(tr->max_buffer.buffer, 1,
2000 RING_BUFFER_ALL_CPUS);
2004 printk(KERN_CONT "PASSED\n");
2008 static __init int init_trace_selftests(void)
2010 struct trace_selftests *p, *n;
2011 struct tracer *t, **last;
2014 selftests_can_run = true;
2016 mutex_lock(&trace_types_lock);
2018 if (list_empty(&postponed_selftests))
2021 pr_info("Running postponed tracer tests:\n");
2023 tracing_selftest_running = true;
2024 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2025 /* This loop can take minutes when sanitizers are enabled, so
2026 * let's make sure we allow RCU processing.
2029 ret = run_tracer_selftest(p->type);
2030 /* If the test fails, then warn and remove from available_tracers */
2032 WARN(1, "tracer: %s failed selftest, disabling\n",
2034 last = &trace_types;
2035 for (t = trace_types; t; t = t->next) {
2046 tracing_selftest_running = false;
2049 mutex_unlock(&trace_types_lock);
2053 core_initcall(init_trace_selftests);
2055 static inline int run_tracer_selftest(struct tracer *type)
2059 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2061 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2063 static void __init apply_trace_boot_options(void);
2066 * register_tracer - register a tracer with the ftrace system.
2067 * @type: the plugin for the tracer
2069 * Register a new plugin tracer.
2071 int __init register_tracer(struct tracer *type)
2077 pr_info("Tracer must have a name\n");
2081 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2082 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2086 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2087 pr_warn("Can not register tracer %s due to lockdown\n",
2092 mutex_lock(&trace_types_lock);
2094 tracing_selftest_running = true;
2096 for (t = trace_types; t; t = t->next) {
2097 if (strcmp(type->name, t->name) == 0) {
2099 pr_info("Tracer %s already registered\n",
2106 if (!type->set_flag)
2107 type->set_flag = &dummy_set_flag;
2109 /* Allocate a dummy tracer_flags */
2110 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2115 type->flags->val = 0;
2116 type->flags->opts = dummy_tracer_opt;
2118 if (!type->flags->opts)
2119 type->flags->opts = dummy_tracer_opt;
2121 /* store the tracer for __set_tracer_option */
2122 type->flags->trace = type;
2124 ret = run_tracer_selftest(type);
2128 type->next = trace_types;
2130 add_tracer_options(&global_trace, type);
2133 tracing_selftest_running = false;
2134 mutex_unlock(&trace_types_lock);
2136 if (ret || !default_bootup_tracer)
2139 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2142 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2143 /* Do we want this tracer to start on bootup? */
2144 tracing_set_tracer(&global_trace, type->name);
2145 default_bootup_tracer = NULL;
2147 apply_trace_boot_options();
2149 /* disable other selftests, since this will break it. */
2150 disable_tracing_selftest("running a tracer");
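/*
 * Illustrative sketch (compiled out, not part of the original file): the
 * minimum a plugin tracer provides before calling register_tracer() from an
 * __init context.  my_tracer_init()/my_tracer_reset() are hypothetical; real
 * tracers live in kernel/trace/trace_*.c.
 */
#if 0
static int my_tracer_init(struct trace_array *tr)
{
	/* Start whatever hooks the tracer needs; return 0 on success. */
	return 0;
}

static void my_tracer_reset(struct trace_array *tr)
{
	/* Tear down the hooks installed by my_tracer_init(). */
}

static struct tracer my_tracer __read_mostly = {
	.name	= "my_tracer",
	.init	= my_tracer_init,
	.reset	= my_tracer_reset,
};

static __init int my_tracer_register(void)
{
	return register_tracer(&my_tracer);
}
core_initcall(my_tracer_register);
#endif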
2156 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2158 struct trace_buffer *buffer = buf->buffer;
2163 ring_buffer_record_disable(buffer);
2165 /* Make sure all commits have finished */
2167 ring_buffer_reset_cpu(buffer, cpu);
2169 ring_buffer_record_enable(buffer);
2172 void tracing_reset_online_cpus(struct array_buffer *buf)
2174 struct trace_buffer *buffer = buf->buffer;
2179 ring_buffer_record_disable(buffer);
2181 /* Make sure all commits have finished */
2184 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2186 ring_buffer_reset_online_cpus(buffer);
2188 ring_buffer_record_enable(buffer);
2191 /* Must have trace_types_lock held */
2192 void tracing_reset_all_online_cpus_unlocked(void)
2194 struct trace_array *tr;
2196 lockdep_assert_held(&trace_types_lock);
2198 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2199 if (!tr->clear_trace)
2201 tr->clear_trace = false;
2202 tracing_reset_online_cpus(&tr->array_buffer);
2203 #ifdef CONFIG_TRACER_MAX_TRACE
2204 tracing_reset_online_cpus(&tr->max_buffer);
2209 void tracing_reset_all_online_cpus(void)
2211 mutex_lock(&trace_types_lock);
2212 tracing_reset_all_online_cpus_unlocked();
2213 mutex_unlock(&trace_types_lock);
2217 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2218 * is the tgid last observed corresponding to pid=i.
2220 static int *tgid_map;
2222 /* The maximum valid index into tgid_map. */
2223 static size_t tgid_map_max;
2225 #define SAVED_CMDLINES_DEFAULT 128
2226 #define NO_CMDLINE_MAP UINT_MAX
2228 * Preemption must be disabled before acquiring trace_cmdline_lock.
2229 * The various trace_arrays' max_lock must be acquired in a context
2230 * where interrupt is disabled.
2232 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2233 struct saved_cmdlines_buffer {
2234 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2235 unsigned *map_cmdline_to_pid;
2236 unsigned cmdline_num;
2238 char *saved_cmdlines;
2240 static struct saved_cmdlines_buffer *savedcmd;
2242 static inline char *get_saved_cmdlines(int idx)
2244 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2247 static inline void set_cmdline(int idx, const char *cmdline)
2249 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2252 static int allocate_cmdlines_buffer(unsigned int val,
2253 struct saved_cmdlines_buffer *s)
2255 s->map_cmdline_to_pid = kmalloc_array(val,
2256 sizeof(*s->map_cmdline_to_pid),
2258 if (!s->map_cmdline_to_pid)
2261 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2262 if (!s->saved_cmdlines) {
2263 kfree(s->map_cmdline_to_pid);
2268 s->cmdline_num = val;
2269 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2270 sizeof(s->map_pid_to_cmdline));
2271 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2272 val * sizeof(*s->map_cmdline_to_pid));
2277 static int trace_create_savedcmd(void)
2281 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2285 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2295 int is_tracing_stopped(void)
2297 return global_trace.stop_count;
2301 * tracing_start - quick start of the tracer
2303 * If tracing is enabled but was stopped by tracing_stop,
2304 * this will start the tracer back up.
2306 void tracing_start(void)
2308 struct trace_buffer *buffer;
2309 unsigned long flags;
2311 if (tracing_disabled)
2314 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2315 if (--global_trace.stop_count) {
2316 if (global_trace.stop_count < 0) {
2317 /* Someone screwed up their debugging */
2319 global_trace.stop_count = 0;
2324 /* Prevent the buffers from switching */
2325 arch_spin_lock(&global_trace.max_lock);
2327 buffer = global_trace.array_buffer.buffer;
2329 ring_buffer_record_enable(buffer);
2331 #ifdef CONFIG_TRACER_MAX_TRACE
2332 buffer = global_trace.max_buffer.buffer;
2334 ring_buffer_record_enable(buffer);
2337 arch_spin_unlock(&global_trace.max_lock);
2340 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2343 static void tracing_start_tr(struct trace_array *tr)
2345 struct trace_buffer *buffer;
2346 unsigned long flags;
2348 if (tracing_disabled)
2351 /* If global, we need to also start the max tracer */
2352 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2353 return tracing_start();
2355 raw_spin_lock_irqsave(&tr->start_lock, flags);
2357 if (--tr->stop_count) {
2358 if (tr->stop_count < 0) {
2359 /* Someone screwed up their debugging */
2366 buffer = tr->array_buffer.buffer;
2368 ring_buffer_record_enable(buffer);
2371 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2375 * tracing_stop - quick stop of the tracer
2377 * Light weight way to stop tracing. Use in conjunction with
2380 void tracing_stop(void)
2382 struct trace_buffer *buffer;
2383 unsigned long flags;
2385 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2386 if (global_trace.stop_count++)
2389 /* Prevent the buffers from switching */
2390 arch_spin_lock(&global_trace.max_lock);
2392 buffer = global_trace.array_buffer.buffer;
2394 ring_buffer_record_disable(buffer);
2396 #ifdef CONFIG_TRACER_MAX_TRACE
2397 buffer = global_trace.max_buffer.buffer;
2399 ring_buffer_record_disable(buffer);
2402 arch_spin_unlock(&global_trace.max_lock);
2405 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
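/*
 * tracing_stop()/tracing_start() nest via stop_count, so callers may pair
 * them around a region of interest. A rough, illustrative sketch (the
 * helper below is hypothetical):
 *
 *	tracing_stop();
 *	do_interesting_work();
 *	tracing_start();
 *
 * Every tracing_stop() must be balanced by a tracing_start() before
 * recording resumes.
 */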
2408 static void tracing_stop_tr(struct trace_array *tr)
2410 struct trace_buffer *buffer;
2411 unsigned long flags;
2413 /* If global, we need to also stop the max tracer */
2414 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2415 return tracing_stop();
2417 raw_spin_lock_irqsave(&tr->start_lock, flags);
2418 if (tr->stop_count++)
2421 buffer = tr->array_buffer.buffer;
2423 ring_buffer_record_disable(buffer);
2426 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2429 static int trace_save_cmdline(struct task_struct *tsk)
2433 /* treat recording of idle task as a success */
2437 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2440 * It's not the end of the world if we don't get
2441 * the lock, but we also don't want to spin
2442 * nor do we want to disable interrupts,
2443 * so if we miss here, then better luck next time.
2445 * This is called from within the scheduler and the wakeup path, so
2446 * interrupts had better be disabled and the run queue lock held.
2448 lockdep_assert_preemption_disabled();
2449 if (!arch_spin_trylock(&trace_cmdline_lock))
2452 idx = savedcmd->map_pid_to_cmdline[tpid];
2453 if (idx == NO_CMDLINE_MAP) {
2454 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2456 savedcmd->map_pid_to_cmdline[tpid] = idx;
2457 savedcmd->cmdline_idx = idx;
2460 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2461 set_cmdline(idx, tsk->comm);
2463 arch_spin_unlock(&trace_cmdline_lock);
2468 static void __trace_find_cmdline(int pid, char comm[])
2474 strcpy(comm, "<idle>");
2478 if (WARN_ON_ONCE(pid < 0)) {
2479 strcpy(comm, "<XXX>");
2483 tpid = pid & (PID_MAX_DEFAULT - 1);
2484 map = savedcmd->map_pid_to_cmdline[tpid];
2485 if (map != NO_CMDLINE_MAP) {
2486 tpid = savedcmd->map_cmdline_to_pid[map];
2488 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2492 strcpy(comm, "<...>");
2495 void trace_find_cmdline(int pid, char comm[])
2498 arch_spin_lock(&trace_cmdline_lock);
2500 __trace_find_cmdline(pid, comm);
2502 arch_spin_unlock(&trace_cmdline_lock);
2506 static int *trace_find_tgid_ptr(int pid)
2509 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2510 * if we observe a non-NULL tgid_map then we also observe the correct
2513 int *map = smp_load_acquire(&tgid_map);
2515 if (unlikely(!map || pid > tgid_map_max))
2521 int trace_find_tgid(int pid)
2523 int *ptr = trace_find_tgid_ptr(pid);
2525 return ptr ? *ptr : 0;
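/*
 * trace_find_tgid() intentionally returns 0 when the tgid map was never
 * allocated (the record-tgid option has not been enabled), so callers can
 * treat 0 as "tgid unknown" without any extra checks.
 */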
2528 static int trace_save_tgid(struct task_struct *tsk)
2532 /* treat recording of idle task as a success */
2536 ptr = trace_find_tgid_ptr(tsk->pid);
2544 static bool tracing_record_taskinfo_skip(int flags)
2546 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2548 if (!__this_cpu_read(trace_taskinfo_save))
2554 * tracing_record_taskinfo - record the task info of a task
2556 * @task: task to record
2557 * @flags: TRACE_RECORD_CMDLINE for recording comm
2558 * TRACE_RECORD_TGID for recording tgid
2560 void tracing_record_taskinfo(struct task_struct *task, int flags)
2564 if (tracing_record_taskinfo_skip(flags))
2568 * Record as much task information as possible. If some fail, continue
2569 * to try to record the others.
2571 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2572 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2574 /* If recording any information failed, retry again soon. */
2578 __this_cpu_write(trace_taskinfo_save, false);
2582 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2584 * @prev: previous task during sched_switch
2585 * @next: next task during sched_switch
2586 * @flags: TRACE_RECORD_CMDLINE for recording comm
2587 * TRACE_RECORD_TGID for recording tgid
2589 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2590 struct task_struct *next, int flags)
2594 if (tracing_record_taskinfo_skip(flags))
2598 * Record as much task information as possible. If some fail, continue
2599 * to try to record the others.
2601 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2602 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2603 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2604 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2606 /* If recording any information failed, retry again soon. */
2610 __this_cpu_write(trace_taskinfo_save, false);
2613 /* Helpers to record a specific task information */
2614 void tracing_record_cmdline(struct task_struct *task)
2616 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2619 void tracing_record_tgid(struct task_struct *task)
2621 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2625 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2626 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2627 * simplifies those functions and keeps them in sync.
2629 enum print_line_t trace_handle_return(struct trace_seq *s)
2631 return trace_seq_has_overflowed(s) ?
2632 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2634 EXPORT_SYMBOL_GPL(trace_handle_return);
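/*
 * Typical use in an event print callback (illustrative only; the event
 * and format are made up):
 *
 *	static enum print_line_t foo_trace(struct trace_iterator *iter,
 *					   int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo: %d\n", 42);
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 * An overflowed trace_seq becomes TRACE_TYPE_PARTIAL_LINE, telling the
 * caller the line needs to be retried with more room.
 */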
2636 static unsigned short migration_disable_value(void)
2638 #if defined(CONFIG_SMP)
2639 return current->migration_disabled;
2645 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2647 unsigned int trace_flags = irqs_status;
2650 pc = preempt_count();
2653 trace_flags |= TRACE_FLAG_NMI;
2654 if (pc & HARDIRQ_MASK)
2655 trace_flags |= TRACE_FLAG_HARDIRQ;
2656 if (in_serving_softirq())
2657 trace_flags |= TRACE_FLAG_SOFTIRQ;
2658 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2659 trace_flags |= TRACE_FLAG_BH_OFF;
2661 if (tif_need_resched())
2662 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2663 if (test_preempt_need_resched())
2664 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2665 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2666 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
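/*
 * Layout of the trace_ctx word returned above:
 *   bits  0-3: preempt count (clamped to 0xf)
 *   bits  4-7: migration-disable depth (clamped to 0xf)
 *   bits 16+ : TRACE_FLAG_* state (irq/softirq/NMI/need-resched)
 */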
2669 struct ring_buffer_event *
2670 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2673 unsigned int trace_ctx)
2675 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2678 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2679 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2680 static int trace_buffered_event_ref;
2683 * trace_buffered_event_enable - enable buffering events
2685 * When events are being filtered, it is quicker to use a temporary
2686 * buffer to write the event data into if there's a likely chance
2687 * that it will not be committed. The discard of the ring buffer
2688 * is not as fast as committing, and is much slower than copying to the temp buffer.
2691 * When an event is to be filtered, allocate per-CPU buffers to
2692 * write the event data into; if the event is filtered and discarded
2693 * it is simply dropped, otherwise the entire event is committed to the ring buffer.
2696 void trace_buffered_event_enable(void)
2698 struct ring_buffer_event *event;
2702 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2704 if (trace_buffered_event_ref++)
2707 for_each_tracing_cpu(cpu) {
2708 page = alloc_pages_node(cpu_to_node(cpu),
2709 GFP_KERNEL | __GFP_NORETRY, 0);
2713 event = page_address(page);
2714 memset(event, 0, sizeof(*event));
2716 per_cpu(trace_buffered_event, cpu) = event;
2719 if (cpu == smp_processor_id() &&
2720 __this_cpu_read(trace_buffered_event) !=
2721 per_cpu(trace_buffered_event, cpu))
2728 trace_buffered_event_disable();
2731 static void enable_trace_buffered_event(void *data)
2733 /* Probably not needed, but do it anyway */
2735 this_cpu_dec(trace_buffered_event_cnt);
2738 static void disable_trace_buffered_event(void *data)
2740 this_cpu_inc(trace_buffered_event_cnt);
2744 * trace_buffered_event_disable - disable buffering events
2746 * When a filter is removed, it is faster to not use the buffered
2747 * events, and to commit directly into the ring buffer. Free up
2748 * the temp buffers when there are no more users. This requires
2749 * special synchronization with current events.
2751 void trace_buffered_event_disable(void)
2755 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2757 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2760 if (--trace_buffered_event_ref)
2764 /* For each CPU, set the buffer as used. */
2765 smp_call_function_many(tracing_buffer_mask,
2766 disable_trace_buffered_event, NULL, 1);
2769 /* Wait for all current users to finish */
2772 for_each_tracing_cpu(cpu) {
2773 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2774 per_cpu(trace_buffered_event, cpu) = NULL;
2777 * Make sure trace_buffered_event is NULL before clearing
2778 * trace_buffered_event_cnt.
2783 /* Do the work on each cpu */
2784 smp_call_function_many(tracing_buffer_mask,
2785 enable_trace_buffered_event, NULL, 1);
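/*
 * trace_buffered_event_enable()/trace_buffered_event_disable() are
 * reference counted under event_mutex: the per-CPU event pages are only
 * allocated on the first enable and freed when the last user disables,
 * so filters can be added and removed independently of each other.
 */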
2789 static struct trace_buffer *temp_buffer;
2791 struct ring_buffer_event *
2792 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2793 struct trace_event_file *trace_file,
2794 int type, unsigned long len,
2795 unsigned int trace_ctx)
2797 struct ring_buffer_event *entry;
2798 struct trace_array *tr = trace_file->tr;
2801 *current_rb = tr->array_buffer.buffer;
2803 if (!tr->no_filter_buffering_ref &&
2804 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2805 preempt_disable_notrace();
2807 * Filtering is on, so try to use the per cpu buffer first.
2808 * This buffer will simulate a ring_buffer_event,
2809 * where the type_len is zero and the array[0] will
2810 * hold the full length.
2811 * (see include/linux/ring_buffer.h for details on
2812 * how the ring_buffer_event is structured).
2814 * Using a temp buffer during filtering and copying it
2815 * on a matched filter is quicker than writing directly
2816 * into the ring buffer and then discarding it when
2817 * it doesn't match. That is because the discard
2818 * requires several atomic operations to get right.
2819 * Copying on match and doing nothing on a failed match
2820 * is still quicker than no copy on match, but having
2821 * to discard out of the ring buffer on a failed match.
2823 if ((entry = __this_cpu_read(trace_buffered_event))) {
2824 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2826 val = this_cpu_inc_return(trace_buffered_event_cnt);
2829 * Preemption is disabled, but interrupts and NMIs
2830 * can still come in now. If that happens after
2831 * the above increment, then it will have to go
2832 * back to the old method of allocating the event
2833 * on the ring buffer, and if the filter fails, it
2834 * will have to call ring_buffer_discard_commit()
2837 * Need to also check the unlikely case that the
2838 * length is bigger than the temp buffer size.
2839 * If that happens, then the reserve is pretty much
2840 * guaranteed to fail, as the ring buffer currently
2841 * only allows events less than a page. But that may
2842 * change in the future, so let the ring buffer reserve
2843 * handle the failure in that case.
2845 if (val == 1 && likely(len <= max_len)) {
2846 trace_event_setup(entry, type, trace_ctx);
2847 entry->array[0] = len;
2848 /* Return with preemption disabled */
2851 this_cpu_dec(trace_buffered_event_cnt);
2853 /* __trace_buffer_lock_reserve() disables preemption */
2854 preempt_enable_notrace();
2857 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2860 * If tracing is off, but we have triggers enabled
2861 * we still need to look at the event data. Use the temp_buffer
2862 * to store the trace event for the trigger to use. It's recursion
2863 * safe and will not be recorded anywhere.
2865 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2866 *current_rb = temp_buffer;
2867 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2872 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
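/*
 * A rough sketch of how this is used: the TRACE_EVENT() generated code
 * (via trace_event_buffer_reserve()) obtains an event here, fills in the
 * payload returned by ring_buffer_event_data(), and then hands it to
 * trace_event_buffer_commit() below, which either writes it out or
 * discards it if a filter or trigger says so.
 */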
2874 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2875 static DEFINE_MUTEX(tracepoint_printk_mutex);
2877 static void output_printk(struct trace_event_buffer *fbuffer)
2879 struct trace_event_call *event_call;
2880 struct trace_event_file *file;
2881 struct trace_event *event;
2882 unsigned long flags;
2883 struct trace_iterator *iter = tracepoint_print_iter;
2885 /* We should never get here if iter is NULL */
2886 if (WARN_ON_ONCE(!iter))
2889 event_call = fbuffer->trace_file->event_call;
2890 if (!event_call || !event_call->event.funcs ||
2891 !event_call->event.funcs->trace)
2894 file = fbuffer->trace_file;
2895 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2896 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2897 !filter_match_preds(file->filter, fbuffer->entry)))
2900 event = &fbuffer->trace_file->event_call->event;
2902 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2903 trace_seq_init(&iter->seq);
2904 iter->ent = fbuffer->entry;
2905 event_call->event.funcs->trace(iter, 0, event);
2906 trace_seq_putc(&iter->seq, 0);
2907 printk("%s", iter->seq.buffer);
2909 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2912 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2913 void *buffer, size_t *lenp,
2916 int save_tracepoint_printk;
2919 mutex_lock(&tracepoint_printk_mutex);
2920 save_tracepoint_printk = tracepoint_printk;
2922 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2925 * This will force exiting early, as tracepoint_printk
2926 * is always zero when tracepoint_print_iter is not allocated
2928 if (!tracepoint_print_iter)
2929 tracepoint_printk = 0;
2931 if (save_tracepoint_printk == tracepoint_printk)
2934 if (tracepoint_printk)
2935 static_key_enable(&tracepoint_printk_key.key);
2937 static_key_disable(&tracepoint_printk_key.key);
2940 mutex_unlock(&tracepoint_printk_mutex);
2945 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2947 enum event_trigger_type tt = ETT_NONE;
2948 struct trace_event_file *file = fbuffer->trace_file;
2950 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2951 fbuffer->entry, &tt))
2954 if (static_key_false(&tracepoint_printk_key.key))
2955 output_printk(fbuffer);
2957 if (static_branch_unlikely(&trace_event_exports_enabled))
2958 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2960 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2961 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2965 event_triggers_post_call(file, tt);
2968 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2973 * trace_buffer_unlock_commit_regs()
2974 * trace_event_buffer_commit()
2975 * trace_event_raw_event_xxx()
2977 # define STACK_SKIP 3
2979 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2980 struct trace_buffer *buffer,
2981 struct ring_buffer_event *event,
2982 unsigned int trace_ctx,
2983 struct pt_regs *regs)
2985 __buffer_unlock_commit(buffer, event);
2988 * If regs is not set, then skip the necessary functions.
2989 * Note, we can still get here via blktrace, wakeup tracer
2990 * and mmiotrace, but that's ok if they lose a function or
2991 * two. They are not that meaningful.
2993 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2994 ftrace_trace_userstack(tr, buffer, trace_ctx);
2998 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3001 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3002 struct ring_buffer_event *event)
3004 __buffer_unlock_commit(buffer, event);
3008 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3009 parent_ip, unsigned int trace_ctx)
3011 struct trace_event_call *call = &event_function;
3012 struct trace_buffer *buffer = tr->array_buffer.buffer;
3013 struct ring_buffer_event *event;
3014 struct ftrace_entry *entry;
3016 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3020 entry = ring_buffer_event_data(event);
3022 entry->parent_ip = parent_ip;
3024 if (!call_filter_check_discard(call, entry, buffer, event)) {
3025 if (static_branch_unlikely(&trace_function_exports_enabled))
3026 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3027 __buffer_unlock_commit(buffer, event);
3031 #ifdef CONFIG_STACKTRACE
3033 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3034 #define FTRACE_KSTACK_NESTING 4
3036 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3038 struct ftrace_stack {
3039 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3043 struct ftrace_stacks {
3044 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3047 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3048 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3050 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3051 unsigned int trace_ctx,
3052 int skip, struct pt_regs *regs)
3054 struct trace_event_call *call = &event_kernel_stack;
3055 struct ring_buffer_event *event;
3056 unsigned int size, nr_entries;
3057 struct ftrace_stack *fstack;
3058 struct stack_entry *entry;
3062 * Add one, for this function and the call to stack_trace_save().
3063 * If regs is set, then these functions will not be in the way.
3065 #ifndef CONFIG_UNWINDER_ORC
3070 preempt_disable_notrace();
3072 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3074 /* This should never happen. If it does, yell once and skip */
3075 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3079 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3080 * interrupt will either see the value pre increment or post
3081 * increment. If the interrupt happens pre increment it will have
3082 * restored the counter when it returns. We just need a barrier to
3083 * keep gcc from moving things around.
3087 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3088 size = ARRAY_SIZE(fstack->calls);
3091 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3094 nr_entries = stack_trace_save(fstack->calls, size, skip);
3097 size = nr_entries * sizeof(unsigned long);
3098 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3099 (sizeof(*entry) - sizeof(entry->caller)) + size,
3103 entry = ring_buffer_event_data(event);
3105 memcpy(&entry->caller, fstack->calls, size);
3106 entry->size = nr_entries;
3108 if (!call_filter_check_discard(call, entry, buffer, event))
3109 __buffer_unlock_commit(buffer, event);
3112 /* Again, don't let gcc optimize things here */
3114 __this_cpu_dec(ftrace_stack_reserve);
3115 preempt_enable_notrace();
3119 static inline void ftrace_trace_stack(struct trace_array *tr,
3120 struct trace_buffer *buffer,
3121 unsigned int trace_ctx,
3122 int skip, struct pt_regs *regs)
3124 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3127 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3130 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3133 struct trace_buffer *buffer = tr->array_buffer.buffer;
3135 if (rcu_is_watching()) {
3136 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3141 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3142 * but if the above rcu_is_watching() failed, then the NMI
3143 * triggered someplace critical, and ct_irq_enter() should
3144 * not be called from NMI.
3146 if (unlikely(in_nmi()))
3149 ct_irq_enter_irqson();
3150 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3151 ct_irq_exit_irqson();
3155 * trace_dump_stack - record a stack back trace in the trace buffer
3156 * @skip: Number of functions to skip (helper handlers)
3158 void trace_dump_stack(int skip)
3160 if (tracing_disabled || tracing_selftest_running)
3163 #ifndef CONFIG_UNWINDER_ORC
3164 /* Skip 1 to skip this function. */
3167 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3168 tracing_gen_ctx(), skip, NULL);
3170 EXPORT_SYMBOL_GPL(trace_dump_stack);
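/*
 * Example: trace_dump_stack(0) records the caller's own backtrace into
 * the global trace buffer; a non-zero @skip trims that many callers from
 * the top of the recorded stack.
 */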
3172 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3173 static DEFINE_PER_CPU(int, user_stack_count);
3176 ftrace_trace_userstack(struct trace_array *tr,
3177 struct trace_buffer *buffer, unsigned int trace_ctx)
3179 struct trace_event_call *call = &event_user_stack;
3180 struct ring_buffer_event *event;
3181 struct userstack_entry *entry;
3183 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3187 * NMIs can not handle page faults, even with fixups.
3188 * Saving the user stack can (and often does) fault.
3190 if (unlikely(in_nmi()))
3194 * prevent recursion, since the user stack tracing may
3195 * trigger other kernel events.
3198 if (__this_cpu_read(user_stack_count))
3201 __this_cpu_inc(user_stack_count);
3203 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3204 sizeof(*entry), trace_ctx);
3206 goto out_drop_count;
3207 entry = ring_buffer_event_data(event);
3209 entry->tgid = current->tgid;
3210 memset(&entry->caller, 0, sizeof(entry->caller));
3212 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3213 if (!call_filter_check_discard(call, entry, buffer, event))
3214 __buffer_unlock_commit(buffer, event);
3217 __this_cpu_dec(user_stack_count);
3221 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3222 static void ftrace_trace_userstack(struct trace_array *tr,
3223 struct trace_buffer *buffer,
3224 unsigned int trace_ctx)
3227 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3229 #endif /* CONFIG_STACKTRACE */
3232 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3233 unsigned long long delta)
3235 entry->bottom_delta_ts = delta & U32_MAX;
3236 entry->top_delta_ts = (delta >> 32);
3239 void trace_last_func_repeats(struct trace_array *tr,
3240 struct trace_func_repeats *last_info,
3241 unsigned int trace_ctx)
3243 struct trace_buffer *buffer = tr->array_buffer.buffer;
3244 struct func_repeats_entry *entry;
3245 struct ring_buffer_event *event;
3248 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3249 sizeof(*entry), trace_ctx);
3253 delta = ring_buffer_event_time_stamp(buffer, event) -
3254 last_info->ts_last_call;
3256 entry = ring_buffer_event_data(event);
3257 entry->ip = last_info->ip;
3258 entry->parent_ip = last_info->parent_ip;
3259 entry->count = last_info->count;
3260 func_repeats_set_delta_ts(entry, delta);
3262 __buffer_unlock_commit(buffer, event);
3265 /* created for use with alloc_percpu */
3266 struct trace_buffer_struct {
3268 char buffer[4][TRACE_BUF_SIZE];
3271 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3274 * This allows for lockless recording. If we're nested too deeply, then
3275 * this returns NULL.
3277 static char *get_trace_buf(void)
3279 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3281 if (!trace_percpu_buffer || buffer->nesting >= 4)
3286 /* Interrupts must see nesting incremented before we use the buffer */
3288 return &buffer->buffer[buffer->nesting - 1][0];
3291 static void put_trace_buf(void)
3293 /* Don't let the decrement of nesting leak before this */
3295 this_cpu_dec(trace_percpu_buffer->nesting);
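/*
 * The four nesting levels in trace_buffer_struct correspond to the
 * contexts that can interrupt one another: normal, softirq, hardirq and
 * NMI. get_trace_buf()/put_trace_buf() must therefore be used in matched
 * pairs within a single context.
 */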
3298 static int alloc_percpu_trace_buffer(void)
3300 struct trace_buffer_struct __percpu *buffers;
3302 if (trace_percpu_buffer)
3305 buffers = alloc_percpu(struct trace_buffer_struct);
3306 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3309 trace_percpu_buffer = buffers;
3313 static int buffers_allocated;
3315 void trace_printk_init_buffers(void)
3317 if (buffers_allocated)
3320 if (alloc_percpu_trace_buffer())
3323 /* trace_printk() is for debug use only. Don't use it in production. */
3326 pr_warn("**********************************************************\n");
3327 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3329 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3331 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3332 pr_warn("** unsafe for production use. **\n");
3334 pr_warn("** If you see this message and you are not debugging **\n");
3335 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3337 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3338 pr_warn("**********************************************************\n");
3340 /* Expand the buffers to set size */
3341 tracing_update_buffers();
3343 buffers_allocated = 1;
3346 * trace_printk_init_buffers() can be called by modules.
3347 * If that happens, then we need to start cmdline recording
3348 * directly here. If the global_trace.buffer is already
3349 * allocated here, then this was called by module code.
3351 if (global_trace.array_buffer.buffer)
3352 tracing_start_cmdline_record();
3354 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3356 void trace_printk_start_comm(void)
3358 /* Start tracing comms if trace printk is set */
3359 if (!buffers_allocated)
3361 tracing_start_cmdline_record();
3364 static void trace_printk_start_stop_comm(int enabled)
3366 if (!buffers_allocated)
3370 tracing_start_cmdline_record();
3372 tracing_stop_cmdline_record();
3376 * trace_vbprintk - write binary msg to tracing buffer
3377 * @ip: The address of the caller
3378 * @fmt: The string format to write to the buffer
3379 * @args: Arguments for @fmt
3381 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3383 struct trace_event_call *call = &event_bprint;
3384 struct ring_buffer_event *event;
3385 struct trace_buffer *buffer;
3386 struct trace_array *tr = &global_trace;
3387 struct bprint_entry *entry;
3388 unsigned int trace_ctx;
3392 if (unlikely(tracing_selftest_running || tracing_disabled))
3395 /* Don't pollute graph traces with trace_vprintk internals */
3396 pause_graph_tracing();
3398 trace_ctx = tracing_gen_ctx();
3399 preempt_disable_notrace();
3401 tbuffer = get_trace_buf();
3407 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3409 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3412 size = sizeof(*entry) + sizeof(u32) * len;
3413 buffer = tr->array_buffer.buffer;
3414 ring_buffer_nest_start(buffer);
3415 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3419 entry = ring_buffer_event_data(event);
3423 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3424 if (!call_filter_check_discard(call, entry, buffer, event)) {
3425 __buffer_unlock_commit(buffer, event);
3426 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3430 ring_buffer_nest_end(buffer);
3435 preempt_enable_notrace();
3436 unpause_graph_tracing();
3440 EXPORT_SYMBOL_GPL(trace_vbprintk);
3444 __trace_array_vprintk(struct trace_buffer *buffer,
3445 unsigned long ip, const char *fmt, va_list args)
3447 struct trace_event_call *call = &event_print;
3448 struct ring_buffer_event *event;
3450 struct print_entry *entry;
3451 unsigned int trace_ctx;
3454 if (tracing_disabled || tracing_selftest_running)
3457 /* Don't pollute graph traces with trace_vprintk internals */
3458 pause_graph_tracing();
3460 trace_ctx = tracing_gen_ctx();
3461 preempt_disable_notrace();
3464 tbuffer = get_trace_buf();
3470 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3472 size = sizeof(*entry) + len + 1;
3473 ring_buffer_nest_start(buffer);
3474 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3478 entry = ring_buffer_event_data(event);
3481 memcpy(&entry->buf, tbuffer, len + 1);
3482 if (!call_filter_check_discard(call, entry, buffer, event)) {
3483 __buffer_unlock_commit(buffer, event);
3484 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3488 ring_buffer_nest_end(buffer);
3492 preempt_enable_notrace();
3493 unpause_graph_tracing();
3499 int trace_array_vprintk(struct trace_array *tr,
3500 unsigned long ip, const char *fmt, va_list args)
3502 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3506 * trace_array_printk - Print a message to a specific instance
3507 * @tr: The instance trace_array descriptor
3508 * @ip: The instruction pointer that this is called from.
3509 * @fmt: The format to print (printf format)
3511 * If a subsystem sets up its own instance, they have the right to
3512 * printk strings into their tracing instance buffer using this
3513 * function. Note, this function will not write into the top level
3514 * buffer (use trace_printk() for that), as writing into the top level
3515 * buffer should only have events that can be individually disabled.
3516 * trace_printk() is only used for debugging a kernel, and should never
3517 * be incorporated into normal use.
3519 * trace_array_printk() can be used, as it will not add noise to the
3520 * top level tracing buffer.
3522 * Note, trace_array_init_printk() must be called on @tr before this
3526 int trace_array_printk(struct trace_array *tr,
3527 unsigned long ip, const char *fmt, ...)
3535 /* This is only allowed for created instances */
3536 if (tr == &global_trace)
3539 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3543 ret = trace_array_vprintk(tr, ip, fmt, ap);
3547 EXPORT_SYMBOL_GPL(trace_array_printk);
3550 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3551 * @tr: The trace array to initialize the buffers for
3553 * As trace_array_printk() only writes into instances, they are OK to
3554 * have in the kernel (unlike trace_printk()). This needs to be called
3555 * before trace_array_printk() can be used on a trace_array.
3557 int trace_array_init_printk(struct trace_array *tr)
3562 /* This is only allowed for created instances */
3563 if (tr == &global_trace)
3566 return alloc_percpu_trace_buffer();
3568 EXPORT_SYMBOL_GPL(trace_array_init_printk);
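/*
 * Illustrative use from a subsystem that owns its own instance (the
 * instance name below is made up):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 1);
 */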
3571 int trace_array_printk_buf(struct trace_buffer *buffer,
3572 unsigned long ip, const char *fmt, ...)
3577 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3581 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3587 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3589 return trace_array_vprintk(&global_trace, ip, fmt, args);
3591 EXPORT_SYMBOL_GPL(trace_vprintk);
3593 static void trace_iterator_increment(struct trace_iterator *iter)
3595 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3599 ring_buffer_iter_advance(buf_iter);
3602 static struct trace_entry *
3603 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3604 unsigned long *lost_events)
3606 struct ring_buffer_event *event;
3607 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3610 event = ring_buffer_iter_peek(buf_iter, ts);
3612 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3613 (unsigned long)-1 : 0;
3615 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3620 iter->ent_size = ring_buffer_event_length(event);
3621 return ring_buffer_event_data(event);
3627 static struct trace_entry *
3628 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3629 unsigned long *missing_events, u64 *ent_ts)
3631 struct trace_buffer *buffer = iter->array_buffer->buffer;
3632 struct trace_entry *ent, *next = NULL;
3633 unsigned long lost_events = 0, next_lost = 0;
3634 int cpu_file = iter->cpu_file;
3635 u64 next_ts = 0, ts;
3641 * If we are in a per_cpu trace file, don't bother iterating over
3642 * all CPUs; peek directly at that one.
3644 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3645 if (ring_buffer_empty_cpu(buffer, cpu_file))
3647 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3649 *ent_cpu = cpu_file;
3654 for_each_tracing_cpu(cpu) {
3656 if (ring_buffer_empty_cpu(buffer, cpu))
3659 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3662 * Pick the entry with the smallest timestamp:
3664 if (ent && (!next || ts < next_ts)) {
3668 next_lost = lost_events;
3669 next_size = iter->ent_size;
3673 iter->ent_size = next_size;
3676 *ent_cpu = next_cpu;
3682 *missing_events = next_lost;
3687 #define STATIC_FMT_BUF_SIZE 128
3688 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3690 static char *trace_iter_expand_format(struct trace_iterator *iter)
3695 * iter->tr is NULL when used with tp_printk, which makes
3696 * this get called where it is not safe to call krealloc().
3698 if (!iter->tr || iter->fmt == static_fmt_buf)
3701 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3704 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3711 /* Returns true if the string is safe to dereference from an event */
3712 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3715 unsigned long addr = (unsigned long)str;
3716 struct trace_event *trace_event;
3717 struct trace_event_call *event;
3719 /* Ignore strings with no length */
3723 /* OK if part of the event data */
3724 if ((addr >= (unsigned long)iter->ent) &&
3725 (addr < (unsigned long)iter->ent + iter->ent_size))
3728 /* OK if part of the temp seq buffer */
3729 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3730 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3733 /* Core rodata can not be freed */
3734 if (is_kernel_rodata(addr))
3737 if (trace_is_tracepoint_string(str))
3741 * Now this could be a module event, referencing core module
3742 * data, which is OK.
3747 trace_event = ftrace_find_event(iter->ent->type);
3751 event = container_of(trace_event, struct trace_event_call, event);
3752 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3755 /* Would rather have rodata, but this will suffice */
3756 if (within_module_core(addr, event->module))
3762 static const char *show_buffer(struct trace_seq *s)
3764 struct seq_buf *seq = &s->seq;
3766 seq_buf_terminate(seq);
3771 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3773 static int test_can_verify_check(const char *fmt, ...)
3780 * The verifier depends on vsnprintf() modifying the va_list
3781 * passed to it, where it is sent as a reference. Some architectures
3782 * (like x86_32) pass it by value, which means that vsnprintf()
3783 * does not modify the va_list passed to it, and the verifier
3784 * would then need to be able to understand all the values that
3785 * vsnprintf can use. If it is passed by value, then the verifier is disabled.
3789 vsnprintf(buf, 16, "%d", ap);
3790 ret = va_arg(ap, int);
3796 static void test_can_verify(void)
3798 if (!test_can_verify_check("%d %d", 0, 1)) {
3799 pr_info("trace event string verifier disabled\n");
3800 static_branch_inc(&trace_no_verify);
3805 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3806 * @iter: The iterator that holds the seq buffer and the event being printed
3807 * @fmt: The format used to print the event
3808 * @ap: The va_list holding the data to print from @fmt.
3810 * This writes the data into the @iter->seq buffer using the data from
3811 * @fmt and @ap. If the format has a %s, then the source of the string
3812 * is examined to make sure it is safe to print, otherwise it will
3813 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3816 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3819 const char *p = fmt;
3823 if (WARN_ON_ONCE(!fmt))
3826 if (static_branch_unlikely(&trace_no_verify))
3829 /* Don't bother checking when doing a ftrace_dump() */
3830 if (iter->fmt == static_fmt_buf)
3839 /* We only care about %s and variants */
3840 for (i = 0; p[i]; i++) {
3841 if (i + 1 >= iter->fmt_size) {
3843 * If we can't expand the copy buffer, just print it.
3846 if (!trace_iter_expand_format(iter))
3850 if (p[i] == '\\' && p[i+1]) {
3855 /* Need to test cases like %08.*s */
3856 for (j = 1; p[i+j]; j++) {
3857 if (isdigit(p[i+j]) ||
3860 if (p[i+j] == '*') {
3872 /* If no %s found then just print normally */
3876 /* Copy up to the %s, and print that */
3877 strncpy(iter->fmt, p, i);
3878 iter->fmt[i] = '\0';
3879 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3882 * If iter->seq is full, the above call no longer guarantees
3883 * that ap is in sync with fmt processing, and further calls
3884 * to va_arg() can return wrong positional arguments.
3886 * Ensure that ap is no longer used in this case.
3888 if (iter->seq.full) {
3894 len = va_arg(ap, int);
3896 /* The ap now points to the string data of the %s */
3897 str = va_arg(ap, const char *);
3900 * If you hit this warning, it is likely that the
3901 * trace event in question used %s on a string that
3902 * was saved at the time of the event, but may not be
3903 * around when the trace is read. Use __string(),
3904 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3905 * instead. See samples/trace_events/trace-events-sample.h
3908 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3909 "fmt: '%s' current_buffer: '%s'",
3910 fmt, show_buffer(&iter->seq))) {
3913 /* Try to safely read the string */
3915 if (len + 1 > iter->fmt_size)
3916 len = iter->fmt_size - 1;
3919 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3923 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3927 trace_seq_printf(&iter->seq, "(0x%px)", str);
3929 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3931 str = "[UNSAFE-MEMORY]";
3932 strcpy(iter->fmt, "%s");
3934 strncpy(iter->fmt, p + i, j + 1);
3935 iter->fmt[j+1] = '\0';
3938 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3940 trace_seq_printf(&iter->seq, iter->fmt, str);
3946 trace_seq_vprintf(&iter->seq, p, ap);
3949 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3951 const char *p, *new_fmt;
3954 if (WARN_ON_ONCE(!fmt))
3957 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3961 new_fmt = q = iter->fmt;
3963 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3964 if (!trace_iter_expand_format(iter))
3967 q += iter->fmt - new_fmt;
3968 new_fmt = iter->fmt;
3973 /* Replace %p with %px */
3977 } else if (p[0] == 'p' && !isalnum(p[1])) {
3988 #define STATIC_TEMP_BUF_SIZE 128
3989 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3991 /* Find the next real entry, without updating the iterator itself */
3992 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3993 int *ent_cpu, u64 *ent_ts)
3995 /* __find_next_entry will reset ent_size */
3996 int ent_size = iter->ent_size;
3997 struct trace_entry *entry;
4000 * If called from ftrace_dump(), then the iter->temp buffer
4001 * will be the static_temp_buf and not created from kmalloc.
4002 * If the entry size is greater than the buffer, we can
4003 * not save it. Just return NULL in that case. This is only
4004 * used to add markers when two consecutive events' time
4005 * stamps have a large delta. See trace_print_lat_context()
4007 if (iter->temp == static_temp_buf &&
4008 STATIC_TEMP_BUF_SIZE < ent_size)
4012 * The __find_next_entry() may call peek_next_entry(), which may
4013 * call ring_buffer_peek() that may make the contents of iter->ent
4014 * undefined. Need to copy iter->ent now.
4016 if (iter->ent && iter->ent != iter->temp) {
4017 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4018 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4020 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4025 iter->temp_size = iter->ent_size;
4027 memcpy(iter->temp, iter->ent, iter->ent_size);
4028 iter->ent = iter->temp;
4030 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4031 /* Put back the original ent_size */
4032 iter->ent_size = ent_size;
4037 /* Find the next real entry, and increment the iterator to the next entry */
4038 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4040 iter->ent = __find_next_entry(iter, &iter->cpu,
4041 &iter->lost_events, &iter->ts);
4044 trace_iterator_increment(iter);
4046 return iter->ent ? iter : NULL;
4049 static void trace_consume(struct trace_iterator *iter)
4051 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4052 &iter->lost_events);
4055 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4057 struct trace_iterator *iter = m->private;
4061 WARN_ON_ONCE(iter->leftover);
4065 /* can't go backwards */
4070 ent = trace_find_next_entry_inc(iter);
4074 while (ent && iter->idx < i)
4075 ent = trace_find_next_entry_inc(iter);
4082 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4084 struct ring_buffer_iter *buf_iter;
4085 unsigned long entries = 0;
4088 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4090 buf_iter = trace_buffer_iter(iter, cpu);
4094 ring_buffer_iter_reset(buf_iter);
4097 * We could have the case with the max latency tracers
4098 * that a reset never took place on a cpu. This is evident
4099 * by the timestamp being before the start of the buffer.
4101 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4102 if (ts >= iter->array_buffer->time_start)
4105 ring_buffer_iter_advance(buf_iter);
4108 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4112 * The current tracer is copied to avoid taking a global lock all around.
4115 static void *s_start(struct seq_file *m, loff_t *pos)
4117 struct trace_iterator *iter = m->private;
4118 struct trace_array *tr = iter->tr;
4119 int cpu_file = iter->cpu_file;
4125 * copy the tracer to avoid using a global lock all around.
4126 * iter->trace is a copy of current_trace, the pointer to the
4127 * name may be used instead of a strcmp(), as iter->trace->name
4128 * will point to the same string as current_trace->name.
4130 mutex_lock(&trace_types_lock);
4131 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
4132 /* Close iter->trace before switching to the new current tracer */
4133 if (iter->trace->close)
4134 iter->trace->close(iter);
4135 *iter->trace = *tr->current_trace;
4136 /* Reopen the new current tracer */
4137 if (iter->trace->open)
4138 iter->trace->open(iter);
4140 mutex_unlock(&trace_types_lock);
4142 #ifdef CONFIG_TRACER_MAX_TRACE
4143 if (iter->snapshot && iter->trace->use_max_tr)
4144 return ERR_PTR(-EBUSY);
4147 if (*pos != iter->pos) {
4152 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4153 for_each_tracing_cpu(cpu)
4154 tracing_iter_reset(iter, cpu);
4156 tracing_iter_reset(iter, cpu_file);
4159 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4164 * If we overflowed the seq_file before, then we want
4165 * to just reuse the trace_seq buffer again.
4171 p = s_next(m, p, &l);
4175 trace_event_read_lock();
4176 trace_access_lock(cpu_file);
4180 static void s_stop(struct seq_file *m, void *p)
4182 struct trace_iterator *iter = m->private;
4184 #ifdef CONFIG_TRACER_MAX_TRACE
4185 if (iter->snapshot && iter->trace->use_max_tr)
4189 trace_access_unlock(iter->cpu_file);
4190 trace_event_read_unlock();
4194 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4195 unsigned long *entries, int cpu)
4197 unsigned long count;
4199 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4201 * If this buffer has skipped entries, then we hold all
4202 * entries for the trace and we need to ignore the
4203 * ones before the time stamp.
4205 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4206 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4207 /* total is the same as the entries */
4211 ring_buffer_overrun_cpu(buf->buffer, cpu);
4216 get_total_entries(struct array_buffer *buf,
4217 unsigned long *total, unsigned long *entries)
4225 for_each_tracing_cpu(cpu) {
4226 get_total_entries_cpu(buf, &t, &e, cpu);
4232 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4234 unsigned long total, entries;
4239 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4244 unsigned long trace_total_entries(struct trace_array *tr)
4246 unsigned long total, entries;
4251 get_total_entries(&tr->array_buffer, &total, &entries);
4256 static void print_lat_help_header(struct seq_file *m)
4258 seq_puts(m, "# _------=> CPU# \n"
4259 "# / _-----=> irqs-off/BH-disabled\n"
4260 "# | / _----=> need-resched \n"
4261 "# || / _---=> hardirq/softirq \n"
4262 "# ||| / _--=> preempt-depth \n"
4263 "# |||| / _-=> migrate-disable \n"
4264 "# ||||| / delay \n"
4265 "# cmd pid |||||| time | caller \n"
4266 "# \\ / |||||| \\ | / \n");
4269 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4271 unsigned long total;
4272 unsigned long entries;
4274 get_total_entries(buf, &total, &entries);
4275 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4276 entries, total, num_online_cpus());
4280 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4283 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4285 print_event_info(buf, m);
4287 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4288 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4291 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4294 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4295 static const char space[] = " ";
4296 int prec = tgid ? 12 : 2;
4298 print_event_info(buf, m);
4300 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4301 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4302 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4303 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4304 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4305 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4306 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4307 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4311 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4313 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4314 struct array_buffer *buf = iter->array_buffer;
4315 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4316 struct tracer *type = iter->trace;
4317 unsigned long entries;
4318 unsigned long total;
4319 const char *name = type->name;
4321 get_total_entries(buf, &total, &entries);
4323 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4325 seq_puts(m, "# -----------------------------------"
4326 "---------------------------------\n");
4327 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4328 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4329 nsecs_to_usecs(data->saved_latency),
4333 preempt_model_none() ? "server" :
4334 preempt_model_voluntary() ? "desktop" :
4335 preempt_model_full() ? "preempt" :
4336 preempt_model_rt() ? "preempt_rt" :
4338 /* These are reserved for later use */
4341 seq_printf(m, " #P:%d)\n", num_online_cpus());
4345 seq_puts(m, "# -----------------\n");
4346 seq_printf(m, "# | task: %.16s-%d "
4347 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4348 data->comm, data->pid,
4349 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4350 data->policy, data->rt_priority);
4351 seq_puts(m, "# -----------------\n");
4353 if (data->critical_start) {
4354 seq_puts(m, "# => started at: ");
4355 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4356 trace_print_seq(m, &iter->seq);
4357 seq_puts(m, "\n# => ended at: ");
4358 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4359 trace_print_seq(m, &iter->seq);
4360 seq_puts(m, "\n#\n");
4366 static void test_cpu_buff_start(struct trace_iterator *iter)
4368 struct trace_seq *s = &iter->seq;
4369 struct trace_array *tr = iter->tr;
4371 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4374 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4377 if (cpumask_available(iter->started) &&
4378 cpumask_test_cpu(iter->cpu, iter->started))
4381 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4384 if (cpumask_available(iter->started))
4385 cpumask_set_cpu(iter->cpu, iter->started);
4387 /* Don't print started cpu buffer for the first entry of the trace */
4389 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4393 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4395 struct trace_array *tr = iter->tr;
4396 struct trace_seq *s = &iter->seq;
4397 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4398 struct trace_entry *entry;
4399 struct trace_event *event;
4403 test_cpu_buff_start(iter);
4405 event = ftrace_find_event(entry->type);
4407 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4408 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4409 trace_print_lat_context(iter);
4411 trace_print_context(iter);
4414 if (trace_seq_has_overflowed(s))
4415 return TRACE_TYPE_PARTIAL_LINE;
4418 return event->funcs->trace(iter, sym_flags, event);
4420 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4422 return trace_handle_return(s);
4425 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4427 struct trace_array *tr = iter->tr;
4428 struct trace_seq *s = &iter->seq;
4429 struct trace_entry *entry;
4430 struct trace_event *event;
4434 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4435 trace_seq_printf(s, "%d %d %llu ",
4436 entry->pid, iter->cpu, iter->ts);
4438 if (trace_seq_has_overflowed(s))
4439 return TRACE_TYPE_PARTIAL_LINE;
4441 event = ftrace_find_event(entry->type);
4443 return event->funcs->raw(iter, 0, event);
4445 trace_seq_printf(s, "%d ?\n", entry->type);
4447 return trace_handle_return(s);
4450 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4452 struct trace_array *tr = iter->tr;
4453 struct trace_seq *s = &iter->seq;
4454 unsigned char newline = '\n';
4455 struct trace_entry *entry;
4456 struct trace_event *event;
4460 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4461 SEQ_PUT_HEX_FIELD(s, entry->pid);
4462 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4463 SEQ_PUT_HEX_FIELD(s, iter->ts);
4464 if (trace_seq_has_overflowed(s))
4465 return TRACE_TYPE_PARTIAL_LINE;
4468 event = ftrace_find_event(entry->type);
4470 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4471 if (ret != TRACE_TYPE_HANDLED)
4475 SEQ_PUT_FIELD(s, newline);
4477 return trace_handle_return(s);
4480 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4482 struct trace_array *tr = iter->tr;
4483 struct trace_seq *s = &iter->seq;
4484 struct trace_entry *entry;
4485 struct trace_event *event;
4489 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4490 SEQ_PUT_FIELD(s, entry->pid);
4491 SEQ_PUT_FIELD(s, iter->cpu);
4492 SEQ_PUT_FIELD(s, iter->ts);
4493 if (trace_seq_has_overflowed(s))
4494 return TRACE_TYPE_PARTIAL_LINE;
4497 event = ftrace_find_event(entry->type);
4498 return event ? event->funcs->binary(iter, 0, event) :
4502 int trace_empty(struct trace_iterator *iter)
4504 struct ring_buffer_iter *buf_iter;
4507 /* If we are looking at one CPU buffer, only check that one */
4508 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4509 cpu = iter->cpu_file;
4510 buf_iter = trace_buffer_iter(iter, cpu);
4512 if (!ring_buffer_iter_empty(buf_iter))
4515 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4521 for_each_tracing_cpu(cpu) {
4522 buf_iter = trace_buffer_iter(iter, cpu);
4524 if (!ring_buffer_iter_empty(buf_iter))
4527 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4535 /* Called with trace_event_read_lock() held. */
4536 enum print_line_t print_trace_line(struct trace_iterator *iter)
4538 struct trace_array *tr = iter->tr;
4539 unsigned long trace_flags = tr->trace_flags;
4540 enum print_line_t ret;
4542 if (iter->lost_events) {
4543 if (iter->lost_events == (unsigned long)-1)
4544 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4547 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4548 iter->cpu, iter->lost_events);
4549 if (trace_seq_has_overflowed(&iter->seq))
4550 return TRACE_TYPE_PARTIAL_LINE;
4553 if (iter->trace && iter->trace->print_line) {
4554 ret = iter->trace->print_line(iter);
4555 if (ret != TRACE_TYPE_UNHANDLED)
4559 if (iter->ent->type == TRACE_BPUTS &&
4560 trace_flags & TRACE_ITER_PRINTK &&
4561 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4562 return trace_print_bputs_msg_only(iter);
4564 if (iter->ent->type == TRACE_BPRINT &&
4565 trace_flags & TRACE_ITER_PRINTK &&
4566 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4567 return trace_print_bprintk_msg_only(iter);
4569 if (iter->ent->type == TRACE_PRINT &&
4570 trace_flags & TRACE_ITER_PRINTK &&
4571 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4572 return trace_print_printk_msg_only(iter);
4574 if (trace_flags & TRACE_ITER_BIN)
4575 return print_bin_fmt(iter);
4577 if (trace_flags & TRACE_ITER_HEX)
4578 return print_hex_fmt(iter);
4580 if (trace_flags & TRACE_ITER_RAW)
4581 return print_raw_fmt(iter);
4583 return print_trace_fmt(iter);
4586 void trace_latency_header(struct seq_file *m)
4588 struct trace_iterator *iter = m->private;
4589 struct trace_array *tr = iter->tr;
4591 /* print nothing if the buffers are empty */
4592 if (trace_empty(iter))
4595 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4596 print_trace_header(m, iter);
4598 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4599 print_lat_help_header(m);
4602 void trace_default_header(struct seq_file *m)
4604 struct trace_iterator *iter = m->private;
4605 struct trace_array *tr = iter->tr;
4606 unsigned long trace_flags = tr->trace_flags;
4608 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4611 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4612 /* print nothing if the buffers are empty */
4613 if (trace_empty(iter))
4615 print_trace_header(m, iter);
4616 if (!(trace_flags & TRACE_ITER_VERBOSE))
4617 print_lat_help_header(m);
4619 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4620 if (trace_flags & TRACE_ITER_IRQ_INFO)
4621 print_func_help_header_irq(iter->array_buffer,
4624 print_func_help_header(iter->array_buffer, m,
4630 static void test_ftrace_alive(struct seq_file *m)
4632 if (!ftrace_is_dead())
4634 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4635 "# MAY BE MISSING FUNCTION EVENTS\n");
4638 #ifdef CONFIG_TRACER_MAX_TRACE
4639 static void show_snapshot_main_help(struct seq_file *m)
4641 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4642 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4643 "# Takes a snapshot of the main buffer.\n"
4644 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4645 "# (Doesn't have to be '2' works with any number that\n"
4646 "# is not a '0' or '1')\n");
4649 static void show_snapshot_percpu_help(struct seq_file *m)
4651 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4652 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4653 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4654 "# Takes a snapshot of the main buffer for this cpu.\n");
4656 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4657 "# Must use main snapshot file to allocate.\n");
4659 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4660 "# (Doesn't have to be '2' works with any number that\n"
4661 "# is not a '0' or '1')\n");
4664 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4666 if (iter->tr->allocated_snapshot)
4667 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4669 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4671 seq_puts(m, "# Snapshot commands:\n");
4672 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4673 show_snapshot_main_help(m);
4675 show_snapshot_percpu_help(m);
4678 /* Should never be called */
4679 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4682 static int s_show(struct seq_file *m, void *v)
4684 struct trace_iterator *iter = v;
4687 if (iter->ent == NULL) {
4689 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4691 test_ftrace_alive(m);
4693 if (iter->snapshot && trace_empty(iter))
4694 print_snapshot_help(m, iter);
4695 else if (iter->trace && iter->trace->print_header)
4696 iter->trace->print_header(m);
4698 trace_default_header(m);
4700 } else if (iter->leftover) {
4702 * If we filled the seq_file buffer earlier, we
4703 * want to just show it now.
4705 ret = trace_print_seq(m, &iter->seq);
4707 /* ret should this time be zero, but you never know */
4708 iter->leftover = ret;
4711 print_trace_line(iter);
4712 ret = trace_print_seq(m, &iter->seq);
4714 * If we overflow the seq_file buffer, then it will
4715 * ask us for this data again at start up.
4717 * ret is 0 if seq_file write succeeded.
4720 iter->leftover = ret;
4727 * Should be used after trace_array_get(), trace_types_lock
4728 * ensures that i_cdev was already initialized.
4730 static inline int tracing_get_cpu(struct inode *inode)
4732 if (inode->i_cdev) /* See trace_create_cpu_file() */
4733 return (long)inode->i_cdev - 1;
4734 return RING_BUFFER_ALL_CPUS;
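/*
 * The per-CPU files store "cpu + 1" in i_cdev when they are created, so a
 * NULL i_cdev (the top level files) maps to RING_BUFFER_ALL_CPUS and
 * anything else decodes back to the CPU number.
 */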
4737 static const struct seq_operations tracer_seq_ops = {
4744 static struct trace_iterator *
4745 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4747 struct trace_array *tr = inode->i_private;
4748 struct trace_iterator *iter;
4751 if (tracing_disabled)
4752 return ERR_PTR(-ENODEV);
4754 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4756 return ERR_PTR(-ENOMEM);
4758 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4760 if (!iter->buffer_iter)
4764 * trace_find_next_entry() may need to save off iter->ent.
4765 * It will place it into the iter->temp buffer. As most
4766 * events are less than 128, allocate a buffer of that size.
4767 * If one is greater, then trace_find_next_entry() will
4768 * allocate a new buffer to adjust for the bigger iter->ent.
4769 * It's not critical if it fails to get allocated here.
4771 iter->temp = kmalloc(128, GFP_KERNEL);
4773 iter->temp_size = 128;
4776 * trace_event_printf() may need to modify given format
4777 * string to replace %p with %px so that it shows real address
4778 * instead of a hashed value. However, that is only needed for event
4779 * tracing; other tracers may not need it. Defer the allocation
4780 * until it is needed.
4786 * We make a copy of the current tracer to avoid concurrent
4787 * changes on it while we are reading.
4789 mutex_lock(&trace_types_lock);
4790 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4794 *iter->trace = *tr->current_trace;
4796 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4801 #ifdef CONFIG_TRACER_MAX_TRACE
4802 /* Currently only the top directory has a snapshot */
4803 if (tr->current_trace->print_max || snapshot)
4804 iter->array_buffer = &tr->max_buffer;
4807 iter->array_buffer = &tr->array_buffer;
4808 iter->snapshot = snapshot;
4810 iter->cpu_file = tracing_get_cpu(inode);
4811 mutex_init(&iter->mutex);
4813 /* Notify the tracer early; before we stop tracing. */
4814 if (iter->trace->open)
4815 iter->trace->open(iter);
4817 /* Annotate start of buffers if we had overruns */
4818 if (ring_buffer_overruns(iter->array_buffer->buffer))
4819 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4821 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4822 if (trace_clocks[tr->clock_id].in_ns)
4823 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4826 * If pause-on-trace is enabled, then stop the trace while
4827 * dumping, unless this is the "snapshot" file
4829 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4830 tracing_stop_tr(tr);
4832 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4833 for_each_tracing_cpu(cpu) {
4834 iter->buffer_iter[cpu] =
4835 ring_buffer_read_prepare(iter->array_buffer->buffer,
4838 ring_buffer_read_prepare_sync();
4839 for_each_tracing_cpu(cpu) {
4840 ring_buffer_read_start(iter->buffer_iter[cpu]);
4841 tracing_iter_reset(iter, cpu);
4844 cpu = iter->cpu_file;
4845 iter->buffer_iter[cpu] =
4846 ring_buffer_read_prepare(iter->array_buffer->buffer,
4848 ring_buffer_read_prepare_sync();
4849 ring_buffer_read_start(iter->buffer_iter[cpu]);
4850 tracing_iter_reset(iter, cpu);
4853 mutex_unlock(&trace_types_lock);
4858 mutex_unlock(&trace_types_lock);
4861 kfree(iter->buffer_iter);
4863 seq_release_private(inode, file);
4864 return ERR_PTR(-ENOMEM);
4867 int tracing_open_generic(struct inode *inode, struct file *filp)
4871 ret = tracing_check_open_get_tr(NULL);
4875 filp->private_data = inode->i_private;
4879 bool tracing_is_disabled(void)
4881 return (tracing_disabled) ? true : false;
4885 * Open and update trace_array ref count.
4886 * Must have the current trace_array passed to it.
4888 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4890 struct trace_array *tr = inode->i_private;
4893 ret = tracing_check_open_get_tr(tr);
4897 filp->private_data = inode->i_private;
4903 * The private pointer of the inode is the trace_event_file.
4904 * Update the tr ref count associated to it.
4906 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4908 struct trace_event_file *file = inode->i_private;
4911 ret = tracing_check_open_get_tr(file->tr);
4915 filp->private_data = inode->i_private;
4920 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4922 struct trace_event_file *file = inode->i_private;
4924 trace_array_put(file->tr);
4929 static int tracing_mark_open(struct inode *inode, struct file *filp)
4931 stream_open(inode, filp);
4932 return tracing_open_generic_tr(inode, filp);
4935 static int tracing_release(struct inode *inode, struct file *file)
4937 struct trace_array *tr = inode->i_private;
4938 struct seq_file *m = file->private_data;
4939 struct trace_iterator *iter;
4942 if (!(file->f_mode & FMODE_READ)) {
4943 trace_array_put(tr);
4947 /* Writes do not use seq_file */
4949 mutex_lock(&trace_types_lock);
4951 for_each_tracing_cpu(cpu) {
4952 if (iter->buffer_iter[cpu])
4953 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4956 if (iter->trace && iter->trace->close)
4957 iter->trace->close(iter);
4959 if (!iter->snapshot && tr->stop_count)
4960 /* reenable tracing if it was previously enabled */
4961 tracing_start_tr(tr);
4963 __trace_array_put(tr);
4965 mutex_unlock(&trace_types_lock);
4967 mutex_destroy(&iter->mutex);
4968 free_cpumask_var(iter->started);
4972 kfree(iter->buffer_iter);
4973 seq_release_private(inode, file);
4978 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4980 struct trace_array *tr = inode->i_private;
4982 trace_array_put(tr);
4986 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4988 struct trace_array *tr = inode->i_private;
4990 trace_array_put(tr);
4992 return single_release(inode, file);
4995 static int tracing_open(struct inode *inode, struct file *file)
4997 struct trace_array *tr = inode->i_private;
4998 struct trace_iterator *iter;
5001 ret = tracing_check_open_get_tr(tr);
5005 /* If this file was open for write, then erase contents */
5006 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5007 int cpu = tracing_get_cpu(inode);
5008 struct array_buffer *trace_buf = &tr->array_buffer;
5010 #ifdef CONFIG_TRACER_MAX_TRACE
5011 if (tr->current_trace->print_max)
5012 trace_buf = &tr->max_buffer;
5015 if (cpu == RING_BUFFER_ALL_CPUS)
5016 tracing_reset_online_cpus(trace_buf);
5018 tracing_reset_cpu(trace_buf, cpu);
5021 if (file->f_mode & FMODE_READ) {
5022 iter = __tracing_open(inode, file, false);
5024 ret = PTR_ERR(iter);
5025 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5026 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5030 trace_array_put(tr);
5036 * Some tracers are not suitable for instance buffers.
5037 * A tracer is always available for the global array (toplevel)
5038 * or if it explicitly states that it is.
5041 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5043 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5046 /* Find the next tracer that this trace array may use */
5047 static struct tracer *
5048 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5050 while (t && !trace_ok_for_array(t, tr))
5057 t_next(struct seq_file *m, void *v, loff_t *pos)
5059 struct trace_array *tr = m->private;
5060 struct tracer *t = v;
5065 t = get_tracer_for_array(tr, t->next);
5070 static void *t_start(struct seq_file *m, loff_t *pos)
5072 struct trace_array *tr = m->private;
5076 mutex_lock(&trace_types_lock);
5078 t = get_tracer_for_array(tr, trace_types);
5079 for (; t && l < *pos; t = t_next(m, t, &l))
5085 static void t_stop(struct seq_file *m, void *p)
5087 mutex_unlock(&trace_types_lock);
5090 static int t_show(struct seq_file *m, void *v)
5092 struct tracer *t = v;
5097 seq_puts(m, t->name);
5106 static const struct seq_operations show_traces_seq_ops = {
5113 static int show_traces_open(struct inode *inode, struct file *file)
5115 struct trace_array *tr = inode->i_private;
5119 ret = tracing_check_open_get_tr(tr);
5123 ret = seq_open(file, &show_traces_seq_ops);
5125 trace_array_put(tr);
5129 m = file->private_data;
5135 static int show_traces_release(struct inode *inode, struct file *file)
5137 struct trace_array *tr = inode->i_private;
5139 trace_array_put(tr);
5140 return seq_release(inode, file);
5144 tracing_write_stub(struct file *filp, const char __user *ubuf,
5145 size_t count, loff_t *ppos)
5150 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5154 if (file->f_mode & FMODE_READ)
5155 ret = seq_lseek(file, offset, whence);
5157 file->f_pos = ret = 0;
5162 static const struct file_operations tracing_fops = {
5163 .open = tracing_open,
5165 .read_iter = seq_read_iter,
5166 .splice_read = generic_file_splice_read,
5167 .write = tracing_write_stub,
5168 .llseek = tracing_lseek,
5169 .release = tracing_release,
5172 static const struct file_operations show_traces_fops = {
5173 .open = show_traces_open,
5175 .llseek = seq_lseek,
5176 .release = show_traces_release,
5180 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5181 size_t count, loff_t *ppos)
5183 struct trace_array *tr = file_inode(filp)->i_private;
5187 len = snprintf(NULL, 0, "%*pb\n",
5188 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5189 mask_str = kmalloc(len, GFP_KERNEL);
5193 len = snprintf(mask_str, len, "%*pb\n",
5194 cpumask_pr_args(tr->tracing_cpumask));
5199 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5207 int tracing_set_cpumask(struct trace_array *tr,
5208 cpumask_var_t tracing_cpumask_new)
5215 local_irq_disable();
5216 arch_spin_lock(&tr->max_lock);
5217 for_each_tracing_cpu(cpu) {
5219 * Increase/decrease the disabled counter if we are
5220 * about to flip a bit in the cpumask:
5222 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5223 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5224 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5225 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5226 #ifdef CONFIG_TRACER_MAX_TRACE
5227 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5230 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5231 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5232 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5233 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5234 #ifdef CONFIG_TRACER_MAX_TRACE
5235 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5239 arch_spin_unlock(&tr->max_lock);
5242 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5248 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5249 size_t count, loff_t *ppos)
5251 struct trace_array *tr = file_inode(filp)->i_private;
5252 cpumask_var_t tracing_cpumask_new;
5255 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5258 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5262 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5266 free_cpumask_var(tracing_cpumask_new);
5271 free_cpumask_var(tracing_cpumask_new);
5276 static const struct file_operations tracing_cpumask_fops = {
5277 .open = tracing_open_generic_tr,
5278 .read = tracing_cpumask_read,
5279 .write = tracing_cpumask_write,
5280 .release = tracing_release_generic_tr,
5281 .llseek = generic_file_llseek,
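/*
 * Illustrative only (paths assume tracefs at /sys/kernel/tracing): the
 * cpumask is read and written as a standard bitmask string, e.g.
 *
 *   echo 3 > tracing_cpumask    # trace only CPUs 0 and 1
 *   cat tracing_cpumask         # reads back "3"
 */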
5284 static int tracing_trace_options_show(struct seq_file *m, void *v)
5286 struct tracer_opt *trace_opts;
5287 struct trace_array *tr = m->private;
5291 mutex_lock(&trace_types_lock);
5292 tracer_flags = tr->current_trace->flags->val;
5293 trace_opts = tr->current_trace->flags->opts;
5295 for (i = 0; trace_options[i]; i++) {
5296 if (tr->trace_flags & (1 << i))
5297 seq_printf(m, "%s\n", trace_options[i]);
5299 seq_printf(m, "no%s\n", trace_options[i]);
5302 for (i = 0; trace_opts[i].name; i++) {
5303 if (tracer_flags & trace_opts[i].bit)
5304 seq_printf(m, "%s\n", trace_opts[i].name);
5306 seq_printf(m, "no%s\n", trace_opts[i].name);
5308 mutex_unlock(&trace_types_lock);
5313 static int __set_tracer_option(struct trace_array *tr,
5314 struct tracer_flags *tracer_flags,
5315 struct tracer_opt *opts, int neg)
5317 struct tracer *trace = tracer_flags->trace;
5320 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5325 tracer_flags->val &= ~opts->bit;
5327 tracer_flags->val |= opts->bit;
5331 /* Try to assign a tracer specific option */
5332 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5334 struct tracer *trace = tr->current_trace;
5335 struct tracer_flags *tracer_flags = trace->flags;
5336 struct tracer_opt *opts = NULL;
5339 for (i = 0; tracer_flags->opts[i].name; i++) {
5340 opts = &tracer_flags->opts[i];
5342 if (strcmp(cmp, opts->name) == 0)
5343 return __set_tracer_option(tr, trace->flags, opts, neg);
5349 /* Some tracers require overwrite to stay enabled */
5350 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5352 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5358 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5362 if ((mask == TRACE_ITER_RECORD_TGID) ||
5363 (mask == TRACE_ITER_RECORD_CMD))
5364 lockdep_assert_held(&event_mutex);
5366 /* do nothing if flag is already set */
5367 if (!!(tr->trace_flags & mask) == !!enabled)
5370 /* Give the tracer a chance to approve the change */
5371 if (tr->current_trace->flag_changed)
5372 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5376 tr->trace_flags |= mask;
5378 tr->trace_flags &= ~mask;
5380 if (mask == TRACE_ITER_RECORD_CMD)
5381 trace_event_enable_cmd_record(enabled);
5383 if (mask == TRACE_ITER_RECORD_TGID) {
5385 tgid_map_max = pid_max;
5386 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5390 * Pairs with smp_load_acquire() in
5391 * trace_find_tgid_ptr() to ensure that if it observes
5392 * the tgid_map we just allocated then it also observes
5393 * the corresponding tgid_map_max value.
5395 smp_store_release(&tgid_map, map);
5398 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5402 trace_event_enable_tgid_record(enabled);
5405 if (mask == TRACE_ITER_EVENT_FORK)
5406 trace_event_follow_fork(tr, enabled);
5408 if (mask == TRACE_ITER_FUNC_FORK)
5409 ftrace_pid_follow_fork(tr, enabled);
5411 if (mask == TRACE_ITER_OVERWRITE) {
5412 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5413 #ifdef CONFIG_TRACER_MAX_TRACE
5414 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5418 if (mask == TRACE_ITER_PRINTK) {
5419 trace_printk_start_stop_comm(enabled);
5420 trace_printk_control(enabled);
5426 int trace_set_options(struct trace_array *tr, char *option)
5431 size_t orig_len = strlen(option);
5434 cmp = strstrip(option);
5436 len = str_has_prefix(cmp, "no");
5442 mutex_lock(&event_mutex);
5443 mutex_lock(&trace_types_lock);
5445 ret = match_string(trace_options, -1, cmp);
5446 /* If no option could be set, test the specific tracer options */
5448 ret = set_tracer_option(tr, cmp, neg);
5450 ret = set_tracer_flag(tr, 1 << ret, !neg);
5452 mutex_unlock(&trace_types_lock);
5453 mutex_unlock(&event_mutex);
5456 * If the first trailing whitespace is replaced with '\0' by strstrip,
5457 * turn it back into a space.
5459 if (orig_len > strlen(option))
5460 option[strlen(option)] = ' ';
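/*
 * Illustrative only: trace_set_options() handles both core flags and
 * tracer-specific flags, optionally prefixed with "no", e.g.
 *
 *   echo stacktrace > trace_options    # set a core flag
 *   echo nosym-addr > trace_options    # clear a core flag
 *
 * The same comma-separated options may be passed at boot via the
 * trace_options= kernel command line parameter (see below).
 */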
5465 static void __init apply_trace_boot_options(void)
5467 char *buf = trace_boot_options_buf;
5471 option = strsep(&buf, ",");
5477 trace_set_options(&global_trace, option);
5479 /* Put back the comma to allow this to be called again */
5486 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5487 size_t cnt, loff_t *ppos)
5489 struct seq_file *m = filp->private_data;
5490 struct trace_array *tr = m->private;
5494 if (cnt >= sizeof(buf))
5497 if (copy_from_user(buf, ubuf, cnt))
5502 ret = trace_set_options(tr, buf);
5511 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5513 struct trace_array *tr = inode->i_private;
5516 ret = tracing_check_open_get_tr(tr);
5520 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5522 trace_array_put(tr);
5527 static const struct file_operations tracing_iter_fops = {
5528 .open = tracing_trace_options_open,
5530 .llseek = seq_lseek,
5531 .release = tracing_single_release_tr,
5532 .write = tracing_trace_options_write,
5535 static const char readme_msg[] =
5536 "tracing mini-HOWTO:\n\n"
5537 "# echo 0 > tracing_on : quick way to disable tracing\n"
5538 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5539 " Important files:\n"
5540 " trace\t\t\t- The static contents of the buffer\n"
5541 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5542 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5543 " current_tracer\t- function and latency tracers\n"
5544 " available_tracers\t- list of configured tracers for current_tracer\n"
5545 " error_log\t- error log for failed commands (that support it)\n"
5546 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5547 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5548 " trace_clock\t\t- change the clock used to order events\n"
5549 " local: Per cpu clock but may not be synced across CPUs\n"
5550 " global: Synced across CPUs but slows tracing down.\n"
5551 " counter: Not a clock, but just an increment\n"
5552 " uptime: Jiffy counter from time of boot\n"
5553 " perf: Same clock that perf events use\n"
5554 #ifdef CONFIG_X86_64
5555 " x86-tsc: TSC cycle counter\n"
5557 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5558 " delta: Delta difference against a buffer-wide timestamp\n"
5559 " absolute: Absolute (standalone) timestamp\n"
5560 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5561 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5562 " tracing_cpumask\t- Limit which CPUs to trace\n"
5563 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5564 "\t\t\t Remove sub-buffer with rmdir\n"
5565 " trace_options\t\t- Set format or modify how tracing happens\n"
5566 "\t\t\t Disable an option by prefixing 'no' to the\n"
5567 "\t\t\t option name\n"
5568 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5569 #ifdef CONFIG_DYNAMIC_FTRACE
5570 "\n available_filter_functions - list of functions that can be filtered on\n"
5571 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5572 "\t\t\t functions\n"
5573 "\t accepts: func_full_name or glob-matching-pattern\n"
5574 "\t modules: Can select a group via module\n"
5575 "\t Format: :mod:<module-name>\n"
5576 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5577 "\t triggers: a command to perform when function is hit\n"
5578 "\t Format: <function>:<trigger>[:count]\n"
5579 "\t trigger: traceon, traceoff\n"
5580 "\t\t enable_event:<system>:<event>\n"
5581 "\t\t disable_event:<system>:<event>\n"
5582 #ifdef CONFIG_STACKTRACE
5585 #ifdef CONFIG_TRACER_SNAPSHOT
5590 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5591 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5592 "\t The first one will disable tracing every time do_fault is hit\n"
5593 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5594 "\t The first time do trap is hit and it disables tracing, the\n"
5595 "\t counter will decrement to 2. If tracing is already disabled,\n"
5596 "\t the counter will not decrement. It only decrements when the\n"
5597 "\t trigger did work\n"
5598 "\t To remove trigger without count:\n"
5599 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5600 "\t To remove trigger with a count:\n"
5601 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5602 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5603 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5604 "\t modules: Can select a group via module command :mod:\n"
5605 "\t Does not accept triggers\n"
5606 #endif /* CONFIG_DYNAMIC_FTRACE */
5607 #ifdef CONFIG_FUNCTION_TRACER
5608 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5610 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5613 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5614 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5615 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5616 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5618 #ifdef CONFIG_TRACER_SNAPSHOT
5619 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5620 "\t\t\t snapshot buffer. Read the contents for more\n"
5621 "\t\t\t information\n"
5623 #ifdef CONFIG_STACK_TRACER
5624 " stack_trace\t\t- Shows the max stack trace when active\n"
5625 " stack_max_size\t- Shows current max stack size that was traced\n"
5626 "\t\t\t Write into this file to reset the max size (trigger a\n"
5627 "\t\t\t new trace)\n"
5628 #ifdef CONFIG_DYNAMIC_FTRACE
5629 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5632 #endif /* CONFIG_STACK_TRACER */
5633 #ifdef CONFIG_DYNAMIC_EVENTS
5634 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5635 "\t\t\t Write into this file to define/undefine new trace events.\n"
5637 #ifdef CONFIG_KPROBE_EVENTS
5638 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5639 "\t\t\t Write into this file to define/undefine new trace events.\n"
5641 #ifdef CONFIG_UPROBE_EVENTS
5642 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5643 "\t\t\t Write into this file to define/undefine new trace events.\n"
5645 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5646 "\t accepts: event-definitions (one definition per line)\n"
5647 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5648 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5649 #ifdef CONFIG_HIST_TRIGGERS
5650 "\t s:[synthetic/]<event> <field> [<field>]\n"
5652 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5653 "\t -:[<group>/][<event>]\n"
5654 #ifdef CONFIG_KPROBE_EVENTS
5655 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5656 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5658 #ifdef CONFIG_UPROBE_EVENTS
5659 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5661 "\t args: <name>=fetcharg[:type]\n"
5662 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5663 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5664 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5666 "\t $stack<index>, $stack, $retval, $comm,\n"
5668 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5669 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5670 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5671 "\t symstr, <type>\\[<array-size>\\]\n"
5672 #ifdef CONFIG_HIST_TRIGGERS
5673 "\t field: <stype> <name>;\n"
5674 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5675 "\t [unsigned] char/int/long\n"
5677 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5678 "\t of the <attached-group>/<attached-event>.\n"
5680 " events/\t\t- Directory containing all trace event subsystems:\n"
5681 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5682 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5683 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5685 " filter\t\t- If set, only events passing filter are traced\n"
5686 " events/<system>/<event>/\t- Directory containing control files for\n"
5688 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5689 " filter\t\t- If set, only events passing filter are traced\n"
5690 " trigger\t\t- If set, a command to perform when event is hit\n"
5691 "\t Format: <trigger>[:count][if <filter>]\n"
5692 "\t trigger: traceon, traceoff\n"
5693 "\t enable_event:<system>:<event>\n"
5694 "\t disable_event:<system>:<event>\n"
5695 #ifdef CONFIG_HIST_TRIGGERS
5696 "\t enable_hist:<system>:<event>\n"
5697 "\t disable_hist:<system>:<event>\n"
5699 #ifdef CONFIG_STACKTRACE
5702 #ifdef CONFIG_TRACER_SNAPSHOT
5705 #ifdef CONFIG_HIST_TRIGGERS
5706 "\t\t hist (see below)\n"
5708 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5709 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5710 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5711 "\t events/block/block_unplug/trigger\n"
5712 "\t The first disables tracing every time block_unplug is hit.\n"
5713 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5714 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5715 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5716 "\t Like function triggers, the counter is only decremented if it\n"
5717 "\t enabled or disabled tracing.\n"
5718 "\t To remove a trigger without a count:\n"
5719 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5720 "\t To remove a trigger with a count:\n"
5721 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5722 "\t Filters can be ignored when removing a trigger.\n"
5723 #ifdef CONFIG_HIST_TRIGGERS
5724 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5725 "\t Format: hist:keys=<field1[,field2,...]>\n"
5726 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5727 "\t [:values=<field1[,field2,...]>]\n"
5728 "\t [:sort=<field1[,field2,...]>]\n"
5729 "\t [:size=#entries]\n"
5730 "\t [:pause][:continue][:clear]\n"
5731 "\t [:name=histname1]\n"
5732 "\t [:<handler>.<action>]\n"
5733 "\t [if <filter>]\n\n"
5734 "\t Note, special fields can be used as well:\n"
5735 "\t common_timestamp - to record current timestamp\n"
5736 "\t common_cpu - to record the CPU the event happened on\n"
5738 "\t A hist trigger variable can be:\n"
5739 "\t - a reference to a field e.g. x=current_timestamp,\n"
5740 "\t - a reference to another variable e.g. y=$x,\n"
5741 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5742 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5744 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5745 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5746 "\t variable reference, field or numeric literal.\n"
5748 "\t When a matching event is hit, an entry is added to a hash\n"
5749 "\t table using the key(s) and value(s) named, and the value of a\n"
5750 "\t sum called 'hitcount' is incremented. Keys and values\n"
5751 "\t correspond to fields in the event's format description. Keys\n"
5752 "\t can be any field, or the special string 'stacktrace'.\n"
5753 "\t Compound keys consisting of up to two fields can be specified\n"
5754 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5755 "\t fields. Sort keys consisting of up to two fields can be\n"
5756 "\t specified using the 'sort' keyword. The sort direction can\n"
5757 "\t be modified by appending '.descending' or '.ascending' to a\n"
5758 "\t sort field. The 'size' parameter can be used to specify more\n"
5759 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5760 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5761 "\t its histogram data will be shared with other triggers of the\n"
5762 "\t same name, and trigger hits will update this common data.\n\n"
5763 "\t Reading the 'hist' file for the event will dump the hash\n"
5764 "\t table in its entirety to stdout. If there are multiple hist\n"
5765 "\t triggers attached to an event, there will be a table for each\n"
5766 "\t trigger in the output. The table displayed for a named\n"
5767 "\t trigger will be the same as any other instance having the\n"
5768 "\t same name. The default format used to display a given field\n"
5769 "\t can be modified by appending any of the following modifiers\n"
5770 "\t to the field name, as applicable:\n\n"
5771 "\t .hex display a number as a hex value\n"
5772 "\t .sym display an address as a symbol\n"
5773 "\t .sym-offset display an address as a symbol and offset\n"
5774 "\t .execname display a common_pid as a program name\n"
5775 "\t .syscall display a syscall id as a syscall name\n"
5776 "\t .log2 display log2 value rather than raw number\n"
5777 "\t .buckets=size display values in groups of size rather than raw number\n"
5778 "\t .usecs display a common_timestamp in microseconds\n"
5779 "\t .percent display a number of percentage value\n"
5780 "\t .graph display a bar-graph of a value\n\n"
5781 "\t The 'pause' parameter can be used to pause an existing hist\n"
5782 "\t trigger or to start a hist trigger but not log any events\n"
5783 "\t until told to do so. 'continue' can be used to start or\n"
5784 "\t restart a paused hist trigger.\n\n"
5785 "\t The 'clear' parameter will clear the contents of a running\n"
5786 "\t hist trigger and leave its current paused/active state\n"
5788 "\t The enable_hist and disable_hist triggers can be used to\n"
5789 "\t have one event conditionally start and stop another event's\n"
5790 "\t already-attached hist trigger. The syntax is analogous to\n"
5791 "\t the enable_event and disable_event triggers.\n\n"
5792 "\t Hist trigger handlers and actions are executed whenever a\n"
5793 "\t a histogram entry is added or updated. They take the form:\n\n"
5794 "\t <handler>.<action>\n\n"
5795 "\t The available handlers are:\n\n"
5796 "\t onmatch(matching.event) - invoke on addition or update\n"
5797 "\t onmax(var) - invoke if var exceeds current max\n"
5798 "\t onchange(var) - invoke action if var changes\n\n"
5799 "\t The available actions are:\n\n"
5800 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5801 "\t save(field,...) - save current event fields\n"
5802 #ifdef CONFIG_TRACER_SNAPSHOT
5803 "\t snapshot() - snapshot the trace buffer\n\n"
5805 #ifdef CONFIG_SYNTH_EVENTS
5806 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5807 "\t Write into this file to define/undefine new synthetic events.\n"
5808 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5814 tracing_readme_read(struct file *filp, char __user *ubuf,
5815 size_t cnt, loff_t *ppos)
5817 return simple_read_from_buffer(ubuf, cnt, ppos,
5818 readme_msg, strlen(readme_msg));
5821 static const struct file_operations tracing_readme_fops = {
5822 .open = tracing_open_generic,
5823 .read = tracing_readme_read,
5824 .llseek = generic_file_llseek,
5827 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5831 return trace_find_tgid_ptr(pid);
5834 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5838 return trace_find_tgid_ptr(pid);
5841 static void saved_tgids_stop(struct seq_file *m, void *v)
5845 static int saved_tgids_show(struct seq_file *m, void *v)
5847 int *entry = (int *)v;
5848 int pid = entry - tgid_map;
5854 seq_printf(m, "%d %d\n", pid, tgid);
5858 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5859 .start = saved_tgids_start,
5860 .stop = saved_tgids_stop,
5861 .next = saved_tgids_next,
5862 .show = saved_tgids_show,
5865 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5869 ret = tracing_check_open_get_tr(NULL);
5873 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5877 static const struct file_operations tracing_saved_tgids_fops = {
5878 .open = tracing_saved_tgids_open,
5880 .llseek = seq_lseek,
5881 .release = seq_release,
5884 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5886 unsigned int *ptr = v;
5888 if (*pos || m->count)
5893 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5895 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5904 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5910 arch_spin_lock(&trace_cmdline_lock);
5912 v = &savedcmd->map_cmdline_to_pid[0];
5914 v = saved_cmdlines_next(m, v, &l);
5922 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5924 arch_spin_unlock(&trace_cmdline_lock);
5928 static int saved_cmdlines_show(struct seq_file *m, void *v)
5930 char buf[TASK_COMM_LEN];
5931 unsigned int *pid = v;
5933 __trace_find_cmdline(*pid, buf);
5934 seq_printf(m, "%d %s\n", *pid, buf);
5938 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5939 .start = saved_cmdlines_start,
5940 .next = saved_cmdlines_next,
5941 .stop = saved_cmdlines_stop,
5942 .show = saved_cmdlines_show,
5945 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5949 ret = tracing_check_open_get_tr(NULL);
5953 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5956 static const struct file_operations tracing_saved_cmdlines_fops = {
5957 .open = tracing_saved_cmdlines_open,
5959 .llseek = seq_lseek,
5960 .release = seq_release,
5964 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5965 size_t cnt, loff_t *ppos)
5971 arch_spin_lock(&trace_cmdline_lock);
5972 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5973 arch_spin_unlock(&trace_cmdline_lock);
5976 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5979 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5981 kfree(s->saved_cmdlines);
5982 kfree(s->map_cmdline_to_pid);
5986 static int tracing_resize_saved_cmdlines(unsigned int val)
5988 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5990 s = kmalloc(sizeof(*s), GFP_KERNEL);
5994 if (allocate_cmdlines_buffer(val, s) < 0) {
6000 arch_spin_lock(&trace_cmdline_lock);
6001 savedcmd_temp = savedcmd;
6003 arch_spin_unlock(&trace_cmdline_lock);
6005 free_saved_cmdlines_buffer(savedcmd_temp);
6011 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6012 size_t cnt, loff_t *ppos)
6017 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6021 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
6022 if (!val || val > PID_MAX_DEFAULT)
6025 ret = tracing_resize_saved_cmdlines((unsigned int)val);
6034 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6035 .open = tracing_open_generic,
6036 .read = tracing_saved_cmdlines_size_read,
6037 .write = tracing_saved_cmdlines_size_write,
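/*
 * Illustrative only:
 *
 *   echo 1024 > saved_cmdlines_size   # remember comms for up to 1024 PIDs
 *   cat saved_cmdlines                # dumps "<pid> <comm>" pairs
 */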
6040 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6041 static union trace_eval_map_item *
6042 update_eval_map(union trace_eval_map_item *ptr)
6044 if (!ptr->map.eval_string) {
6045 if (ptr->tail.next) {
6046 ptr = ptr->tail.next;
6047 /* Set ptr to the next real item (skip head) */
6055 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6057 union trace_eval_map_item *ptr = v;
6060 * Paranoid! If ptr points to end, we don't want to increment past it.
6061 * This really should never happen.
6064 ptr = update_eval_map(ptr);
6065 if (WARN_ON_ONCE(!ptr))
6069 ptr = update_eval_map(ptr);
6074 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6076 union trace_eval_map_item *v;
6079 mutex_lock(&trace_eval_mutex);
6081 v = trace_eval_maps;
6085 while (v && l < *pos) {
6086 v = eval_map_next(m, v, &l);
6092 static void eval_map_stop(struct seq_file *m, void *v)
6094 mutex_unlock(&trace_eval_mutex);
6097 static int eval_map_show(struct seq_file *m, void *v)
6099 union trace_eval_map_item *ptr = v;
6101 seq_printf(m, "%s %ld (%s)\n",
6102 ptr->map.eval_string, ptr->map.eval_value,
6108 static const struct seq_operations tracing_eval_map_seq_ops = {
6109 .start = eval_map_start,
6110 .next = eval_map_next,
6111 .stop = eval_map_stop,
6112 .show = eval_map_show,
6115 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6119 ret = tracing_check_open_get_tr(NULL);
6123 return seq_open(filp, &tracing_eval_map_seq_ops);
6126 static const struct file_operations tracing_eval_map_fops = {
6127 .open = tracing_eval_map_open,
6129 .llseek = seq_lseek,
6130 .release = seq_release,
6133 static inline union trace_eval_map_item *
6134 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6136 /* Return tail of array given the head */
6137 return ptr + ptr->head.length + 1;
6141 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6144 struct trace_eval_map **stop;
6145 struct trace_eval_map **map;
6146 union trace_eval_map_item *map_array;
6147 union trace_eval_map_item *ptr;
6152 * The trace_eval_maps contains the map plus a head and tail item,
6153 * where the head holds the module and length of array, and the
6154 * tail holds a pointer to the next list.
6156 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6158 pr_warn("Unable to allocate trace eval mapping\n");
6162 mutex_lock(&trace_eval_mutex);
6164 if (!trace_eval_maps)
6165 trace_eval_maps = map_array;
6167 ptr = trace_eval_maps;
6169 ptr = trace_eval_jmp_to_tail(ptr);
6170 if (!ptr->tail.next)
6172 ptr = ptr->tail.next;
6175 ptr->tail.next = map_array;
6177 map_array->head.mod = mod;
6178 map_array->head.length = len;
6181 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6182 map_array->map = **map;
6185 memset(map_array, 0, sizeof(*map_array));
6187 mutex_unlock(&trace_eval_mutex);
6190 static void trace_create_eval_file(struct dentry *d_tracer)
6192 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6193 NULL, &tracing_eval_map_fops);
6196 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6197 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6198 static inline void trace_insert_eval_map_file(struct module *mod,
6199 struct trace_eval_map **start, int len) { }
6200 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6202 static void trace_insert_eval_map(struct module *mod,
6203 struct trace_eval_map **start, int len)
6205 struct trace_eval_map **map;
6212 trace_event_eval_update(map, len);
6214 trace_insert_eval_map_file(mod, start, len);
6218 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6219 size_t cnt, loff_t *ppos)
6221 struct trace_array *tr = filp->private_data;
6222 char buf[MAX_TRACER_SIZE+2];
6225 mutex_lock(&trace_types_lock);
6226 r = sprintf(buf, "%s\n", tr->current_trace->name);
6227 mutex_unlock(&trace_types_lock);
6229 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6232 int tracer_init(struct tracer *t, struct trace_array *tr)
6234 tracing_reset_online_cpus(&tr->array_buffer);
6238 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6242 for_each_tracing_cpu(cpu)
6243 per_cpu_ptr(buf->data, cpu)->entries = val;
6246 #ifdef CONFIG_TRACER_MAX_TRACE
6247 /* resize @trace_buf to match the per-cpu entry counts of @size_buf */
6248 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6249 struct array_buffer *size_buf, int cpu_id)
6253 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6254 for_each_tracing_cpu(cpu) {
6255 ret = ring_buffer_resize(trace_buf->buffer,
6256 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6259 per_cpu_ptr(trace_buf->data, cpu)->entries =
6260 per_cpu_ptr(size_buf->data, cpu)->entries;
6263 ret = ring_buffer_resize(trace_buf->buffer,
6264 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6266 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6267 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6272 #endif /* CONFIG_TRACER_MAX_TRACE */
6274 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6275 unsigned long size, int cpu)
6280 * If kernel or user changes the size of the ring buffer
6281 * we use the size that was given, and we can forget about
6282 * expanding it later.
6284 ring_buffer_expanded = true;
6286 /* May be called before buffers are initialized */
6287 if (!tr->array_buffer.buffer)
6290 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6294 #ifdef CONFIG_TRACER_MAX_TRACE
6295 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6296 !tr->current_trace->use_max_tr)
6299 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6301 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6302 &tr->array_buffer, cpu);
6305 * AARGH! We are left with different
6306 * size max buffer!!!!
6307 * The max buffer is our "snapshot" buffer.
6308 * When a tracer needs a snapshot (one of the
6309 * latency tracers), it swaps the max buffer
6310 * with the saved snapshot. We succeeded in
6311 * updating the size of the main buffer, but failed to
6312 * update the size of the max buffer. But when we tried
6313 * to reset the main buffer to the original size, we
6314 * failed there too. This is very unlikely to
6315 * happen, but if it does, warn and kill all
6319 tracing_disabled = 1;
6324 if (cpu == RING_BUFFER_ALL_CPUS)
6325 set_buffer_entries(&tr->max_buffer, size);
6327 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6330 #endif /* CONFIG_TRACER_MAX_TRACE */
6332 if (cpu == RING_BUFFER_ALL_CPUS)
6333 set_buffer_entries(&tr->array_buffer, size);
6335 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6340 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6341 unsigned long size, int cpu_id)
6345 mutex_lock(&trace_types_lock);
6347 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6348 /* make sure, this cpu is enabled in the mask */
6349 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6355 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6360 mutex_unlock(&trace_types_lock);
6367 * tracing_update_buffers - used by tracing facility to expand ring buffers
6369 * To save memory on systems where tracing is configured in but never
6370 * used, the ring buffers are initially set to a minimum size. Once
6371 * a user starts to use the tracing facility, they need to grow
6372 * to their default size.
6374 * This function is to be called when a tracer is about to be used.
6376 int tracing_update_buffers(void)
6380 mutex_lock(&trace_types_lock);
6381 if (!ring_buffer_expanded)
6382 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6383 RING_BUFFER_ALL_CPUS);
6384 mutex_unlock(&trace_types_lock);
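/*
 * Editorial note: tracing_set_tracer() below performs the same expansion
 * inline, so the minimal boot-time buffer is grown before a tracer is
 * first switched in.
 */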
6389 struct trace_option_dentry;
6392 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6395 * Used to clear out the tracer before deletion of an instance.
6396 * Must have trace_types_lock held.
6398 static void tracing_set_nop(struct trace_array *tr)
6400 if (tr->current_trace == &nop_trace)
6403 tr->current_trace->enabled--;
6405 if (tr->current_trace->reset)
6406 tr->current_trace->reset(tr);
6408 tr->current_trace = &nop_trace;
6411 static bool tracer_options_updated;
6413 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6415 /* Only enable if the directory has been created already. */
6419 /* Only create trace option files after update_tracer_options finish */
6420 if (!tracer_options_updated)
6423 create_trace_option_files(tr, t);
6426 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6429 #ifdef CONFIG_TRACER_MAX_TRACE
6434 mutex_lock(&trace_types_lock);
6436 if (!ring_buffer_expanded) {
6437 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6438 RING_BUFFER_ALL_CPUS);
6444 for (t = trace_types; t; t = t->next) {
6445 if (strcmp(t->name, buf) == 0)
6452 if (t == tr->current_trace)
6455 #ifdef CONFIG_TRACER_SNAPSHOT
6456 if (t->use_max_tr) {
6457 local_irq_disable();
6458 arch_spin_lock(&tr->max_lock);
6459 if (tr->cond_snapshot)
6461 arch_spin_unlock(&tr->max_lock);
6467 /* Some tracers won't work on kernel command line */
6468 if (system_state < SYSTEM_RUNNING && t->noboot) {
6469 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6474 /* Some tracers are only allowed for the top level buffer */
6475 if (!trace_ok_for_array(t, tr)) {
6480 /* If trace pipe files are being read, we can't change the tracer */
6481 if (tr->trace_ref) {
6486 trace_branch_disable();
6488 tr->current_trace->enabled--;
6490 if (tr->current_trace->reset)
6491 tr->current_trace->reset(tr);
6493 #ifdef CONFIG_TRACER_MAX_TRACE
6494 had_max_tr = tr->current_trace->use_max_tr;
6496 /* Current trace needs to be nop_trace before synchronize_rcu */
6497 tr->current_trace = &nop_trace;
6499 if (had_max_tr && !t->use_max_tr) {
6501 * We need to make sure that the update_max_tr sees that
6502 * current_trace changed to nop_trace to keep it from
6503 * swapping the buffers after we resize it.
6504 * The update_max_tr is called with interrupts disabled,
6505 * so a synchronize_rcu() is sufficient.
6511 if (t->use_max_tr && !tr->allocated_snapshot) {
6512 ret = tracing_alloc_snapshot_instance(tr);
6517 tr->current_trace = &nop_trace;
6521 ret = tracer_init(t, tr);
6526 tr->current_trace = t;
6527 tr->current_trace->enabled++;
6528 trace_branch_enable(tr);
6530 mutex_unlock(&trace_types_lock);
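/*
 * Illustrative only: from user space the tracer is switched by writing a
 * name listed in available_tracers, e.g.
 *
 *   cat available_tracers           # e.g. "function_graph function nop"
 *   echo function > current_tracer
 *   echo nop > current_tracer       # back to no tracer
 */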
6536 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6537 size_t cnt, loff_t *ppos)
6539 struct trace_array *tr = filp->private_data;
6540 char buf[MAX_TRACER_SIZE+1];
6547 if (cnt > MAX_TRACER_SIZE)
6548 cnt = MAX_TRACER_SIZE;
6550 if (copy_from_user(buf, ubuf, cnt))
6557 err = tracing_set_tracer(tr, name);
6567 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6568 size_t cnt, loff_t *ppos)
6573 r = snprintf(buf, sizeof(buf), "%ld\n",
6574 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6575 if (r > sizeof(buf))
6577 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6581 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6582 size_t cnt, loff_t *ppos)
6587 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6597 tracing_thresh_read(struct file *filp, char __user *ubuf,
6598 size_t cnt, loff_t *ppos)
6600 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6604 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6605 size_t cnt, loff_t *ppos)
6607 struct trace_array *tr = filp->private_data;
6610 mutex_lock(&trace_types_lock);
6611 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6615 if (tr->current_trace->update_thresh) {
6616 ret = tr->current_trace->update_thresh(tr);
6623 mutex_unlock(&trace_types_lock);
6628 #ifdef CONFIG_TRACER_MAX_TRACE
6631 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6632 size_t cnt, loff_t *ppos)
6634 struct trace_array *tr = filp->private_data;
6636 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6640 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6641 size_t cnt, loff_t *ppos)
6643 struct trace_array *tr = filp->private_data;
6645 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6650 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6652 if (cpu == RING_BUFFER_ALL_CPUS) {
6653 if (cpumask_empty(tr->pipe_cpumask)) {
6654 cpumask_setall(tr->pipe_cpumask);
6657 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6658 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6664 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6666 if (cpu == RING_BUFFER_ALL_CPUS) {
6667 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6668 cpumask_clear(tr->pipe_cpumask);
6670 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6671 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6675 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6677 struct trace_array *tr = inode->i_private;
6678 struct trace_iterator *iter;
6682 ret = tracing_check_open_get_tr(tr);
6686 mutex_lock(&trace_types_lock);
6687 cpu = tracing_get_cpu(inode);
6688 ret = open_pipe_on_cpu(tr, cpu);
6690 goto fail_pipe_on_cpu;
6692 /* create a buffer to store the information to pass to userspace */
6693 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6696 goto fail_alloc_iter;
6699 trace_seq_init(&iter->seq);
6700 iter->trace = tr->current_trace;
6702 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6707 /* trace pipe does not show start of buffer */
6708 cpumask_setall(iter->started);
6710 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6711 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6713 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6714 if (trace_clocks[tr->clock_id].in_ns)
6715 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6718 iter->array_buffer = &tr->array_buffer;
6719 iter->cpu_file = cpu;
6720 mutex_init(&iter->mutex);
6721 filp->private_data = iter;
6723 if (iter->trace->pipe_open)
6724 iter->trace->pipe_open(iter);
6726 nonseekable_open(inode, filp);
6730 mutex_unlock(&trace_types_lock);
6736 close_pipe_on_cpu(tr, cpu);
6738 __trace_array_put(tr);
6739 mutex_unlock(&trace_types_lock);
6743 static int tracing_release_pipe(struct inode *inode, struct file *file)
6745 struct trace_iterator *iter = file->private_data;
6746 struct trace_array *tr = inode->i_private;
6748 mutex_lock(&trace_types_lock);
6752 if (iter->trace->pipe_close)
6753 iter->trace->pipe_close(iter);
6754 close_pipe_on_cpu(tr, iter->cpu_file);
6755 mutex_unlock(&trace_types_lock);
6757 free_cpumask_var(iter->started);
6760 mutex_destroy(&iter->mutex);
6763 trace_array_put(tr);
6769 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6771 struct trace_array *tr = iter->tr;
6773 /* Iterators are static, they should be filled or empty */
6774 if (trace_buffer_iter(iter, iter->cpu_file))
6775 return EPOLLIN | EPOLLRDNORM;
6777 if (tr->trace_flags & TRACE_ITER_BLOCK)
6779 * Always select as readable when in blocking mode
6781 return EPOLLIN | EPOLLRDNORM;
6783 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6784 filp, poll_table, iter->tr->buffer_percent);
6788 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6790 struct trace_iterator *iter = filp->private_data;
6792 return trace_poll(iter, filp, poll_table);
6795 /* Must be called with iter->mutex held. */
6796 static int tracing_wait_pipe(struct file *filp)
6798 struct trace_iterator *iter = filp->private_data;
6801 while (trace_empty(iter)) {
6803 if ((filp->f_flags & O_NONBLOCK)) {
6808 * We block until we read something and tracing is disabled.
6809 * We still block if tracing is disabled, but we have never
6810 * read anything. This allows a user to cat this file, and
6811 * then enable tracing. But after we have read something,
6812 * we give an EOF when tracing is again disabled.
6814 * iter->pos will be 0 if we haven't read anything.
6816 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6819 mutex_unlock(&iter->mutex);
6821 ret = wait_on_pipe(iter, 0);
6823 mutex_lock(&iter->mutex);
6836 tracing_read_pipe(struct file *filp, char __user *ubuf,
6837 size_t cnt, loff_t *ppos)
6839 struct trace_iterator *iter = filp->private_data;
6843 * Avoid more than one consumer on a single file descriptor
6844 * This is just a matter of trace coherency; the ring buffer itself
6847 mutex_lock(&iter->mutex);
6849 /* return any leftover data */
6850 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6854 trace_seq_init(&iter->seq);
6856 if (iter->trace->read) {
6857 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6863 sret = tracing_wait_pipe(filp);
6867 /* stop when tracing is finished */
6868 if (trace_empty(iter)) {
6873 if (cnt >= PAGE_SIZE)
6874 cnt = PAGE_SIZE - 1;
6876 /* reset all but tr, trace, and overruns */
6877 trace_iterator_reset(iter);
6878 cpumask_clear(iter->started);
6879 trace_seq_init(&iter->seq);
6881 trace_event_read_lock();
6882 trace_access_lock(iter->cpu_file);
6883 while (trace_find_next_entry_inc(iter) != NULL) {
6884 enum print_line_t ret;
6885 int save_len = iter->seq.seq.len;
6887 ret = print_trace_line(iter);
6888 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6890 * If one print_trace_line() fills the entire trace_seq in one shot,
6891 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6892 * In this case, we need to consume it, otherwise the loop will peek
6893 * this event next time, resulting in an infinite loop.
6895 if (save_len == 0) {
6897 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6898 trace_consume(iter);
6902 /* In other cases, don't print partial lines */
6903 iter->seq.seq.len = save_len;
6906 if (ret != TRACE_TYPE_NO_CONSUME)
6907 trace_consume(iter);
6909 if (trace_seq_used(&iter->seq) >= cnt)
6913 * Setting the full flag means we reached the trace_seq buffer
6914 * size and we should have left via the partial output condition above.
6915 * One of the trace_seq_* functions is not used properly.
6917 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6920 trace_access_unlock(iter->cpu_file);
6921 trace_event_read_unlock();
6923 /* Now copy what we have to the user */
6924 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6925 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6926 trace_seq_init(&iter->seq);
6929 * If there was nothing to send to user, in spite of consuming trace
6930 * entries, go back to wait for more entries.
6936 mutex_unlock(&iter->mutex);
6941 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6944 __free_page(spd->pages[idx]);
6948 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6954 /* Seq buffer is page-sized, exactly what we need. */
6956 save_len = iter->seq.seq.len;
6957 ret = print_trace_line(iter);
6959 if (trace_seq_has_overflowed(&iter->seq)) {
6960 iter->seq.seq.len = save_len;
6965 * This should not be hit, because it should only
6966 * be set if the iter->seq overflowed. But check it
6967 * anyway to be safe.
6969 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6970 iter->seq.seq.len = save_len;
6974 count = trace_seq_used(&iter->seq) - save_len;
6977 iter->seq.seq.len = save_len;
6981 if (ret != TRACE_TYPE_NO_CONSUME)
6982 trace_consume(iter);
6984 if (!trace_find_next_entry_inc(iter)) {
6994 static ssize_t tracing_splice_read_pipe(struct file *filp,
6996 struct pipe_inode_info *pipe,
7000 struct page *pages_def[PIPE_DEF_BUFFERS];
7001 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7002 struct trace_iterator *iter = filp->private_data;
7003 struct splice_pipe_desc spd = {
7005 .partial = partial_def,
7006 .nr_pages = 0, /* This gets updated below. */
7007 .nr_pages_max = PIPE_DEF_BUFFERS,
7008 .ops = &default_pipe_buf_ops,
7009 .spd_release = tracing_spd_release_pipe,
7015 if (splice_grow_spd(pipe, &spd))
7018 mutex_lock(&iter->mutex);
7020 if (iter->trace->splice_read) {
7021 ret = iter->trace->splice_read(iter, filp,
7022 ppos, pipe, len, flags);
7027 ret = tracing_wait_pipe(filp);
7031 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7036 trace_event_read_lock();
7037 trace_access_lock(iter->cpu_file);
7039 /* Fill as many pages as possible. */
7040 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7041 spd.pages[i] = alloc_page(GFP_KERNEL);
7045 rem = tracing_fill_pipe_page(rem, iter);
7047 /* Copy the data into the page, so we can start over. */
7048 ret = trace_seq_to_buffer(&iter->seq,
7049 page_address(spd.pages[i]),
7050 trace_seq_used(&iter->seq));
7052 __free_page(spd.pages[i]);
7055 spd.partial[i].offset = 0;
7056 spd.partial[i].len = trace_seq_used(&iter->seq);
7058 trace_seq_init(&iter->seq);
7061 trace_access_unlock(iter->cpu_file);
7062 trace_event_read_unlock();
7063 mutex_unlock(&iter->mutex);
7068 ret = splice_to_pipe(pipe, &spd);
7072 splice_shrink_spd(&spd);
7076 mutex_unlock(&iter->mutex);
7081 tracing_entries_read(struct file *filp, char __user *ubuf,
7082 size_t cnt, loff_t *ppos)
7084 struct inode *inode = file_inode(filp);
7085 struct trace_array *tr = inode->i_private;
7086 int cpu = tracing_get_cpu(inode);
7091 mutex_lock(&trace_types_lock);
7093 if (cpu == RING_BUFFER_ALL_CPUS) {
7094 int cpu, buf_size_same;
7099 /* check if all cpu sizes are the same */
7100 for_each_tracing_cpu(cpu) {
7101 /* fill in the size from first enabled cpu */
7103 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7104 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7110 if (buf_size_same) {
7111 if (!ring_buffer_expanded)
7112 r = sprintf(buf, "%lu (expanded: %lu)\n",
7114 trace_buf_size >> 10);
7116 r = sprintf(buf, "%lu\n", size >> 10);
7118 r = sprintf(buf, "X\n");
7120 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7122 mutex_unlock(&trace_types_lock);
7124 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
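/*
 * Illustrative only: buffer_size_kb takes the per-CPU size in KiB,
 *
 *   echo 4096 > buffer_size_kb                 # 4 MiB per CPU
 *   echo 64 > per_cpu/cpu1/buffer_size_kb      # resize one CPU only
 *
 * and the top-level file reads back "X" when per-CPU sizes differ.
 */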
7129 tracing_entries_write(struct file *filp, const char __user *ubuf,
7130 size_t cnt, loff_t *ppos)
7132 struct inode *inode = file_inode(filp);
7133 struct trace_array *tr = inode->i_private;
7137 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7141 /* must have at least 1 entry */
7145 /* value is in KB */
7147 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7157 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7158 size_t cnt, loff_t *ppos)
7160 struct trace_array *tr = filp->private_data;
7163 unsigned long size = 0, expanded_size = 0;
7165 mutex_lock(&trace_types_lock);
7166 for_each_tracing_cpu(cpu) {
7167 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7168 if (!ring_buffer_expanded)
7169 expanded_size += trace_buf_size >> 10;
7171 if (ring_buffer_expanded)
7172 r = sprintf(buf, "%lu\n", size);
7174 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7175 mutex_unlock(&trace_types_lock);
7177 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7181 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7182 size_t cnt, loff_t *ppos)
7185 * There is no need to read what the user has written; this function
7186 * is just to make sure that there is no error when "echo" is used
7195 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7197 struct trace_array *tr = inode->i_private;
7199 /* disable tracing ? */
7200 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7201 tracer_tracing_off(tr);
7202 /* resize the ring buffer to 0 */
7203 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7205 trace_array_put(tr);
7211 tracing_mark_write(struct file *filp, const char __user *ubuf,
7212 size_t cnt, loff_t *fpos)
7214 struct trace_array *tr = filp->private_data;
7215 struct ring_buffer_event *event;
7216 enum event_trigger_type tt = ETT_NONE;
7217 struct trace_buffer *buffer;
7218 struct print_entry *entry;
7223 /* Used in tracing_mark_raw_write() as well */
7224 #define FAULTED_STR "<faulted>"
7225 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7227 if (tracing_disabled)
7230 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7233 if (cnt > TRACE_BUF_SIZE)
7234 cnt = TRACE_BUF_SIZE;
7236 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7238 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7240 /* If less than "<faulted>", then make sure we can still add that */
7241 if (cnt < FAULTED_SIZE)
7242 size += FAULTED_SIZE - cnt;
7244 buffer = tr->array_buffer.buffer;
7245 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7247 if (unlikely(!event))
7248 /* Ring buffer disabled, return as if not open for write */
7251 entry = ring_buffer_event_data(event);
7252 entry->ip = _THIS_IP_;
7254 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7256 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7262 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7263 /* do not add \n before testing triggers, but add \0 */
7264 entry->buf[cnt] = '\0';
7265 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7268 if (entry->buf[cnt - 1] != '\n') {
7269 entry->buf[cnt] = '\n';
7270 entry->buf[cnt + 1] = '\0';
7272 entry->buf[cnt] = '\0';
7274 if (static_branch_unlikely(&trace_marker_exports_enabled))
7275 ftrace_exports(event, TRACE_EXPORT_MARKER);
7276 __buffer_unlock_commit(buffer, event);
7279 event_triggers_post_call(tr->trace_marker_file, tt);
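/*
 * Usage sketch (illustrative): user space annotates a trace by writing plain
 * text to trace_marker, e.g.
 *
 *   echo "hit the slow path" > /sys/kernel/tracing/trace_marker
 *
 * The text is stored as a TRACE_PRINT event, a trailing newline is added
 * when missing, writes longer than TRACE_BUF_SIZE are truncated, and a
 * failed copy from user space is recorded as the "<faulted>" string defined
 * above.
 */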
7284 /* Limit it for now to 3K (including tag) */
7285 #define RAW_DATA_MAX_SIZE (1024*3)
7288 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7289 size_t cnt, loff_t *fpos)
7291 struct trace_array *tr = filp->private_data;
7292 struct ring_buffer_event *event;
7293 struct trace_buffer *buffer;
7294 struct raw_data_entry *entry;
7299 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7301 if (tracing_disabled)
7304 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7307 /* The marker must at least have a tag id */
7308 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7311 if (cnt > TRACE_BUF_SIZE)
7312 cnt = TRACE_BUF_SIZE;
7314 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7316 size = sizeof(*entry) + cnt;
7317 if (cnt < FAULT_SIZE_ID)
7318 size += FAULT_SIZE_ID - cnt;
7320 buffer = tr->array_buffer.buffer;
7321 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7324 /* Ring buffer disabled, return as if not open for write */
7327 entry = ring_buffer_event_data(event);
7329 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7332 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7337 __buffer_unlock_commit(buffer, event);
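/*
 * Usage sketch (illustrative; the struct is just an example layout):
 * trace_marker_raw takes a binary blob whose first sizeof(int) bytes are a
 * tag id, followed by the payload, capped at RAW_DATA_MAX_SIZE in total.
 * From user space, something like:
 *
 *   struct { unsigned int id; char payload[32]; } rec = { .id = 42 };
 *   int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *   write(fd, &rec, sizeof(rec));
 *
 * The record is stored as a TRACE_RAW_DATA event and is decoded by whatever
 * tool understands the tag id.
 */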
7342 static int tracing_clock_show(struct seq_file *m, void *v)
7344 struct trace_array *tr = m->private;
7347 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7349 "%s%s%s%s", i ? " " : "",
7350 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7351 i == tr->clock_id ? "]" : "");
7357 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7361 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7362 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7365 if (i == ARRAY_SIZE(trace_clocks))
7368 mutex_lock(&trace_types_lock);
7372 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7375 * New clock may not be consistent with the previous clock.
7376 * Reset the buffer so that it doesn't have incomparable timestamps.
7378 tracing_reset_online_cpus(&tr->array_buffer);
7380 #ifdef CONFIG_TRACER_MAX_TRACE
7381 if (tr->max_buffer.buffer)
7382 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7383 tracing_reset_online_cpus(&tr->max_buffer);
7386 mutex_unlock(&trace_types_lock);
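/*
 * Usage sketch (illustrative; "global" is one of the standard entries in
 * trace_clocks[], the exact list is configuration dependent): reading
 * trace_clock lists the available clocks with the current one in brackets,
 * and writing a name switches clocks and resets the buffers, since old and
 * new timestamps are not comparable:
 *
 *   cat /sys/kernel/tracing/trace_clock
 *   echo global > /sys/kernel/tracing/trace_clock
 */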
7391 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7392 size_t cnt, loff_t *fpos)
7394 struct seq_file *m = filp->private_data;
7395 struct trace_array *tr = m->private;
7397 const char *clockstr;
7400 if (cnt >= sizeof(buf))
7403 if (copy_from_user(buf, ubuf, cnt))
7408 clockstr = strstrip(buf);
7410 ret = tracing_set_clock(tr, clockstr);
7419 static int tracing_clock_open(struct inode *inode, struct file *file)
7421 struct trace_array *tr = inode->i_private;
7424 ret = tracing_check_open_get_tr(tr);
7428 ret = single_open(file, tracing_clock_show, inode->i_private);
7430 trace_array_put(tr);
7435 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7437 struct trace_array *tr = m->private;
7439 mutex_lock(&trace_types_lock);
7441 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7442 seq_puts(m, "delta [absolute]\n");
7444 seq_puts(m, "[delta] absolute\n");
7446 mutex_unlock(&trace_types_lock);
7451 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7453 struct trace_array *tr = inode->i_private;
7456 ret = tracing_check_open_get_tr(tr);
7460 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7462 trace_array_put(tr);
7467 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7469 if (rbe == this_cpu_read(trace_buffered_event))
7470 return ring_buffer_time_stamp(buffer);
7472 return ring_buffer_event_time_stamp(buffer, rbe);
7476 * Set or disable using the per CPU trace_buffer_event when possible.
7478 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7482 mutex_lock(&trace_types_lock);
7484 if (set && tr->no_filter_buffering_ref++)
7488 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7493 --tr->no_filter_buffering_ref;
7496 mutex_unlock(&trace_types_lock);
7501 struct ftrace_buffer_info {
7502 struct trace_iterator iter;
7504 unsigned int spare_cpu;
7508 #ifdef CONFIG_TRACER_SNAPSHOT
7509 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7511 struct trace_array *tr = inode->i_private;
7512 struct trace_iterator *iter;
7516 ret = tracing_check_open_get_tr(tr);
7520 if (file->f_mode & FMODE_READ) {
7521 iter = __tracing_open(inode, file, true);
7523 ret = PTR_ERR(iter);
7525 /* Writes still need the seq_file to hold the private data */
7527 m = kzalloc(sizeof(*m), GFP_KERNEL);
7530 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7538 iter->array_buffer = &tr->max_buffer;
7539 iter->cpu_file = tracing_get_cpu(inode);
7541 file->private_data = m;
7545 trace_array_put(tr);
7550 static void tracing_swap_cpu_buffer(void *tr)
7552 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7556 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7559 struct seq_file *m = filp->private_data;
7560 struct trace_iterator *iter = m->private;
7561 struct trace_array *tr = iter->tr;
7565 ret = tracing_update_buffers();
7569 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7573 mutex_lock(&trace_types_lock);
7575 if (tr->current_trace->use_max_tr) {
7580 local_irq_disable();
7581 arch_spin_lock(&tr->max_lock);
7582 if (tr->cond_snapshot)
7584 arch_spin_unlock(&tr->max_lock);
7591 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7595 if (tr->allocated_snapshot)
7599 /* Only allow per-cpu swap if the ring buffer supports it */
7600 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7601 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7606 if (tr->allocated_snapshot)
7607 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7608 &tr->array_buffer, iter->cpu_file);
7610 ret = tracing_alloc_snapshot_instance(tr);
7613 /* Now, we're going to swap */
7614 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7615 local_irq_disable();
7616 update_max_tr(tr, current, smp_processor_id(), NULL);
7619 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7624 if (tr->allocated_snapshot) {
7625 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7626 tracing_reset_online_cpus(&tr->max_buffer);
7628 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7638 mutex_unlock(&trace_types_lock);
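/*
 * Usage sketch (illustrative; semantics as documented for the snapshot
 * file): writing "1" allocates the max_buffer if needed and swaps it with
 * the live buffer, writing "0" frees it, and other values just clear it;
 * reading shows the snapshotted data:
 *
 *   echo 1 > /sys/kernel/tracing/snapshot
 *   cat /sys/kernel/tracing/snapshot
 *
 * The per_cpu/cpuN/snapshot variant swaps a single CPU and depends on
 * CONFIG_RING_BUFFER_ALLOW_SWAP.
 */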
7642 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7644 struct seq_file *m = file->private_data;
7647 ret = tracing_release(inode, file);
7649 if (file->f_mode & FMODE_READ)
7652 /* If write only, the seq_file is just a stub */
7660 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7661 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7662 size_t count, loff_t *ppos);
7663 static int tracing_buffers_release(struct inode *inode, struct file *file);
7664 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7665 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7667 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7669 struct ftrace_buffer_info *info;
7672 /* The following checks for tracefs lockdown */
7673 ret = tracing_buffers_open(inode, filp);
7677 info = filp->private_data;
7679 if (info->iter.trace->use_max_tr) {
7680 tracing_buffers_release(inode, filp);
7684 info->iter.snapshot = true;
7685 info->iter.array_buffer = &info->iter.tr->max_buffer;
7690 #endif /* CONFIG_TRACER_SNAPSHOT */
7693 static const struct file_operations tracing_thresh_fops = {
7694 .open = tracing_open_generic,
7695 .read = tracing_thresh_read,
7696 .write = tracing_thresh_write,
7697 .llseek = generic_file_llseek,
7700 #ifdef CONFIG_TRACER_MAX_TRACE
7701 static const struct file_operations tracing_max_lat_fops = {
7702 .open = tracing_open_generic_tr,
7703 .read = tracing_max_lat_read,
7704 .write = tracing_max_lat_write,
7705 .llseek = generic_file_llseek,
7706 .release = tracing_release_generic_tr,
7710 static const struct file_operations set_tracer_fops = {
7711 .open = tracing_open_generic,
7712 .read = tracing_set_trace_read,
7713 .write = tracing_set_trace_write,
7714 .llseek = generic_file_llseek,
7717 static const struct file_operations tracing_pipe_fops = {
7718 .open = tracing_open_pipe,
7719 .poll = tracing_poll_pipe,
7720 .read = tracing_read_pipe,
7721 .splice_read = tracing_splice_read_pipe,
7722 .release = tracing_release_pipe,
7723 .llseek = no_llseek,
7726 static const struct file_operations tracing_entries_fops = {
7727 .open = tracing_open_generic_tr,
7728 .read = tracing_entries_read,
7729 .write = tracing_entries_write,
7730 .llseek = generic_file_llseek,
7731 .release = tracing_release_generic_tr,
7734 static const struct file_operations tracing_total_entries_fops = {
7735 .open = tracing_open_generic_tr,
7736 .read = tracing_total_entries_read,
7737 .llseek = generic_file_llseek,
7738 .release = tracing_release_generic_tr,
7741 static const struct file_operations tracing_free_buffer_fops = {
7742 .open = tracing_open_generic_tr,
7743 .write = tracing_free_buffer_write,
7744 .release = tracing_free_buffer_release,
7747 static const struct file_operations tracing_mark_fops = {
7748 .open = tracing_mark_open,
7749 .write = tracing_mark_write,
7750 .release = tracing_release_generic_tr,
7753 static const struct file_operations tracing_mark_raw_fops = {
7754 .open = tracing_mark_open,
7755 .write = tracing_mark_raw_write,
7756 .release = tracing_release_generic_tr,
7759 static const struct file_operations trace_clock_fops = {
7760 .open = tracing_clock_open,
7762 .llseek = seq_lseek,
7763 .release = tracing_single_release_tr,
7764 .write = tracing_clock_write,
7767 static const struct file_operations trace_time_stamp_mode_fops = {
7768 .open = tracing_time_stamp_mode_open,
7770 .llseek = seq_lseek,
7771 .release = tracing_single_release_tr,
7774 #ifdef CONFIG_TRACER_SNAPSHOT
7775 static const struct file_operations snapshot_fops = {
7776 .open = tracing_snapshot_open,
7778 .write = tracing_snapshot_write,
7779 .llseek = tracing_lseek,
7780 .release = tracing_snapshot_release,
7783 static const struct file_operations snapshot_raw_fops = {
7784 .open = snapshot_raw_open,
7785 .read = tracing_buffers_read,
7786 .release = tracing_buffers_release,
7787 .splice_read = tracing_buffers_splice_read,
7788 .llseek = no_llseek,
7791 #endif /* CONFIG_TRACER_SNAPSHOT */
7794 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7795 * @filp: The active open file structure
7796 * @ubuf: The userspace provided buffer to read value into
7797 * @cnt: The maximum number of bytes to read
7798 * @ppos: The current "file" position
7800 * This function implements the write interface for a struct trace_min_max_param.
7801 * The filp->private_data must point to a trace_min_max_param structure that
7802 * defines where to write the value, the min and the max acceptable values,
7803 * and a lock to protect the write.
7806 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7808 struct trace_min_max_param *param = filp->private_data;
7815 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7820 mutex_lock(param->lock);
7822 if (param->min && val < *param->min)
7825 if (param->max && val > *param->max)
7832 mutex_unlock(param->lock);
7841 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7842 * @filp: The active open file structure
7843 * @ubuf: The userspace provided buffer to read value into
7844 * @cnt: The maximum number of bytes to read
7845 * @ppos: The current "file" position
7847 * This function implements the read interface for a struct trace_min_max_param.
7848 * The filp->private_data must point to a trace_min_max_param struct with valid
7852 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7854 struct trace_min_max_param *param = filp->private_data;
7855 char buf[U64_STR_SIZE];
7864 if (cnt > sizeof(buf))
7867 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7869 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7872 const struct file_operations trace_min_max_fops = {
7873 .open = tracing_open_generic,
7874 .read = trace_min_max_read,
7875 .write = trace_min_max_write,
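/*
 * Wiring sketch (illustrative; only ->lock, ->min and ->max are visible
 * above, the value-pointer field name is assumed): a caller keeps a
 * trace_min_max_param describing its u64 and exposes it with
 * trace_min_max_fops, roughly:
 *
 *   static u64 my_val, my_min = 1, my_max = 100;
 *   static DEFINE_MUTEX(my_lock);
 *   static struct trace_min_max_param my_param = {
 *           .lock = &my_lock,
 *           .val  = &my_val,
 *           .min  = &my_min,
 *           .max  = &my_max,
 *   };
 *
 *   trace_create_file("my_knob", TRACE_MODE_WRITE, parent, &my_param,
 *                     &trace_min_max_fops);
 */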
7878 #define TRACING_LOG_ERRS_MAX 8
7879 #define TRACING_LOG_LOC_MAX 128
7881 #define CMD_PREFIX " Command: "
7884 const char **errs; /* ptr to loc-specific array of err strings */
7885 u8 type; /* index into errs -> specific err string */
7886 u16 pos; /* caret position */
7890 struct tracing_log_err {
7891 struct list_head list;
7892 struct err_info info;
7893 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7894 char *cmd; /* what caused err */
7897 static DEFINE_MUTEX(tracing_err_log_lock);
7899 static struct tracing_log_err *alloc_tracing_log_err(int len)
7901 struct tracing_log_err *err;
7903 err = kzalloc(sizeof(*err), GFP_KERNEL);
7905 return ERR_PTR(-ENOMEM);
7907 err->cmd = kzalloc(len, GFP_KERNEL);
7910 return ERR_PTR(-ENOMEM);
7916 static void free_tracing_log_err(struct tracing_log_err *err)
7922 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7925 struct tracing_log_err *err;
7928 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7929 err = alloc_tracing_log_err(len);
7930 if (PTR_ERR(err) != -ENOMEM)
7931 tr->n_err_log_entries++;
7935 cmd = kzalloc(len, GFP_KERNEL);
7937 return ERR_PTR(-ENOMEM);
7938 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7941 list_del(&err->list);
7947 * err_pos - find the position of a string within a command for error careting
7948 * @cmd: The tracing command that caused the error
7949 * @str: The string to position the caret at within @cmd
7951 * Finds the position of the first occurrence of @str within @cmd. The
7952 * return value can be passed to tracing_log_err() for caret placement
7955 * Returns the index within @cmd of the first occurrence of @str or 0
7956 * if @str was not found.
7958 unsigned int err_pos(char *cmd, const char *str)
7962 if (WARN_ON(!strlen(cmd)))
7965 found = strstr(cmd, str);
7973 * tracing_log_err - write an error to the tracing error log
7974 * @tr: The associated trace array for the error (NULL for top level array)
7975 * @loc: A string describing where the error occurred
7976 * @cmd: The tracing command that caused the error
7977 * @errs: The array of loc-specific static error strings
7978 * @type: The index into errs[], which produces the specific static err string
7979 * @pos: The position the caret should be placed in the cmd
7981 * Writes an error into tracing/error_log of the form:
7983 * <loc>: error: <text>
7987 * tracing/error_log is a small log file containing the last
7988 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7989 * unless there has been a tracing error, and the error log can be
7990 * cleared and have its memory freed by writing the empty string in
7991 * truncation mode to it i.e. echo > tracing/error_log.
7993 * NOTE: the @errs array along with the @type param are used to
7994 * produce a static error string - this string is not copied and saved
7995 * when the error is logged - only a pointer to it is saved. See
7996 * existing callers for examples of how static strings are typically
7997 * defined for use with tracing_log_err().
7999 void tracing_log_err(struct trace_array *tr,
8000 const char *loc, const char *cmd,
8001 const char **errs, u8 type, u16 pos)
8003 struct tracing_log_err *err;
8009 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8011 mutex_lock(&tracing_err_log_lock);
8012 err = get_tracing_log_err(tr, len);
8013 if (PTR_ERR(err) == -ENOMEM) {
8014 mutex_unlock(&tracing_err_log_lock);
8018 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8019 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8021 err->info.errs = errs;
8022 err->info.type = type;
8023 err->info.pos = pos;
8024 err->info.ts = local_clock();
8026 list_add_tail(&err->list, &tr->err_log);
8027 mutex_unlock(&tracing_err_log_lock);
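/*
 * Caller sketch (illustrative; the error table and command string are made
 * up): a tracing command handler keeps a static array of error strings and
 * reports a parse failure with a caret under the offending token, e.g.
 *
 *   static const char *my_cmd_errs[] = { "Duplicate field name" };
 *
 *   tracing_log_err(tr, "my_cmd", cmd_str, my_cmd_errs, 0,
 *                   err_pos(cmd_str, "field"));
 *
 * The entry then shows up in tracing/error_log in the
 * "<loc>: error: <text>" form described above.
 */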
8030 static void clear_tracing_err_log(struct trace_array *tr)
8032 struct tracing_log_err *err, *next;
8034 mutex_lock(&tracing_err_log_lock);
8035 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8036 list_del(&err->list);
8037 free_tracing_log_err(err);
8040 tr->n_err_log_entries = 0;
8041 mutex_unlock(&tracing_err_log_lock);
8044 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8046 struct trace_array *tr = m->private;
8048 mutex_lock(&tracing_err_log_lock);
8050 return seq_list_start(&tr->err_log, *pos);
8053 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8055 struct trace_array *tr = m->private;
8057 return seq_list_next(v, &tr->err_log, pos);
8060 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8062 mutex_unlock(&tracing_err_log_lock);
8065 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8069 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8071 for (i = 0; i < pos; i++)
8076 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8078 struct tracing_log_err *err = v;
8081 const char *err_text = err->info.errs[err->info.type];
8082 u64 sec = err->info.ts;
8085 nsec = do_div(sec, NSEC_PER_SEC);
8086 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8087 err->loc, err_text);
8088 seq_printf(m, "%s", err->cmd);
8089 tracing_err_log_show_pos(m, err->info.pos);
8095 static const struct seq_operations tracing_err_log_seq_ops = {
8096 .start = tracing_err_log_seq_start,
8097 .next = tracing_err_log_seq_next,
8098 .stop = tracing_err_log_seq_stop,
8099 .show = tracing_err_log_seq_show
8102 static int tracing_err_log_open(struct inode *inode, struct file *file)
8104 struct trace_array *tr = inode->i_private;
8107 ret = tracing_check_open_get_tr(tr);
8111 /* If this file was opened for write, then erase contents */
8112 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8113 clear_tracing_err_log(tr);
8115 if (file->f_mode & FMODE_READ) {
8116 ret = seq_open(file, &tracing_err_log_seq_ops);
8118 struct seq_file *m = file->private_data;
8121 trace_array_put(tr);
8127 static ssize_t tracing_err_log_write(struct file *file,
8128 const char __user *buffer,
8129 size_t count, loff_t *ppos)
8134 static int tracing_err_log_release(struct inode *inode, struct file *file)
8136 struct trace_array *tr = inode->i_private;
8138 trace_array_put(tr);
8140 if (file->f_mode & FMODE_READ)
8141 seq_release(inode, file);
8146 static const struct file_operations tracing_err_log_fops = {
8147 .open = tracing_err_log_open,
8148 .write = tracing_err_log_write,
8150 .llseek = tracing_lseek,
8151 .release = tracing_err_log_release,
8154 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8156 struct trace_array *tr = inode->i_private;
8157 struct ftrace_buffer_info *info;
8160 ret = tracing_check_open_get_tr(tr);
8164 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8166 trace_array_put(tr);
8170 mutex_lock(&trace_types_lock);
8173 info->iter.cpu_file = tracing_get_cpu(inode);
8174 info->iter.trace = tr->current_trace;
8175 info->iter.array_buffer = &tr->array_buffer;
8177 /* Force reading ring buffer for first read */
8178 info->read = (unsigned int)-1;
8180 filp->private_data = info;
8184 mutex_unlock(&trace_types_lock);
8186 ret = nonseekable_open(inode, filp);
8188 trace_array_put(tr);
8194 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8196 struct ftrace_buffer_info *info = filp->private_data;
8197 struct trace_iterator *iter = &info->iter;
8199 return trace_poll(iter, filp, poll_table);
8203 tracing_buffers_read(struct file *filp, char __user *ubuf,
8204 size_t count, loff_t *ppos)
8206 struct ftrace_buffer_info *info = filp->private_data;
8207 struct trace_iterator *iter = &info->iter;
8214 #ifdef CONFIG_TRACER_MAX_TRACE
8215 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8220 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8222 if (IS_ERR(info->spare)) {
8223 ret = PTR_ERR(info->spare);
8226 info->spare_cpu = iter->cpu_file;
8232 /* Do we have previous read data to read? */
8233 if (info->read < PAGE_SIZE)
8237 trace_access_lock(iter->cpu_file);
8238 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8242 trace_access_unlock(iter->cpu_file);
8245 if (trace_empty(iter)) {
8246 if ((filp->f_flags & O_NONBLOCK))
8249 ret = wait_on_pipe(iter, 0);
8260 size = PAGE_SIZE - info->read;
8264 ret = copy_to_user(ubuf, info->spare + info->read, size);
8276 static int tracing_buffers_release(struct inode *inode, struct file *file)
8278 struct ftrace_buffer_info *info = file->private_data;
8279 struct trace_iterator *iter = &info->iter;
8281 mutex_lock(&trace_types_lock);
8283 iter->tr->trace_ref--;
8285 __trace_array_put(iter->tr);
8288 /* Make sure the waiters see the new wait_index */
8291 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8294 ring_buffer_free_read_page(iter->array_buffer->buffer,
8295 info->spare_cpu, info->spare);
8298 mutex_unlock(&trace_types_lock);
8304 struct trace_buffer *buffer;
8307 refcount_t refcount;
8310 static void buffer_ref_release(struct buffer_ref *ref)
8312 if (!refcount_dec_and_test(&ref->refcount))
8314 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8318 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8319 struct pipe_buffer *buf)
8321 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8323 buffer_ref_release(ref);
8327 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8328 struct pipe_buffer *buf)
8330 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8332 if (refcount_read(&ref->refcount) > INT_MAX/2)
8335 refcount_inc(&ref->refcount);
8339 /* Pipe buffer operations for a buffer. */
8340 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8341 .release = buffer_pipe_buf_release,
8342 .get = buffer_pipe_buf_get,
8346 * Callback from splice_to_pipe(), if we need to release some pages
8347 * at the end of the spd in case we errored out while filling the pipe.
8349 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8351 struct buffer_ref *ref =
8352 (struct buffer_ref *)spd->partial[i].private;
8354 buffer_ref_release(ref);
8355 spd->partial[i].private = 0;
8359 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8360 struct pipe_inode_info *pipe, size_t len,
8363 struct ftrace_buffer_info *info = file->private_data;
8364 struct trace_iterator *iter = &info->iter;
8365 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8366 struct page *pages_def[PIPE_DEF_BUFFERS];
8367 struct splice_pipe_desc spd = {
8369 .partial = partial_def,
8370 .nr_pages_max = PIPE_DEF_BUFFERS,
8371 .ops = &buffer_pipe_buf_ops,
8372 .spd_release = buffer_spd_release,
8374 struct buffer_ref *ref;
8378 #ifdef CONFIG_TRACER_MAX_TRACE
8379 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8383 if (*ppos & (PAGE_SIZE - 1))
8386 if (len & (PAGE_SIZE - 1)) {
8387 if (len < PAGE_SIZE)
8392 if (splice_grow_spd(pipe, &spd))
8396 trace_access_lock(iter->cpu_file);
8397 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8399 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8403 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8409 refcount_set(&ref->refcount, 1);
8410 ref->buffer = iter->array_buffer->buffer;
8411 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8412 if (IS_ERR(ref->page)) {
8413 ret = PTR_ERR(ref->page);
8418 ref->cpu = iter->cpu_file;
8420 r = ring_buffer_read_page(ref->buffer, &ref->page,
8421 len, iter->cpu_file, 1);
8423 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8429 page = virt_to_page(ref->page);
8431 spd.pages[i] = page;
8432 spd.partial[i].len = PAGE_SIZE;
8433 spd.partial[i].offset = 0;
8434 spd.partial[i].private = (unsigned long)ref;
8438 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8441 trace_access_unlock(iter->cpu_file);
8444 /* did we read anything? */
8445 if (!spd.nr_pages) {
8452 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8455 wait_index = READ_ONCE(iter->wait_index);
8457 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8461 /* No need to wait after waking up when tracing is off */
8462 if (!tracer_tracing_is_on(iter->tr))
8465 /* Make sure we see the new wait_index */
8467 if (wait_index != iter->wait_index)
8473 ret = splice_to_pipe(pipe, &spd);
8475 splice_shrink_spd(&spd);
8480 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8481 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8483 struct ftrace_buffer_info *info = file->private_data;
8484 struct trace_iterator *iter = &info->iter;
8487 return -ENOIOCTLCMD;
8489 mutex_lock(&trace_types_lock);
8492 /* Make sure the waiters see the new wait_index */
8495 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8497 mutex_unlock(&trace_types_lock);
8501 static const struct file_operations tracing_buffers_fops = {
8502 .open = tracing_buffers_open,
8503 .read = tracing_buffers_read,
8504 .poll = tracing_buffers_poll,
8505 .release = tracing_buffers_release,
8506 .splice_read = tracing_buffers_splice_read,
8507 .unlocked_ioctl = tracing_buffers_ioctl,
8508 .llseek = no_llseek,
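/*
 * Usage sketch (illustrative; assumes 4K pages): trace_pipe_raw exposes raw
 * ring-buffer pages for one CPU, consumed a page at a time:
 *
 *   int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *                 O_RDONLY | O_NONBLOCK);
 *   char page[4096];
 *   ssize_t n = read(fd, page, sizeof(page));
 *
 * splice() additionally requires a page-aligned offset and a length of at
 * least one page, as checked in tracing_buffers_splice_read() above.
 */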
8512 tracing_stats_read(struct file *filp, char __user *ubuf,
8513 size_t count, loff_t *ppos)
8515 struct inode *inode = file_inode(filp);
8516 struct trace_array *tr = inode->i_private;
8517 struct array_buffer *trace_buf = &tr->array_buffer;
8518 int cpu = tracing_get_cpu(inode);
8519 struct trace_seq *s;
8521 unsigned long long t;
8522 unsigned long usec_rem;
8524 s = kmalloc(sizeof(*s), GFP_KERNEL);
8530 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8531 trace_seq_printf(s, "entries: %ld\n", cnt);
8533 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8534 trace_seq_printf(s, "overrun: %ld\n", cnt);
8536 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8537 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8539 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8540 trace_seq_printf(s, "bytes: %ld\n", cnt);
8542 if (trace_clocks[tr->clock_id].in_ns) {
8543 /* local or global for trace_clock */
8544 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8545 usec_rem = do_div(t, USEC_PER_SEC);
8546 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8549 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8550 usec_rem = do_div(t, USEC_PER_SEC);
8551 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8553 /* counter or tsc mode for trace_clock */
8554 trace_seq_printf(s, "oldest event ts: %llu\n",
8555 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8557 trace_seq_printf(s, "now ts: %llu\n",
8558 ring_buffer_time_stamp(trace_buf->buffer));
8561 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8562 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8564 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8565 trace_seq_printf(s, "read events: %ld\n", cnt);
8567 count = simple_read_from_buffer(ubuf, count, ppos,
8568 s->buffer, trace_seq_used(s));
8575 static const struct file_operations tracing_stats_fops = {
8576 .open = tracing_open_generic_tr,
8577 .read = tracing_stats_read,
8578 .llseek = generic_file_llseek,
8579 .release = tracing_release_generic_tr,
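/*
 * Output sketch (values are illustrative): per_cpu/cpuN/stats prints one
 * counter per line in the order emitted above, e.g.
 *
 *   entries: 102
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 5024
 *   oldest event ts:  1893.715722
 *   now ts:  1913.052596
 *   dropped events: 0
 *   read events: 102
 */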
8582 #ifdef CONFIG_DYNAMIC_FTRACE
8585 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8586 size_t cnt, loff_t *ppos)
8592 /* 256 should be plenty to hold the amount needed */
8593 buf = kmalloc(256, GFP_KERNEL);
8597 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8598 ftrace_update_tot_cnt,
8599 ftrace_number_of_pages,
8600 ftrace_number_of_groups);
8602 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8607 static const struct file_operations tracing_dyn_info_fops = {
8608 .open = tracing_open_generic,
8609 .read = tracing_read_dyn_info,
8610 .llseek = generic_file_llseek,
8612 #endif /* CONFIG_DYNAMIC_FTRACE */
8614 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8616 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8617 struct trace_array *tr, struct ftrace_probe_ops *ops,
8620 tracing_snapshot_instance(tr);
8624 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8625 struct trace_array *tr, struct ftrace_probe_ops *ops,
8628 struct ftrace_func_mapper *mapper = data;
8632 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8642 tracing_snapshot_instance(tr);
8646 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8647 struct ftrace_probe_ops *ops, void *data)
8649 struct ftrace_func_mapper *mapper = data;
8652 seq_printf(m, "%ps:", (void *)ip);
8654 seq_puts(m, "snapshot");
8657 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8660 seq_printf(m, ":count=%ld\n", *count);
8662 seq_puts(m, ":unlimited\n");
8668 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8669 unsigned long ip, void *init_data, void **data)
8671 struct ftrace_func_mapper *mapper = *data;
8674 mapper = allocate_ftrace_func_mapper();
8680 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8684 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8685 unsigned long ip, void *data)
8687 struct ftrace_func_mapper *mapper = data;
8692 free_ftrace_func_mapper(mapper, NULL);
8696 ftrace_func_mapper_remove_ip(mapper, ip);
8699 static struct ftrace_probe_ops snapshot_probe_ops = {
8700 .func = ftrace_snapshot,
8701 .print = ftrace_snapshot_print,
8704 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8705 .func = ftrace_count_snapshot,
8706 .print = ftrace_snapshot_print,
8707 .init = ftrace_snapshot_init,
8708 .free = ftrace_snapshot_free,
8712 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8713 char *glob, char *cmd, char *param, int enable)
8715 struct ftrace_probe_ops *ops;
8716 void *count = (void *)-1;
8723 /* hash funcs only work with set_ftrace_filter */
8727 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8730 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8735 number = strsep(¶m, ":");
8737 if (!strlen(number))
8741 * We use the callback data field (which is a pointer)
8744 ret = kstrtoul(number, 0, (unsigned long *)&count);
8749 ret = tracing_alloc_snapshot_instance(tr);
8753 ret = register_ftrace_function_probe(glob, tr, ops, count);
8756 return ret < 0 ? ret : 0;
8759 static struct ftrace_func_command ftrace_snapshot_cmd = {
8761 .func = ftrace_trace_snapshot_callback,
8764 static __init int register_snapshot_cmd(void)
8766 return register_ftrace_command(&ftrace_snapshot_cmd);
8769 static inline __init int register_snapshot_cmd(void) { return 0; }
8770 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
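/*
 * Usage sketch (illustrative function name): with the snapshot command
 * registered above, hitting a filtered function triggers a snapshot, and an
 * optional count limits how many times it fires:
 *
 *   echo 'schedule:snapshot' >> /sys/kernel/tracing/set_ftrace_filter
 *   echo 'schedule:snapshot:3' >> /sys/kernel/tracing/set_ftrace_filter
 *   echo '!schedule:snapshot' >> /sys/kernel/tracing/set_ftrace_filter
 *
 * The '!' form unregisters the probe, matching the glob+1 handling in
 * ftrace_trace_snapshot_callback().
 */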
8772 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8774 if (WARN_ON(!tr->dir))
8775 return ERR_PTR(-ENODEV);
8777 /* Top directory uses NULL as the parent */
8778 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8781 /* All sub buffers have a descriptor */
8785 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8787 struct dentry *d_tracer;
8790 return tr->percpu_dir;
8792 d_tracer = tracing_get_dentry(tr);
8793 if (IS_ERR(d_tracer))
8796 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8798 MEM_FAIL(!tr->percpu_dir,
8799 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8801 return tr->percpu_dir;
8804 static struct dentry *
8805 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8806 void *data, long cpu, const struct file_operations *fops)
8808 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8810 if (ret) /* See tracing_get_cpu() */
8811 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8816 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8818 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8819 struct dentry *d_cpu;
8820 char cpu_dir[30]; /* 30 characters should be more than enough */
8825 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8826 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8828 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8832 /* per cpu trace_pipe */
8833 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8834 tr, cpu, &tracing_pipe_fops);
8837 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8838 tr, cpu, &tracing_fops);
8840 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8841 tr, cpu, &tracing_buffers_fops);
8843 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8844 tr, cpu, &tracing_stats_fops);
8846 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8847 tr, cpu, &tracing_entries_fops);
8849 #ifdef CONFIG_TRACER_SNAPSHOT
8850 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8851 tr, cpu, &snapshot_fops);
8853 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8854 tr, cpu, &snapshot_raw_fops);
8858 #ifdef CONFIG_FTRACE_SELFTEST
8859 /* Let selftest have access to static functions in this file */
8860 #include "trace_selftest.c"
8864 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8867 struct trace_option_dentry *topt = filp->private_data;
8870 if (topt->flags->val & topt->opt->bit)
8875 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8879 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8882 struct trace_option_dentry *topt = filp->private_data;
8886 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8890 if (val != 0 && val != 1)
8893 if (!!(topt->flags->val & topt->opt->bit) != val) {
8894 mutex_lock(&trace_types_lock);
8895 ret = __set_tracer_option(topt->tr, topt->flags,
8897 mutex_unlock(&trace_types_lock);
8908 static const struct file_operations trace_options_fops = {
8909 .open = tracing_open_generic,
8910 .read = trace_options_read,
8911 .write = trace_options_write,
8912 .llseek = generic_file_llseek,
8916 * In order to pass in both the trace_array descriptor as well as the index
8917 * to the flag that the trace option file represents, the trace_array
8918 * has a character array of trace_flags_index[], which holds the index
8919 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8920 * The address of this character array is passed to the flag option file
8921 * read/write callbacks.
8923 * In order to extract both the index and the trace_array descriptor,
8924 * get_tr_index() uses the following algorithm.
8928 * As the pointer itself contains the address of the index (remember
8931 * Then to get the trace_array descriptor, by subtracting that index
8932 * from the ptr, we get to the start of the index itself.
8934 * ptr - idx == &index[0]
8936 * Then a simple container_of() from that pointer gets us to the
8937 * trace_array descriptor.
8939 static void get_tr_index(void *data, struct trace_array **ptr,
8940 unsigned int *pindex)
8942 *pindex = *(unsigned char *)data;
8944 *ptr = container_of(data - *pindex, struct trace_array,
8949 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8952 void *tr_index = filp->private_data;
8953 struct trace_array *tr;
8957 get_tr_index(tr_index, &tr, &index);
8959 if (tr->trace_flags & (1 << index))
8964 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8968 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8971 void *tr_index = filp->private_data;
8972 struct trace_array *tr;
8977 get_tr_index(tr_index, &tr, &index);
8979 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8983 if (val != 0 && val != 1)
8986 mutex_lock(&event_mutex);
8987 mutex_lock(&trace_types_lock);
8988 ret = set_tracer_flag(tr, 1 << index, val);
8989 mutex_unlock(&trace_types_lock);
8990 mutex_unlock(&event_mutex);
9000 static const struct file_operations trace_options_core_fops = {
9001 .open = tracing_open_generic,
9002 .read = trace_options_core_read,
9003 .write = trace_options_core_write,
9004 .llseek = generic_file_llseek,
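/*
 * Usage sketch (illustrative option name): each core trace flag gets a
 * boolean file under options/, writable with 0 or 1, and the same flag can
 * be toggled through the combined trace_options file with a "no" prefix:
 *
 *   echo 1 > /sys/kernel/tracing/options/sym-offset
 *   echo nosym-offset > /sys/kernel/tracing/trace_options
 *
 * The set of files depends on the kernel configuration and, for
 * tracer-specific flags, on the current tracer.
 */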
9007 struct dentry *trace_create_file(const char *name,
9009 struct dentry *parent,
9011 const struct file_operations *fops)
9015 ret = tracefs_create_file(name, mode, parent, data, fops);
9017 pr_warn("Could not create tracefs '%s' entry\n", name);
9023 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9025 struct dentry *d_tracer;
9030 d_tracer = tracing_get_dentry(tr);
9031 if (IS_ERR(d_tracer))
9034 tr->options = tracefs_create_dir("options", d_tracer);
9036 pr_warn("Could not create tracefs directory 'options'\n");
9044 create_trace_option_file(struct trace_array *tr,
9045 struct trace_option_dentry *topt,
9046 struct tracer_flags *flags,
9047 struct tracer_opt *opt)
9049 struct dentry *t_options;
9051 t_options = trace_options_init_dentry(tr);
9055 topt->flags = flags;
9059 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9060 t_options, topt, &trace_options_fops);
9065 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9067 struct trace_option_dentry *topts;
9068 struct trace_options *tr_topts;
9069 struct tracer_flags *flags;
9070 struct tracer_opt *opts;
9077 flags = tracer->flags;
9079 if (!flags || !flags->opts)
9083 * If this is an instance, only create flags for tracers
9084 * the instance may have.
9086 if (!trace_ok_for_array(tracer, tr))
9089 for (i = 0; i < tr->nr_topts; i++) {
9090 /* Make sure there's no duplicate flags. */
9091 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9097 for (cnt = 0; opts[cnt].name; cnt++)
9100 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9104 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9111 tr->topts = tr_topts;
9112 tr->topts[tr->nr_topts].tracer = tracer;
9113 tr->topts[tr->nr_topts].topts = topts;
9116 for (cnt = 0; opts[cnt].name; cnt++) {
9117 create_trace_option_file(tr, &topts[cnt], flags,
9119 MEM_FAIL(topts[cnt].entry == NULL,
9120 "Failed to create trace option: %s",
9125 static struct dentry *
9126 create_trace_option_core_file(struct trace_array *tr,
9127 const char *option, long index)
9129 struct dentry *t_options;
9131 t_options = trace_options_init_dentry(tr);
9135 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9136 (void *)&tr->trace_flags_index[index],
9137 &trace_options_core_fops);
9140 static void create_trace_options_dir(struct trace_array *tr)
9142 struct dentry *t_options;
9143 bool top_level = tr == &global_trace;
9146 t_options = trace_options_init_dentry(tr);
9150 for (i = 0; trace_options[i]; i++) {
9152 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9153 create_trace_option_core_file(tr, trace_options[i], i);
9158 rb_simple_read(struct file *filp, char __user *ubuf,
9159 size_t cnt, loff_t *ppos)
9161 struct trace_array *tr = filp->private_data;
9165 r = tracer_tracing_is_on(tr);
9166 r = sprintf(buf, "%d\n", r);
9168 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9172 rb_simple_write(struct file *filp, const char __user *ubuf,
9173 size_t cnt, loff_t *ppos)
9175 struct trace_array *tr = filp->private_data;
9176 struct trace_buffer *buffer = tr->array_buffer.buffer;
9180 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9185 mutex_lock(&trace_types_lock);
9186 if (!!val == tracer_tracing_is_on(tr)) {
9187 val = 0; /* do nothing */
9189 tracer_tracing_on(tr);
9190 if (tr->current_trace->start)
9191 tr->current_trace->start(tr);
9193 tracer_tracing_off(tr);
9194 if (tr->current_trace->stop)
9195 tr->current_trace->stop(tr);
9196 /* Wake up any waiters */
9197 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9199 mutex_unlock(&trace_types_lock);
9207 static const struct file_operations rb_simple_fops = {
9208 .open = tracing_open_generic_tr,
9209 .read = rb_simple_read,
9210 .write = rb_simple_write,
9211 .release = tracing_release_generic_tr,
9212 .llseek = default_llseek,
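/*
 * Usage sketch (illustrative): tracing_on only gates recording into the ring
 * buffer; writing 0 stops recording (and wakes blocked readers, as above),
 * writing 1 resumes it, and reading reports the current state:
 *
 *   echo 0 > /sys/kernel/tracing/tracing_on
 *   cat /sys/kernel/tracing/trace
 *   echo 1 > /sys/kernel/tracing/tracing_on
 */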
9216 buffer_percent_read(struct file *filp, char __user *ubuf,
9217 size_t cnt, loff_t *ppos)
9219 struct trace_array *tr = filp->private_data;
9223 r = tr->buffer_percent;
9224 r = sprintf(buf, "%d\n", r);
9226 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9230 buffer_percent_write(struct file *filp, const char __user *ubuf,
9231 size_t cnt, loff_t *ppos)
9233 struct trace_array *tr = filp->private_data;
9237 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9244 tr->buffer_percent = val;
9251 static const struct file_operations buffer_percent_fops = {
9252 .open = tracing_open_generic_tr,
9253 .read = buffer_percent_read,
9254 .write = buffer_percent_write,
9255 .release = tracing_release_generic_tr,
9256 .llseek = default_llseek,
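/*
 * Usage sketch (illustrative): buffer_percent controls how full the ring
 * buffer must be before blocked readers are woken; 0 wakes on any data, 100
 * waits for a full buffer, and the default set in init_tracer_tracefs()
 * below is 50:
 *
 *   echo 0 > /sys/kernel/tracing/buffer_percent
 */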
9259 static struct dentry *trace_instance_dir;
9262 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9265 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9267 enum ring_buffer_flags rb_flags;
9269 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9273 buf->buffer = ring_buffer_alloc(size, rb_flags);
9277 buf->data = alloc_percpu(struct trace_array_cpu);
9279 ring_buffer_free(buf->buffer);
9284 /* Allocate the first page for all buffers */
9285 set_buffer_entries(&tr->array_buffer,
9286 ring_buffer_size(tr->array_buffer.buffer, 0));
9291 static void free_trace_buffer(struct array_buffer *buf)
9294 ring_buffer_free(buf->buffer);
9296 free_percpu(buf->data);
9301 static int allocate_trace_buffers(struct trace_array *tr, int size)
9305 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9309 #ifdef CONFIG_TRACER_MAX_TRACE
9310 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9311 allocate_snapshot ? size : 1);
9312 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9313 free_trace_buffer(&tr->array_buffer);
9316 tr->allocated_snapshot = allocate_snapshot;
9319 * Only the top level trace array gets its snapshot allocated
9320 * from the kernel command line.
9322 allocate_snapshot = false;
9328 static void free_trace_buffers(struct trace_array *tr)
9333 free_trace_buffer(&tr->array_buffer);
9335 #ifdef CONFIG_TRACER_MAX_TRACE
9336 free_trace_buffer(&tr->max_buffer);
9340 static void init_trace_flags_index(struct trace_array *tr)
9344 /* Used by the trace options files */
9345 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9346 tr->trace_flags_index[i] = i;
9349 static void __update_tracer_options(struct trace_array *tr)
9353 for (t = trace_types; t; t = t->next)
9354 add_tracer_options(tr, t);
9357 static void update_tracer_options(struct trace_array *tr)
9359 mutex_lock(&trace_types_lock);
9360 tracer_options_updated = true;
9361 __update_tracer_options(tr);
9362 mutex_unlock(&trace_types_lock);
9365 /* Must have trace_types_lock held */
9366 struct trace_array *trace_array_find(const char *instance)
9368 struct trace_array *tr, *found = NULL;
9370 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9371 if (tr->name && strcmp(tr->name, instance) == 0) {
9380 struct trace_array *trace_array_find_get(const char *instance)
9382 struct trace_array *tr;
9384 mutex_lock(&trace_types_lock);
9385 tr = trace_array_find(instance);
9388 mutex_unlock(&trace_types_lock);
9393 static int trace_array_create_dir(struct trace_array *tr)
9397 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9401 ret = event_trace_add_tracer(tr->dir, tr);
9403 tracefs_remove(tr->dir);
9407 init_tracer_tracefs(tr, tr->dir);
9408 __update_tracer_options(tr);
9413 static struct trace_array *trace_array_create(const char *name)
9415 struct trace_array *tr;
9419 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9421 return ERR_PTR(ret);
9423 tr->name = kstrdup(name, GFP_KERNEL);
9427 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9430 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9433 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9435 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9437 raw_spin_lock_init(&tr->start_lock);
9439 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9441 tr->current_trace = &nop_trace;
9443 INIT_LIST_HEAD(&tr->systems);
9444 INIT_LIST_HEAD(&tr->events);
9445 INIT_LIST_HEAD(&tr->hist_vars);
9446 INIT_LIST_HEAD(&tr->err_log);
9448 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9451 if (ftrace_allocate_ftrace_ops(tr) < 0)
9454 ftrace_init_trace_array(tr);
9456 init_trace_flags_index(tr);
9458 if (trace_instance_dir) {
9459 ret = trace_array_create_dir(tr);
9463 __trace_early_add_events(tr);
9465 list_add(&tr->list, &ftrace_trace_arrays);
9472 ftrace_free_ftrace_ops(tr);
9473 free_trace_buffers(tr);
9474 free_cpumask_var(tr->pipe_cpumask);
9475 free_cpumask_var(tr->tracing_cpumask);
9479 return ERR_PTR(ret);
9482 static int instance_mkdir(const char *name)
9484 struct trace_array *tr;
9487 mutex_lock(&event_mutex);
9488 mutex_lock(&trace_types_lock);
9491 if (trace_array_find(name))
9494 tr = trace_array_create(name);
9496 ret = PTR_ERR_OR_ZERO(tr);
9499 mutex_unlock(&trace_types_lock);
9500 mutex_unlock(&event_mutex);
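/*
 * Usage sketch (illustrative instance name): instances are created and
 * removed from user space with mkdir/rmdir in the instances directory; each
 * one gets its own ring buffer and its own copy of the files set up in
 * init_tracer_tracefs():
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   echo 1 > /sys/kernel/tracing/instances/foo/tracing_on
 *   rmdir /sys/kernel/tracing/instances/foo
 */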
9505 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9506 * @name: The name of the trace array to be looked up/created.
9508 * Returns pointer to trace array with given name.
9509 * NULL, if it cannot be created.
9511 * NOTE: This function increments the reference counter associated with the
9512 * trace array returned. This makes sure it cannot be freed while in use.
9513 * Use trace_array_put() once the trace array is no longer needed.
9514 * If the trace_array is to be freed, trace_array_destroy() needs to
9515 * be called after the trace_array_put(), or simply let user space delete
9516 * it from the tracefs instances directory. But until the
9517 * trace_array_put() is called, user space can not delete it.
9520 struct trace_array *trace_array_get_by_name(const char *name)
9522 struct trace_array *tr;
9524 mutex_lock(&event_mutex);
9525 mutex_lock(&trace_types_lock);
9527 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9528 if (tr->name && strcmp(tr->name, name) == 0)
9532 tr = trace_array_create(name);
9540 mutex_unlock(&trace_types_lock);
9541 mutex_unlock(&event_mutex);
9544 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
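/*
 * In-kernel caller sketch (illustrative instance name, error handling
 * trimmed): a module can create or look up a private instance, must drop its
 * reference with trace_array_put(), and may then destroy an instance it
 * created:
 *
 *   struct trace_array *tr = trace_array_get_by_name("my_inst");
 *
 *   if (tr) {
 *           ...
 *           trace_array_put(tr);
 *           trace_array_destroy(tr);
 *   }
 */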
9546 static int __remove_instance(struct trace_array *tr)
9550 /* Reference counter for a newly created trace array = 1. */
9551 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9554 list_del(&tr->list);
9556 /* Disable all the flags that were enabled coming in */
9557 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9558 if ((1 << i) & ZEROED_TRACE_FLAGS)
9559 set_tracer_flag(tr, 1 << i, 0);
9562 tracing_set_nop(tr);
9563 clear_ftrace_function_probes(tr);
9564 event_trace_del_tracer(tr);
9565 ftrace_clear_pids(tr);
9566 ftrace_destroy_function_files(tr);
9567 tracefs_remove(tr->dir);
9568 free_percpu(tr->last_func_repeats);
9569 free_trace_buffers(tr);
9570 clear_tracing_err_log(tr);
9572 for (i = 0; i < tr->nr_topts; i++) {
9573 kfree(tr->topts[i].topts);
9577 free_cpumask_var(tr->pipe_cpumask);
9578 free_cpumask_var(tr->tracing_cpumask);
9585 int trace_array_destroy(struct trace_array *this_tr)
9587 struct trace_array *tr;
9593 mutex_lock(&event_mutex);
9594 mutex_lock(&trace_types_lock);
9598 /* Making sure trace array exists before destroying it. */
9599 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9600 if (tr == this_tr) {
9601 ret = __remove_instance(tr);
9606 mutex_unlock(&trace_types_lock);
9607 mutex_unlock(&event_mutex);
9611 EXPORT_SYMBOL_GPL(trace_array_destroy);
9613 static int instance_rmdir(const char *name)
9615 struct trace_array *tr;
9618 mutex_lock(&event_mutex);
9619 mutex_lock(&trace_types_lock);
9622 tr = trace_array_find(name);
9624 ret = __remove_instance(tr);
9626 mutex_unlock(&trace_types_lock);
9627 mutex_unlock(&event_mutex);
9632 static __init void create_trace_instances(struct dentry *d_tracer)
9634 struct trace_array *tr;
9636 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9639 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9642 mutex_lock(&event_mutex);
9643 mutex_lock(&trace_types_lock);
9645 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9648 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9649 "Failed to create instance directory\n"))
9653 mutex_unlock(&trace_types_lock);
9654 mutex_unlock(&event_mutex);
9658 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9660 struct trace_event_file *file;
9663 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9664 tr, &show_traces_fops);
9666 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9667 tr, &set_tracer_fops);
9669 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9670 tr, &tracing_cpumask_fops);
9672 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9673 tr, &tracing_iter_fops);
9675 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9678 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9679 tr, &tracing_pipe_fops);
9681 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9682 tr, &tracing_entries_fops);
9684 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9685 tr, &tracing_total_entries_fops);
9687 trace_create_file("free_buffer", 0200, d_tracer,
9688 tr, &tracing_free_buffer_fops);
9690 trace_create_file("trace_marker", 0220, d_tracer,
9691 tr, &tracing_mark_fops);
9693 file = __find_event_file(tr, "ftrace", "print");
9694 if (file && file->dir)
9695 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9696 file, &event_trigger_fops);
9697 tr->trace_marker_file = file;
9699 trace_create_file("trace_marker_raw", 0220, d_tracer,
9700 tr, &tracing_mark_raw_fops);
9702 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9705 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9706 tr, &rb_simple_fops);
9708 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9709 &trace_time_stamp_mode_fops);
9711 tr->buffer_percent = 50;
9713 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9714 tr, &buffer_percent_fops);
9716 create_trace_options_dir(tr);
9718 #ifdef CONFIG_TRACER_MAX_TRACE
9719 trace_create_maxlat_file(tr, d_tracer);
9722 if (ftrace_create_function_files(tr, d_tracer))
9723 MEM_FAIL(1, "Could not allocate function filter files");
9725 #ifdef CONFIG_TRACER_SNAPSHOT
9726 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9727 tr, &snapshot_fops);
9730 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9731 tr, &tracing_err_log_fops);
9733 for_each_tracing_cpu(cpu)
9734 tracing_init_tracefs_percpu(tr, cpu);
9736 ftrace_init_tracefs(tr, d_tracer);
9739 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9741 struct vfsmount *mnt;
9742 struct file_system_type *type;
9745 * To maintain backward compatibility for tools that mount
9746 * debugfs to get to the tracing facility, tracefs is automatically
9747 * mounted to the debugfs/tracing directory.
9749 type = get_fs_type("tracefs");
9752 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9753 put_filesystem(type);
9762 * tracing_init_dentry - initialize top level trace array
9764 * This is called when creating files or directories in the tracing
9765 * directory. It is called via fs_initcall() by any of the boot up code
9766 * and expects to return the dentry of the top level tracing directory.
9768 int tracing_init_dentry(void)
9770 struct trace_array *tr = &global_trace;
9772 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9773 pr_warn("Tracing disabled due to lockdown\n");
9777 /* The top level trace array uses NULL as parent */
9781 if (WARN_ON(!tracefs_initialized()))
9785 * As there may still be users that expect the tracing
9786 * files to exist in debugfs/tracing, we must automount
9787 * the tracefs file system there, so older tools still
9788 * work with the newer kernel.
9790 tr->dir = debugfs_create_automount("tracing", NULL,
9791 trace_automount, NULL);
9796 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9797 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9799 static struct workqueue_struct *eval_map_wq __initdata;
9800 static struct work_struct eval_map_work __initdata;
9801 static struct work_struct tracerfs_init_work __initdata;
9803 static void __init eval_map_work_func(struct work_struct *work)
9807 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9808 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9811 static int __init trace_eval_init(void)
9813 INIT_WORK(&eval_map_work, eval_map_work_func);
9815 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9817 pr_err("Unable to allocate eval_map_wq\n");
9819 eval_map_work_func(&eval_map_work);
9823 queue_work(eval_map_wq, &eval_map_work);
9827 subsys_initcall(trace_eval_init);
9829 static int __init trace_eval_sync(void)
9831 /* Make sure the eval map updates are finished */
9833 destroy_workqueue(eval_map_wq);
9837 late_initcall_sync(trace_eval_sync);
9840 #ifdef CONFIG_MODULES
9841 static void trace_module_add_evals(struct module *mod)
9843 if (!mod->num_trace_evals)
9847 * Modules with bad taint do not have events created; do
9848 * not bother with enums either.
9850 if (trace_module_has_bad_taint(mod))
9853 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9856 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9857 static void trace_module_remove_evals(struct module *mod)
9859 union trace_eval_map_item *map;
9860 union trace_eval_map_item **last = &trace_eval_maps;
9862 if (!mod->num_trace_evals)
9865 mutex_lock(&trace_eval_mutex);
9867 map = trace_eval_maps;
9870 if (map->head.mod == mod)
9872 map = trace_eval_jmp_to_tail(map);
9873 last = &map->tail.next;
9874 map = map->tail.next;
9879 *last = trace_eval_jmp_to_tail(map)->tail.next;
9882 mutex_unlock(&trace_eval_mutex);
9885 static inline void trace_module_remove_evals(struct module *mod) { }
9886 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9888 static int trace_module_notify(struct notifier_block *self,
9889 unsigned long val, void *data)
9891 struct module *mod = data;
9894 case MODULE_STATE_COMING:
9895 trace_module_add_evals(mod);
9897 case MODULE_STATE_GOING:
9898 trace_module_remove_evals(mod);
9905 static struct notifier_block trace_module_nb = {
9906 .notifier_call = trace_module_notify,
9909 #endif /* CONFIG_MODULES */
static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}
static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();
	return 0;
}

fs_initcall(tracer_init_tracefs);
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150	/* priority: INT_MAX >= x >= 0 */
};
static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
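/*
 * Usage note (sketch, not from this file): both notifiers above only act
 * when ftrace_dump_on_oops is non-zero.  That is typically arranged with
 * the "ftrace_dump_on_oops" kernel command line option, or at run time via
 * the kernel.ftrace_dump_on_oops sysctl, e.g.:
 *
 *	sysctl kernel.ftrace_dump_on_oops=1
 */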
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
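/*
 * Example (sketch only; "cpu" and "cnt" are made-up variables): a caller
 * fills a trace_seq and then hands it to trace_printk_seq() to push the
 * text out through printk:
 *
 *	struct trace_seq s;
 *
 *	trace_seq_init(&s);
 *	trace_seq_printf(&s, "cpu %d handled %lu events\n", cpu, cnt);
 *	trace_printk_seq(&s);
 *
 * trace_printk_seq() re-initializes the sequence, so "s" can be reused for
 * the next line of output.
 */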
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
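/*
 * Example (illustrative only; device_is_wedged() is a made-up helper):
 * because ftrace_dump() is exported, other kernel code can flush the ring
 * buffer to the console when it hits a fatal condition, e.g.:
 *
 *	if (WARN_ON(device_is_wedged(dev)))
 *		ftrace_dump(DUMP_ALL);
 *
 * DUMP_ORIG would instead restrict the dump to the CPU that called
 * ftrace_dump(), mirroring the "=orig_cpu" variant of ftrace_dump_on_oops.
 */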
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);
	return ret;
}
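/*
 * Example (sketch; my_create_cmd() and my_write() are hypothetical names):
 * this helper is the common write path behind command files such as
 * kprobe_events.  A user of it only supplies the per-line callback:
 *
 *	static int my_create_cmd(const char *raw_command)
 *	{
 *		pr_info("parsed command: %s\n", raw_command);
 *		return 0;
 *	}
 *
 * (returning non-zero from the callback aborts the remaining lines) and
 * then wires it up as the file's write method:
 *
 *	static ssize_t my_write(struct file *file, const char __user *ubuf,
 *				size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, ubuf, count, ppos,
 *					       my_create_cmd);
 *	}
 */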
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init ftrace_boot_snapshot(void)
{
	if (snapshot_at_boot) {
		tracing_snapshot();
		internal_trace_puts("** Boot snapshot taken **\n");
	}
}
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
void __init trace_init(void)
{
	trace_event_init();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
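/*
 * Usage note (not from this file): the same choice can also be made at run
 * time by writing a clock name such as "local" or "global" to the tracefs
 * trace_clock file, e.g.:
 *
 *	echo local > /sys/kernel/tracing/trace_clock
 */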
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);