1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
54 #include "trace_output.h"
57 * On boot up, the ring buffer is set to the minimum size, so that
58 * we do not waste memory on systems that are not using tracing.
60 bool ring_buffer_expanded;
63 * We need to change this state when a selftest is running.
64 * A selftest will look into the ring buffer to count the
65 * entries inserted during the selftest, although concurrent
66 * insertions into the ring buffer, such as trace_printk, could occur
67 * at the same time, giving false positive or negative results.
69 static bool __read_mostly tracing_selftest_running;
72 * If boot-time tracing (including tracers/events set via the kernel cmdline)
73 * is running, we do not want to run the selftests.
75 bool __read_mostly tracing_selftest_disabled;
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
80 if (!tracing_selftest_disabled) {
81 tracing_selftest_disabled = true;
82 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
105 * To prevent the comm cache from being overwritten when no
106 * tracing is active, only save the comm when a trace event
109 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
112 * Kill all tracing for good (never come back).
113 * It is initialized to 1 but will be set to zero if the initialization
114 * of the tracer is successful. But that is the only place that sets
117 static int tracing_disabled = 1;
119 cpumask_var_t __read_mostly tracing_buffer_mask;
122 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
124 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
125 * is set, then ftrace_dump is called. This will output the contents
126 * of the ftrace buffers to the console. This is very useful for
127 * capturing traces that lead to crashes and outputting them to a
130 * It is default off, but you can enable it with either specifying
131 * "ftrace_dump_on_oops" in the kernel command line, or setting
132 * /proc/sys/kernel/ftrace_dump_on_oops
133 * Set 1 if you want to dump buffers of all CPUs
134 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
137 enum ftrace_dump_mode ftrace_dump_on_oops;
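/*
 * For example (illustrative; the sysctl path assumes a standard procfs
 * layout): boot with "ftrace_dump_on_oops" on the kernel command line,
 * or at run time:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(dump all CPUs)
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops	(dump only the oopsing CPU)
 */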
139 /* When set, tracing will stop when a WARN*() is hit */
140 int __disable_trace_on_warning;
142 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
143 /* Map of enums to their values, for "eval_map" file */
144 struct trace_eval_map_head {
146 unsigned long length;
149 union trace_eval_map_item;
151 struct trace_eval_map_tail {
153 * "end" is first and points to NULL as it must be different
154 * from "mod" or "eval_string"
156 union trace_eval_map_item *next;
157 const char *end; /* points to NULL */
160 static DEFINE_MUTEX(trace_eval_mutex);
163 * The trace_eval_maps are saved in an array with two extra elements,
164 * one at the beginning, and one at the end. The beginning item contains
165 * the count of the saved maps (head.length), and the module they
166 * belong to if not built in (head.mod). The ending item contains a
167 * pointer to the next array of saved eval_map items.
169 union trace_eval_map_item {
170 struct trace_eval_map map;
171 struct trace_eval_map_head head;
172 struct trace_eval_map_tail tail;
175 static union trace_eval_map_item *trace_eval_maps;
176 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
178 int tracing_set_tracer(struct trace_array *tr, const char *buf);
179 static void ftrace_trace_userstack(struct trace_array *tr,
180 struct trace_buffer *buffer,
181 unsigned int trace_ctx);
183 #define MAX_TRACER_SIZE 100
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
187 static bool allocate_snapshot;
188 static bool snapshot_at_boot;
190 static int __init set_cmdline_ftrace(char *str)
192 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
193 default_bootup_tracer = bootup_tracer_buf;
194 /* We are using ftrace early, expand it */
195 ring_buffer_expanded = true;
198 __setup("ftrace=", set_cmdline_ftrace);
200 static int __init set_ftrace_dump_on_oops(char *str)
202 if (*str++ != '=' || !*str || !strcmp("1", str)) {
203 ftrace_dump_on_oops = DUMP_ALL;
207 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
208 ftrace_dump_on_oops = DUMP_ORIG;
214 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
216 static int __init stop_trace_on_warning(char *str)
218 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
219 __disable_trace_on_warning = 1;
222 __setup("traceoff_on_warning", stop_trace_on_warning);
224 static int __init boot_alloc_snapshot(char *str)
226 allocate_snapshot = true;
227 /* We also need the main ring buffer expanded */
228 ring_buffer_expanded = true;
231 __setup("alloc_snapshot", boot_alloc_snapshot);
234 static int __init boot_snapshot(char *str)
236 snapshot_at_boot = true;
237 boot_alloc_snapshot(str);
240 __setup("ftrace_boot_snapshot", boot_snapshot);
243 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
245 static int __init set_trace_boot_options(char *str)
247 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
250 __setup("trace_options=", set_trace_boot_options);
252 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
253 static char *trace_boot_clock __initdata;
255 static int __init set_trace_boot_clock(char *str)
257 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
258 trace_boot_clock = trace_boot_clock_buf;
261 __setup("trace_clock=", set_trace_boot_clock);
263 static int __init set_tracepoint_printk(char *str)
265 /* Ignore the "tp_printk_stop_on_boot" param */
269 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
270 tracepoint_printk = 1;
273 __setup("tp_printk", set_tracepoint_printk);
275 static int __init set_tracepoint_printk_stop(char *str)
277 tracepoint_printk_stop_on_boot = true;
280 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
282 unsigned long long ns2usecs(u64 nsec)
290 trace_process_export(struct trace_export *export,
291 struct ring_buffer_event *event, int flag)
293 struct trace_entry *entry;
294 unsigned int size = 0;
296 if (export->flags & flag) {
297 entry = ring_buffer_event_data(event);
298 size = ring_buffer_event_length(event);
299 export->write(export, entry, size);
303 static DEFINE_MUTEX(ftrace_export_lock);
305 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
307 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
308 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
309 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
311 static inline void ftrace_exports_enable(struct trace_export *export)
313 if (export->flags & TRACE_EXPORT_FUNCTION)
314 static_branch_inc(&trace_function_exports_enabled);
316 if (export->flags & TRACE_EXPORT_EVENT)
317 static_branch_inc(&trace_event_exports_enabled);
319 if (export->flags & TRACE_EXPORT_MARKER)
320 static_branch_inc(&trace_marker_exports_enabled);
323 static inline void ftrace_exports_disable(struct trace_export *export)
325 if (export->flags & TRACE_EXPORT_FUNCTION)
326 static_branch_dec(&trace_function_exports_enabled);
328 if (export->flags & TRACE_EXPORT_EVENT)
329 static_branch_dec(&trace_event_exports_enabled);
331 if (export->flags & TRACE_EXPORT_MARKER)
332 static_branch_dec(&trace_marker_exports_enabled);
335 static void ftrace_exports(struct ring_buffer_event *event, int flag)
337 struct trace_export *export;
339 preempt_disable_notrace();
341 export = rcu_dereference_raw_check(ftrace_exports_list);
343 trace_process_export(export, event, flag);
344 export = rcu_dereference_raw_check(export->next);
347 preempt_enable_notrace();
351 add_trace_export(struct trace_export **list, struct trace_export *export)
353 rcu_assign_pointer(export->next, *list);
355 * We are adding the export to the list, but another
356 * CPU might be walking that list. We need to make sure
357 * the export->next pointer is valid before another CPU sees
358 * the export pointer inserted into the list.
360 rcu_assign_pointer(*list, export);
364 rm_trace_export(struct trace_export **list, struct trace_export *export)
366 struct trace_export **p;
368 for (p = list; *p != NULL; p = &(*p)->next)
375 rcu_assign_pointer(*p, (*p)->next);
381 add_ftrace_export(struct trace_export **list, struct trace_export *export)
383 ftrace_exports_enable(export);
385 add_trace_export(list, export);
389 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
393 ret = rm_trace_export(list, export);
394 ftrace_exports_disable(export);
399 int register_ftrace_export(struct trace_export *export)
401 if (WARN_ON_ONCE(!export->write))
404 mutex_lock(&ftrace_export_lock);
406 add_ftrace_export(&ftrace_exports_list, export);
408 mutex_unlock(&ftrace_export_lock);
412 EXPORT_SYMBOL_GPL(register_ftrace_export);
414 int unregister_ftrace_export(struct trace_export *export)
418 mutex_lock(&ftrace_export_lock);
420 ret = rm_ftrace_export(&ftrace_exports_list, export);
422 mutex_unlock(&ftrace_export_lock);
426 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
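/*
 * Illustrative sketch (not part of this file) of the export interface in
 * use. The names my_write/my_export are made up, and the exact ->write()
 * prototype should be checked against include/linux/trace.h:
 *
 *	static void my_write(struct trace_export *export,
 *			     const void *entry, unsigned int size)
 *	{
 *		... forward @size bytes starting at @entry to some sink ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */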
428 /* trace_flags holds trace_options default values */
429 #define TRACE_DEFAULT_FLAGS \
430 (FUNCTION_DEFAULT_FLAGS | \
431 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
432 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
433 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
434 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
437 /* trace_options that are only supported by global_trace */
438 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
439 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
441 /* trace_flags that are default zero for instances */
442 #define ZEROED_TRACE_FLAGS \
443 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
446 * The global_trace is the descriptor that holds the top-level tracing
447 * buffers for the live tracing.
449 static struct trace_array global_trace = {
450 .trace_flags = TRACE_DEFAULT_FLAGS,
453 LIST_HEAD(ftrace_trace_arrays);
455 int trace_array_get(struct trace_array *this_tr)
457 struct trace_array *tr;
460 mutex_lock(&trace_types_lock);
461 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
468 mutex_unlock(&trace_types_lock);
473 static void __trace_array_put(struct trace_array *this_tr)
475 WARN_ON(!this_tr->ref);
480 * trace_array_put - Decrement the reference counter for this trace array.
481 * @this_tr : pointer to the trace array
483 * NOTE: Use this when we no longer need the trace array returned by
484 * trace_array_get_by_name(). This ensures the trace array can be later
488 void trace_array_put(struct trace_array *this_tr)
493 mutex_lock(&trace_types_lock);
494 __trace_array_put(this_tr);
495 mutex_unlock(&trace_types_lock);
497 EXPORT_SYMBOL_GPL(trace_array_put);
499 int tracing_check_open_get_tr(struct trace_array *tr)
503 ret = security_locked_down(LOCKDOWN_TRACEFS);
507 if (tracing_disabled)
510 if (tr && trace_array_get(tr) < 0)
516 int call_filter_check_discard(struct trace_event_call *call, void *rec,
517 struct trace_buffer *buffer,
518 struct ring_buffer_event *event)
520 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
521 !filter_match_preds(call->filter, rec)) {
522 __trace_event_discard_commit(buffer, event);
530 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
531 * @filtered_pids: The list of pids to check
532 * @search_pid: The PID to find in @filtered_pids
534 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
537 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
539 return trace_pid_list_is_set(filtered_pids, search_pid);
543 * trace_ignore_this_task - should a task be ignored for tracing
544 * @filtered_pids: The list of pids to check
545 * @filtered_no_pids: The list of pids not to be traced
546 * @task: The task that should be ignored if not filtered
548 * Checks if @task should be traced or not from @filtered_pids.
549 * Returns true if @task should *NOT* be traced.
550 * Returns false if @task should be traced.
553 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
554 struct trace_pid_list *filtered_no_pids,
555 struct task_struct *task)
558 * If filtered_no_pids is not empty, and the task's pid is listed
559 * in filtered_no_pids, then return true.
560 * Otherwise, if filtered_pids is empty, that means we can
561 * trace all tasks. If it has content, then only trace pids
562 * within filtered_pids.
565 return (filtered_pids &&
566 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
568 trace_find_filtered_pid(filtered_no_pids, task->pid));
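/*
 * Worked example (illustrative): if @filtered_no_pids contains 100 and
 * @filtered_pids contains 42, then:
 *	pid 100 -> ignored (listed in the "no" list)
 *	pid  42 -> traced  (listed in the "yes" list)
 *	pid   7 -> ignored (a "yes" list exists and 7 is not in it)
 * With both lists NULL/empty, every task is traced.
 */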
572 * trace_filter_add_remove_task - Add or remove a task from a pid_list
573 * @pid_list: The list to modify
574 * @self: The current task for fork or NULL for exit
575 * @task: The task to add or remove
577 * When adding a task, if @self is defined, the task is only added if @self
578 * is also included in @pid_list. This happens on fork, and tasks should
579 * only be added when the parent is listed. If @self is NULL, then the
580 * @task pid will be removed from the list, which would happen on exit
583 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
584 struct task_struct *self,
585 struct task_struct *task)
590 /* For forks, we only add if the forking task is listed */
592 if (!trace_find_filtered_pid(pid_list, self->pid))
596 /* "self" is set for forks, and NULL for exits */
598 trace_pid_list_set(pid_list, task->pid);
600 trace_pid_list_clear(pid_list, task->pid);
604 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
605 * @pid_list: The pid list to show
606 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
607 * @pos: The position of the file
609 * This is used by the seq_file "next" operation to iterate the pids
610 * listed in a trace_pid_list structure.
612 * Returns the pid+1 as we want to display pid of zero, but NULL would
613 * stop the iteration.
615 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
617 long pid = (unsigned long)v;
622 /* pid already is +1 of the actual previous bit */
623 if (trace_pid_list_next(pid_list, pid, &next) < 0)
628 /* Return pid + 1 to allow zero to be represented */
629 return (void *)(pid + 1);
633 * trace_pid_start - Used for seq_file to start reading pid lists
634 * @pid_list: The pid list to show
635 * @pos: The position of the file
637 * This is used by seq_file "start" operation to start the iteration
640 * Returns the pid+1 as we want to display pid of zero, but NULL would
641 * stop the iteration.
643 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
649 if (trace_pid_list_first(pid_list, &first) < 0)
654 /* Return pid + 1 so that zero can be the exit value */
655 for (pid++; pid && l < *pos;
656 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
662 * trace_pid_show - show the current pid in seq_file processing
663 * @m: The seq_file structure to write into
664 * @v: A void pointer of the pid (+1) value to display
666 * Can be directly used by seq_file operations to display the current
669 int trace_pid_show(struct seq_file *m, void *v)
671 unsigned long pid = (unsigned long)v - 1;
673 seq_printf(m, "%lu\n", pid);
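/*
 * These pid helpers are meant to be wired into a seq_operations table by
 * the files that expose pid lists. A rough sketch (illustrative; the
 * my_pid_* callbacks are assumed wrappers that pick up the right
 * trace_pid_list and take the needed locks):
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_pid_start,		(calls trace_pid_start())
 *		.next	= my_pid_next,		(calls trace_pid_next())
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */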
677 /* 128 (PID_BUF_SIZE + 1, as passed to the parser below) should be much more than enough */
678 #define PID_BUF_SIZE 127
680 int trace_pid_write(struct trace_pid_list *filtered_pids,
681 struct trace_pid_list **new_pid_list,
682 const char __user *ubuf, size_t cnt)
684 struct trace_pid_list *pid_list;
685 struct trace_parser parser;
693 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
697 * Always create a new array. The write is an all-or-nothing
698 * operation; a new array is created whenever the user adds new
699 * pids. If the operation fails, then the current list is
702 pid_list = trace_pid_list_alloc();
704 trace_parser_put(&parser);
709 /* copy the current bits to the new max */
710 ret = trace_pid_list_first(filtered_pids, &pid);
712 trace_pid_list_set(pid_list, pid);
713 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
723 ret = trace_get_user(&parser, ubuf, cnt, &pos);
731 if (!trace_parser_loaded(&parser))
735 if (kstrtoul(parser.buffer, 0, &val))
740 if (trace_pid_list_set(pid_list, pid) < 0) {
746 trace_parser_clear(&parser);
749 trace_parser_put(&parser);
752 trace_pid_list_free(pid_list);
757 /* Cleared the list of pids */
758 trace_pid_list_free(pid_list);
762 *new_pid_list = pid_list;
767 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
771 /* Early boot up does not have a buffer yet */
773 return trace_clock_local();
775 ts = ring_buffer_time_stamp(buf->buffer);
776 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
781 u64 ftrace_now(int cpu)
783 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
787 * tracing_is_enabled - Show if global_trace has been enabled
789 * Shows if the global trace has been enabled or not. It uses the
790 * mirror flag "buffer_disabled" so it can be used in fast paths, such as
791 * by the irqsoff tracer. But it may be inaccurate due to races. If you
792 * need to know the accurate state, use tracing_is_on(), which is a little
793 * slower but accurate.
795 int tracing_is_enabled(void)
798 * For quick access (irqsoff uses this in fast path), just
799 * return the mirror variable of the state of the ring buffer.
800 * It's a little racy, but we don't really care.
803 return !global_trace.buffer_disabled;
807 * trace_buf_size is the size in bytes that is allocated
808 * for a buffer. Note, the number of bytes is always rounded
811 * This number is purposely set to a low number of 16384 entries.
812 * If a dump on oops happens, it is much appreciated not to have
813 * to wait for all that output. In any case, this is configurable
814 * at both boot time and run time.
816 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
818 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
820 /* trace_types holds a link list of available tracers. */
821 static struct tracer *trace_types __read_mostly;
824 * trace_types_lock is used to protect the trace_types list.
826 DEFINE_MUTEX(trace_types_lock);
829 * Serialize access to the ring buffer.
831 * The ring buffer serializes readers, but that is only low-level protection.
832 * The validity of the events (as returned by ring_buffer_peek() etc.)
833 * is not protected by the ring buffer.
835 * The content of events may become garbage if we allow another process to
836 * consume these events concurrently:
837 * A) the page of the consumed events may become a normal page
838 * (not a reader page) in the ring buffer, and this page will be rewritten
839 * by the events producer.
840 * B) the page of the consumed events may become a page for splice_read,
841 * and this page will be returned to the system.
843 * These primitives allow multiple processes to access different per-CPU ring buffers
846 * These primitives don't distinguish read-only and read-consume access.
847 * Multiple read-only accesses are also serialized.
851 static DECLARE_RWSEM(all_cpu_access_lock);
852 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
854 static inline void trace_access_lock(int cpu)
856 if (cpu == RING_BUFFER_ALL_CPUS) {
857 /* gain it for accessing the whole ring buffer. */
858 down_write(&all_cpu_access_lock);
860 /* gain it for accessing a cpu ring buffer. */
862 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
863 down_read(&all_cpu_access_lock);
865 /* Secondly block other access to this @cpu ring buffer. */
866 mutex_lock(&per_cpu(cpu_access_lock, cpu));
870 static inline void trace_access_unlock(int cpu)
872 if (cpu == RING_BUFFER_ALL_CPUS) {
873 up_write(&all_cpu_access_lock);
875 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
876 up_read(&all_cpu_access_lock);
880 static inline void trace_access_lock_init(void)
884 for_each_possible_cpu(cpu)
885 mutex_init(&per_cpu(cpu_access_lock, cpu));
890 static DEFINE_MUTEX(access_lock);
892 static inline void trace_access_lock(int cpu)
895 mutex_lock(&access_lock);
898 static inline void trace_access_unlock(int cpu)
901 mutex_unlock(&access_lock);
904 static inline void trace_access_lock_init(void)
910 #ifdef CONFIG_STACKTRACE
911 static void __ftrace_trace_stack(struct trace_buffer *buffer,
912 unsigned int trace_ctx,
913 int skip, struct pt_regs *regs);
914 static inline void ftrace_trace_stack(struct trace_array *tr,
915 struct trace_buffer *buffer,
916 unsigned int trace_ctx,
917 int skip, struct pt_regs *regs);
920 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
921 unsigned int trace_ctx,
922 int skip, struct pt_regs *regs)
925 static inline void ftrace_trace_stack(struct trace_array *tr,
926 struct trace_buffer *buffer,
927 unsigned long trace_ctx,
928 int skip, struct pt_regs *regs)
934 static __always_inline void
935 trace_event_setup(struct ring_buffer_event *event,
936 int type, unsigned int trace_ctx)
938 struct trace_entry *ent = ring_buffer_event_data(event);
940 tracing_generic_entry_update(ent, type, trace_ctx);
943 static __always_inline struct ring_buffer_event *
944 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
947 unsigned int trace_ctx)
949 struct ring_buffer_event *event;
951 event = ring_buffer_lock_reserve(buffer, len);
953 trace_event_setup(event, type, trace_ctx);
958 void tracer_tracing_on(struct trace_array *tr)
960 if (tr->array_buffer.buffer)
961 ring_buffer_record_on(tr->array_buffer.buffer);
963 * This flag is looked at when buffers haven't been allocated
964 * yet, or by some tracers (like irqsoff) that just want to
965 * know if the ring buffer has been disabled, but it can handle
966 * races where it gets disabled while we still do a record.
967 * As the check is in the fast path of the tracers, it is more
968 * important to be fast than accurate.
970 tr->buffer_disabled = 0;
971 /* Make the flag seen by readers */
976 * tracing_on - enable tracing buffers
978 * This function enables tracing buffers that may have been
979 * disabled with tracing_off.
981 void tracing_on(void)
983 tracer_tracing_on(&global_trace);
985 EXPORT_SYMBOL_GPL(tracing_on);
988 static __always_inline void
989 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
991 __this_cpu_write(trace_taskinfo_save, true);
993 /* If this is the temp buffer, we need to commit fully */
994 if (this_cpu_read(trace_buffered_event) == event) {
995 /* Length is in event->array[0] */
996 ring_buffer_write(buffer, event->array[0], &event->array[1]);
997 /* Release the temp buffer */
998 this_cpu_dec(trace_buffered_event_cnt);
999 /* ring_buffer_unlock_commit() enables preemption */
1000 preempt_enable_notrace();
1002 ring_buffer_unlock_commit(buffer, event);
1006 * __trace_puts - write a constant string into the trace buffer.
1007 * @ip: The address of the caller
1008 * @str: The constant string to write
1009 * @size: The size of the string.
1011 int __trace_puts(unsigned long ip, const char *str, int size)
1013 struct ring_buffer_event *event;
1014 struct trace_buffer *buffer;
1015 struct print_entry *entry;
1016 unsigned int trace_ctx;
1019 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1022 if (unlikely(tracing_selftest_running || tracing_disabled))
1025 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1027 trace_ctx = tracing_gen_ctx();
1028 buffer = global_trace.array_buffer.buffer;
1029 ring_buffer_nest_start(buffer);
1030 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1037 entry = ring_buffer_event_data(event);
1040 memcpy(&entry->buf, str, size);
1042 /* Add a newline if necessary */
1043 if (entry->buf[size - 1] != '\n') {
1044 entry->buf[size] = '\n';
1045 entry->buf[size + 1] = '\0';
1047 entry->buf[size] = '\0';
1049 __buffer_unlock_commit(buffer, event);
1050 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1052 ring_buffer_nest_end(buffer);
1055 EXPORT_SYMBOL_GPL(__trace_puts);
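/*
 * Callers normally do not use __trace_puts() directly but go through the
 * trace_puts() macro, which supplies the caller address and length and
 * picks between __trace_puts() and __trace_bputs(), e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */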
1058 * __trace_bputs - write the pointer to a constant string into trace buffer
1059 * @ip: The address of the caller
1060 * @str: The constant string to write to the buffer to
1062 int __trace_bputs(unsigned long ip, const char *str)
1064 struct ring_buffer_event *event;
1065 struct trace_buffer *buffer;
1066 struct bputs_entry *entry;
1067 unsigned int trace_ctx;
1068 int size = sizeof(struct bputs_entry);
1071 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1074 if (unlikely(tracing_selftest_running || tracing_disabled))
1077 trace_ctx = tracing_gen_ctx();
1078 buffer = global_trace.array_buffer.buffer;
1080 ring_buffer_nest_start(buffer);
1081 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1086 entry = ring_buffer_event_data(event);
1090 __buffer_unlock_commit(buffer, event);
1091 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1095 ring_buffer_nest_end(buffer);
1098 EXPORT_SYMBOL_GPL(__trace_bputs);
1100 #ifdef CONFIG_TRACER_SNAPSHOT
1101 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1104 struct tracer *tracer = tr->current_trace;
1105 unsigned long flags;
1108 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1109 internal_trace_puts("*** snapshot is being ignored ***\n");
1113 if (!tr->allocated_snapshot) {
1114 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1115 internal_trace_puts("*** stopping trace here! ***\n");
1120 /* Note, snapshot can not be used when the tracer uses it */
1121 if (tracer->use_max_tr) {
1122 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1123 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1127 local_irq_save(flags);
1128 update_max_tr(tr, current, smp_processor_id(), cond_data);
1129 local_irq_restore(flags);
1132 void tracing_snapshot_instance(struct trace_array *tr)
1134 tracing_snapshot_instance_cond(tr, NULL);
1138 * tracing_snapshot - take a snapshot of the current buffer.
1140 * This causes a swap between the snapshot buffer and the current live
1141 * tracing buffer. You can use this to take snapshots of the live
1142 * trace when some condition is triggered, but continue to trace.
1144 * Note, make sure to allocate the snapshot either with
1145 * tracing_snapshot_alloc(), or manually
1146 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1148 * If the snapshot buffer is not allocated, this will stop tracing,
1149 * basically making a permanent snapshot.
1151 void tracing_snapshot(void)
1153 struct trace_array *tr = &global_trace;
1155 tracing_snapshot_instance(tr);
1157 EXPORT_SYMBOL_GPL(tracing_snapshot);
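/*
 * Illustrative usage sketch: allocate the spare buffer once from a context
 * that may sleep, then snapshots can be taken later even from contexts
 * that cannot:
 *
 *	tracing_alloc_snapshot();	(or tracing_snapshot_alloc())
 *	...
 *	tracing_snapshot();		(swaps the live and snapshot buffers)
 */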
1160 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1161 * @tr: The tracing instance to snapshot
1162 * @cond_data: The data to be tested conditionally, and possibly saved
1164 * This is the same as tracing_snapshot() except that the snapshot is
1165 * conditional - the snapshot will only happen if the
1166 * cond_snapshot.update() implementation receiving the cond_data
1167 * returns true, which means that the trace array's cond_snapshot
1168 * update() operation used the cond_data to determine whether the
1169 * snapshot should be taken, and if it was, presumably saved it along
1170 * with the snapshot.
1172 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1174 tracing_snapshot_instance_cond(tr, cond_data);
1176 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1179 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1180 * @tr: The tracing instance
1182 * When the user enables a conditional snapshot using
1183 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1184 * with the snapshot. This accessor is used to retrieve it.
1186 * Should not be called from cond_snapshot.update(), since it takes
1187 * the tr->max_lock lock, which the code calling
1188 * cond_snapshot.update() has already done.
1190 * Returns the cond_data associated with the trace array's snapshot.
1192 void *tracing_cond_snapshot_data(struct trace_array *tr)
1194 void *cond_data = NULL;
1196 local_irq_disable();
1197 arch_spin_lock(&tr->max_lock);
1199 if (tr->cond_snapshot)
1200 cond_data = tr->cond_snapshot->cond_data;
1202 arch_spin_unlock(&tr->max_lock);
1207 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1209 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1210 struct array_buffer *size_buf, int cpu_id);
1211 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1213 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1217 if (!tr->allocated_snapshot) {
1219 /* allocate spare buffer */
1220 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1221 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1225 tr->allocated_snapshot = true;
1231 static void free_snapshot(struct trace_array *tr)
1234 * We don't free the ring buffer; instead, we resize it because
1235 * the max_tr ring buffer has some state (e.g. ring->clock) and
1236 * we want to preserve it.
1238 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1239 set_buffer_entries(&tr->max_buffer, 1);
1240 tracing_reset_online_cpus(&tr->max_buffer);
1241 tr->allocated_snapshot = false;
1245 * tracing_alloc_snapshot - allocate snapshot buffer.
1247 * This only allocates the snapshot buffer if it isn't already
1248 * allocated - it doesn't also take a snapshot.
1250 * This is meant to be used in cases where the snapshot buffer needs
1251 * to be set up for events that can't sleep but need to be able to
1252 * trigger a snapshot.
1254 int tracing_alloc_snapshot(void)
1256 struct trace_array *tr = &global_trace;
1259 ret = tracing_alloc_snapshot_instance(tr);
1264 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1267 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1269 * This is similar to tracing_snapshot(), but it will allocate the
1270 * snapshot buffer if it isn't already allocated. Use this only
1271 * where it is safe to sleep, as the allocation may sleep.
1273 * This causes a swap between the snapshot buffer and the current live
1274 * tracing buffer. You can use this to take snapshots of the live
1275 * trace when some condition is triggered, but continue to trace.
1277 void tracing_snapshot_alloc(void)
1281 ret = tracing_alloc_snapshot();
1287 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1290 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1291 * @tr: The tracing instance
1292 * @cond_data: User data to associate with the snapshot
1293 * @update: Implementation of the cond_snapshot update function
1295 * Check whether the conditional snapshot for the given instance has
1296 * already been enabled, or if the current tracer is already using a
1297 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1298 * save the cond_data and update function inside.
1300 * Returns 0 if successful, error otherwise.
1302 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1303 cond_update_fn_t update)
1305 struct cond_snapshot *cond_snapshot;
1308 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1312 cond_snapshot->cond_data = cond_data;
1313 cond_snapshot->update = update;
1315 mutex_lock(&trace_types_lock);
1317 ret = tracing_alloc_snapshot_instance(tr);
1321 if (tr->current_trace->use_max_tr) {
1327 * The cond_snapshot can only change to NULL without the
1328 * trace_types_lock. We don't care if we race with it going
1329 * to NULL, but we want to make sure that it's not set to
1330 * something other than NULL when we get here, which we can
1331 * do safely with only holding the trace_types_lock and not
1332 * having to take the max_lock.
1334 if (tr->cond_snapshot) {
1339 local_irq_disable();
1340 arch_spin_lock(&tr->max_lock);
1341 tr->cond_snapshot = cond_snapshot;
1342 arch_spin_unlock(&tr->max_lock);
1345 mutex_unlock(&trace_types_lock);
1350 mutex_unlock(&trace_types_lock);
1351 kfree(cond_snapshot);
1354 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
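/*
 * Illustrative sketch (names made up; the callback prototype should be
 * checked against include/linux/trace.h): a user of the conditional
 * snapshot API provides an update() callback deciding whether a given
 * trigger point should really swap the buffers:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *s = cond_data;
 *
 *		return s->interesting;	(take the snapshot only when true)
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_state);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */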
1357 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1358 * @tr: The tracing instance
1360 * Check whether the conditional snapshot for the given instance is
1361 * enabled; if so, free the cond_snapshot associated with it,
1362 * otherwise return -EINVAL.
1364 * Returns 0 if successful, error otherwise.
1366 int tracing_snapshot_cond_disable(struct trace_array *tr)
1370 local_irq_disable();
1371 arch_spin_lock(&tr->max_lock);
1373 if (!tr->cond_snapshot)
1376 kfree(tr->cond_snapshot);
1377 tr->cond_snapshot = NULL;
1380 arch_spin_unlock(&tr->max_lock);
1385 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1387 void tracing_snapshot(void)
1389 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1391 EXPORT_SYMBOL_GPL(tracing_snapshot);
1392 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1394 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1396 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1397 int tracing_alloc_snapshot(void)
1399 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1402 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1403 void tracing_snapshot_alloc(void)
1408 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1409 void *tracing_cond_snapshot_data(struct trace_array *tr)
1413 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1414 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1418 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1419 int tracing_snapshot_cond_disable(struct trace_array *tr)
1423 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1424 #define free_snapshot(tr) do { } while (0)
1425 #endif /* CONFIG_TRACER_SNAPSHOT */
1427 void tracer_tracing_off(struct trace_array *tr)
1429 if (tr->array_buffer.buffer)
1430 ring_buffer_record_off(tr->array_buffer.buffer);
1432 * This flag is looked at when buffers haven't been allocated
1433 * yet, or by some tracers (like irqsoff) that just want to
1434 * know if the ring buffer has been disabled, but it can handle
1435 * races where it gets disabled while we still do a record.
1436 * As the check is in the fast path of the tracers, it is more
1437 * important to be fast than accurate.
1439 tr->buffer_disabled = 1;
1440 /* Make the flag seen by readers */
1445 * tracing_off - turn off tracing buffers
1447 * This function stops the tracing buffers from recording data.
1448 * It does not disable any overhead the tracers themselves may
1449 * be causing. This function simply causes all recording to
1450 * the ring buffers to fail.
1452 void tracing_off(void)
1454 tracer_tracing_off(&global_trace);
1456 EXPORT_SYMBOL_GPL(tracing_off);
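/*
 * Example (illustrative): freeze the trace right where a problem is
 * detected, so the ring buffer keeps the events leading up to it:
 *
 *	if (something_went_wrong())
 *		tracing_off();
 */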
1458 void disable_trace_on_warning(void)
1460 if (__disable_trace_on_warning) {
1461 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1462 "Disabling tracing due to warning\n");
1468 * tracer_tracing_is_on - show real state of ring buffer enabled
1469 * @tr : the trace array to know if ring buffer is enabled
1471 * Shows real state of the ring buffer if it is enabled or not.
1473 bool tracer_tracing_is_on(struct trace_array *tr)
1475 if (tr->array_buffer.buffer)
1476 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1477 return !tr->buffer_disabled;
1481 * tracing_is_on - show state of ring buffers enabled
1483 int tracing_is_on(void)
1485 return tracer_tracing_is_on(&global_trace);
1487 EXPORT_SYMBOL_GPL(tracing_is_on);
1489 static int __init set_buf_size(char *str)
1491 unsigned long buf_size;
1495 buf_size = memparse(str, &str);
1497 * nr_entries can not be zero and the startup
1498 * tests require some buffer space. Therefore
1499 * ensure we have at least 4096 bytes of buffer.
1501 trace_buf_size = max(4096UL, buf_size);
1504 __setup("trace_buf_size=", set_buf_size);
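/*
 * Example (illustrative): booting with "trace_buf_size=4M" asks for roughly
 * 4 MB of ring buffer per CPU; memparse() accepts the usual K/M/G suffixes.
 */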
1506 static int __init set_tracing_thresh(char *str)
1508 unsigned long threshold;
1513 ret = kstrtoul(str, 0, &threshold);
1516 tracing_thresh = threshold * 1000;
1519 __setup("tracing_thresh=", set_tracing_thresh);
1521 unsigned long nsecs_to_usecs(unsigned long nsecs)
1523 return nsecs / 1000;
1527 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1528 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1529 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1530 * of strings in the order that the evals (enum) were defined.
1535 /* These must match the bit positions in trace_iterator_flags */
1536 static const char *trace_options[] = {
1544 int in_ns; /* is this clock in nanoseconds? */
1545 } trace_clocks[] = {
1546 { trace_clock_local, "local", 1 },
1547 { trace_clock_global, "global", 1 },
1548 { trace_clock_counter, "counter", 0 },
1549 { trace_clock_jiffies, "uptime", 0 },
1550 { trace_clock, "perf", 1 },
1551 { ktime_get_mono_fast_ns, "mono", 1 },
1552 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1553 { ktime_get_boot_fast_ns, "boot", 1 },
1554 { ktime_get_tai_fast_ns, "tai", 1 },
1558 bool trace_clock_in_ns(struct trace_array *tr)
1560 if (trace_clocks[tr->clock_id].in_ns)
1567 * trace_parser_get_init - gets the buffer for trace parser
1569 int trace_parser_get_init(struct trace_parser *parser, int size)
1571 memset(parser, 0, sizeof(*parser));
1573 parser->buffer = kmalloc(size, GFP_KERNEL);
1574 if (!parser->buffer)
1577 parser->size = size;
1582 * trace_parser_put - frees the buffer for trace parser
1584 void trace_parser_put(struct trace_parser *parser)
1586 kfree(parser->buffer);
1587 parser->buffer = NULL;
1591 * trace_get_user - reads the user input string separated by space
1592 * (matched by isspace(ch))
1594 * For each string found the 'struct trace_parser' is updated,
1595 * and the function returns.
1597 * Returns number of bytes read.
1599 * See kernel/trace/trace.h for 'struct trace_parser' details.
1601 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1602 size_t cnt, loff_t *ppos)
1609 trace_parser_clear(parser);
1611 ret = get_user(ch, ubuf++);
1619 * The parser is not finished with the last write,
1620 * continue reading the user input without skipping spaces.
1622 if (!parser->cont) {
1623 /* skip white space */
1624 while (cnt && isspace(ch)) {
1625 ret = get_user(ch, ubuf++);
1634 /* only spaces were written */
1635 if (isspace(ch) || !ch) {
1642 /* read the non-space input */
1643 while (cnt && !isspace(ch) && ch) {
1644 if (parser->idx < parser->size - 1)
1645 parser->buffer[parser->idx++] = ch;
1650 ret = get_user(ch, ubuf++);
1657 /* We either got finished input or we have to wait for another call. */
1658 if (isspace(ch) || !ch) {
1659 parser->buffer[parser->idx] = 0;
1660 parser->cont = false;
1661 } else if (parser->idx < parser->size - 1) {
1662 parser->cont = true;
1663 parser->buffer[parser->idx++] = ch;
1664 /* Make sure the parsed string always terminates with '\0'. */
1665 parser->buffer[parser->idx] = 0;
1678 /* TODO add a seq_buf_to_buffer() */
1679 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1683 if (trace_seq_used(s) <= s->seq.readpos)
1686 len = trace_seq_used(s) - s->seq.readpos;
1689 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1691 s->seq.readpos += cnt;
1695 unsigned long __read_mostly tracing_thresh;
1697 #ifdef CONFIG_TRACER_MAX_TRACE
1698 static const struct file_operations tracing_max_lat_fops;
1700 #ifdef LATENCY_FS_NOTIFY
1702 static struct workqueue_struct *fsnotify_wq;
1704 static void latency_fsnotify_workfn(struct work_struct *work)
1706 struct trace_array *tr = container_of(work, struct trace_array,
1708 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1711 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1713 struct trace_array *tr = container_of(iwork, struct trace_array,
1715 queue_work(fsnotify_wq, &tr->fsnotify_work);
1718 static void trace_create_maxlat_file(struct trace_array *tr,
1719 struct dentry *d_tracer)
1721 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1722 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1723 tr->d_max_latency = trace_create_file("tracing_max_latency",
1725 d_tracer, &tr->max_latency,
1726 &tracing_max_lat_fops);
1729 __init static int latency_fsnotify_init(void)
1731 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1732 WQ_UNBOUND | WQ_HIGHPRI, 0);
1734 pr_err("Unable to allocate tr_max_lat_wq\n");
1740 late_initcall_sync(latency_fsnotify_init);
1742 void latency_fsnotify(struct trace_array *tr)
1747 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1748 * possible that we are called from __schedule() or do_idle(), which
1749 * could cause a deadlock.
1751 irq_work_queue(&tr->fsnotify_irqwork);
1754 #else /* !LATENCY_FS_NOTIFY */
1756 #define trace_create_maxlat_file(tr, d_tracer) \
1757 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1758 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1763 * Copy the new maximum trace into the separate maximum-trace
1764 * structure. (This way the maximum trace is permanently saved
1765 * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
1768 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1770 struct array_buffer *trace_buf = &tr->array_buffer;
1771 struct array_buffer *max_buf = &tr->max_buffer;
1772 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1773 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1776 max_buf->time_start = data->preempt_timestamp;
1778 max_data->saved_latency = tr->max_latency;
1779 max_data->critical_start = data->critical_start;
1780 max_data->critical_end = data->critical_end;
1782 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1783 max_data->pid = tsk->pid;
1785 * If tsk == current, then use current_uid(), as that does not use
1786 * RCU. The irq tracer can be called out of RCU scope.
1789 max_data->uid = current_uid();
1791 max_data->uid = task_uid(tsk);
1793 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1794 max_data->policy = tsk->policy;
1795 max_data->rt_priority = tsk->rt_priority;
1797 /* record this task's comm */
1798 tracing_record_cmdline(tsk);
1799 latency_fsnotify(tr);
1803 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1805 * @tsk: the task with the latency
1806 * @cpu: The cpu that initiated the trace.
1807 * @cond_data: User data associated with a conditional snapshot
1809 * Flip the buffers between the @tr and the max_tr and record information
1810 * about which task was the cause of this latency.
1813 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1819 WARN_ON_ONCE(!irqs_disabled());
1821 if (!tr->allocated_snapshot) {
1822 /* Only the nop tracer should hit this when disabling */
1823 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1827 arch_spin_lock(&tr->max_lock);
1829 /* Inherit the recordable setting from array_buffer */
1830 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1831 ring_buffer_record_on(tr->max_buffer.buffer);
1833 ring_buffer_record_off(tr->max_buffer.buffer);
1835 #ifdef CONFIG_TRACER_SNAPSHOT
1836 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1837 arch_spin_unlock(&tr->max_lock);
1841 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1843 __update_max_tr(tr, tsk, cpu);
1845 arch_spin_unlock(&tr->max_lock);
1849 * update_max_tr_single - only copy one trace over, and reset the rest
1851 * @tsk: task with the latency
1852 * @cpu: the cpu of the buffer to copy.
1854 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1857 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1864 WARN_ON_ONCE(!irqs_disabled());
1865 if (!tr->allocated_snapshot) {
1866 /* Only the nop tracer should hit this when disabling */
1867 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1871 arch_spin_lock(&tr->max_lock);
1873 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1875 if (ret == -EBUSY) {
1877 * We failed to swap the buffer due to a commit taking
1878 * place on this CPU. We fail to record, but we reset
1879 * the max trace buffer (no one writes directly to it)
1880 * and flag that it failed.
1882 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1883 "Failed to swap buffers due to commit in progress\n");
1886 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1888 __update_max_tr(tr, tsk, cpu);
1889 arch_spin_unlock(&tr->max_lock);
1892 #endif /* CONFIG_TRACER_MAX_TRACE */
1894 static int wait_on_pipe(struct trace_iterator *iter, int full)
1896 /* Iterators are static, they should be filled or empty */
1897 if (trace_buffer_iter(iter, iter->cpu_file))
1900 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1904 #ifdef CONFIG_FTRACE_STARTUP_TEST
1905 static bool selftests_can_run;
1907 struct trace_selftests {
1908 struct list_head list;
1909 struct tracer *type;
1912 static LIST_HEAD(postponed_selftests);
1914 static int save_selftest(struct tracer *type)
1916 struct trace_selftests *selftest;
1918 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1922 selftest->type = type;
1923 list_add(&selftest->list, &postponed_selftests);
1927 static int run_tracer_selftest(struct tracer *type)
1929 struct trace_array *tr = &global_trace;
1930 struct tracer *saved_tracer = tr->current_trace;
1933 if (!type->selftest || tracing_selftest_disabled)
1937 * If a tracer registers early in boot up (before scheduling is
1938 * initialized and such), then do not run its selftests yet.
1939 * Instead, run them a little later in the boot process.
1941 if (!selftests_can_run)
1942 return save_selftest(type);
1944 if (!tracing_is_on()) {
1945 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1951 * Run a selftest on this tracer.
1952 * Here we reset the trace buffer, and set the current
1953 * tracer to be this tracer. The tracer can then run some
1954 * internal tracing to verify that everything is in order.
1955 * If we fail, we do not register this tracer.
1957 tracing_reset_online_cpus(&tr->array_buffer);
1959 tr->current_trace = type;
1961 #ifdef CONFIG_TRACER_MAX_TRACE
1962 if (type->use_max_tr) {
1963 /* If we expanded the buffers, make sure the max is expanded too */
1964 if (ring_buffer_expanded)
1965 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1966 RING_BUFFER_ALL_CPUS);
1967 tr->allocated_snapshot = true;
1971 /* the test is responsible for initializing and enabling */
1972 pr_info("Testing tracer %s: ", type->name);
1973 ret = type->selftest(type, tr);
1974 /* the test is responsible for resetting too */
1975 tr->current_trace = saved_tracer;
1977 printk(KERN_CONT "FAILED!\n");
1978 /* Add the warning after printing 'FAILED' */
1982 /* Only reset on passing, to avoid touching corrupted buffers */
1983 tracing_reset_online_cpus(&tr->array_buffer);
1985 #ifdef CONFIG_TRACER_MAX_TRACE
1986 if (type->use_max_tr) {
1987 tr->allocated_snapshot = false;
1989 /* Shrink the max buffer again */
1990 if (ring_buffer_expanded)
1991 ring_buffer_resize(tr->max_buffer.buffer, 1,
1992 RING_BUFFER_ALL_CPUS);
1996 printk(KERN_CONT "PASSED\n");
2000 static __init int init_trace_selftests(void)
2002 struct trace_selftests *p, *n;
2003 struct tracer *t, **last;
2006 selftests_can_run = true;
2008 mutex_lock(&trace_types_lock);
2010 if (list_empty(&postponed_selftests))
2013 pr_info("Running postponed tracer tests:\n");
2015 tracing_selftest_running = true;
2016 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2017 /* This loop can take minutes when sanitizers are enabled, so
2018 * let's make sure we allow RCU processing.
2021 ret = run_tracer_selftest(p->type);
2022 /* If the test fails, then warn and remove from available_tracers */
2024 WARN(1, "tracer: %s failed selftest, disabling\n",
2026 last = &trace_types;
2027 for (t = trace_types; t; t = t->next) {
2038 tracing_selftest_running = false;
2041 mutex_unlock(&trace_types_lock);
2045 core_initcall(init_trace_selftests);
2047 static inline int run_tracer_selftest(struct tracer *type)
2051 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2053 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2055 static void __init apply_trace_boot_options(void);
2058 * register_tracer - register a tracer with the ftrace system.
2059 * @type: the plugin for the tracer
2061 * Register a new plugin tracer.
2063 int __init register_tracer(struct tracer *type)
2069 pr_info("Tracer must have a name\n");
2073 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2074 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2078 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2079 pr_warn("Can not register tracer %s due to lockdown\n",
2084 mutex_lock(&trace_types_lock);
2086 tracing_selftest_running = true;
2088 for (t = trace_types; t; t = t->next) {
2089 if (strcmp(type->name, t->name) == 0) {
2091 pr_info("Tracer %s already registered\n",
2098 if (!type->set_flag)
2099 type->set_flag = &dummy_set_flag;
2101 /* Allocate a dummy tracer_flags */
2102 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2107 type->flags->val = 0;
2108 type->flags->opts = dummy_tracer_opt;
2110 if (!type->flags->opts)
2111 type->flags->opts = dummy_tracer_opt;
2113 /* store the tracer for __set_tracer_option */
2114 type->flags->trace = type;
2116 ret = run_tracer_selftest(type);
2120 type->next = trace_types;
2122 add_tracer_options(&global_trace, type);
2125 tracing_selftest_running = false;
2126 mutex_unlock(&trace_types_lock);
2128 if (ret || !default_bootup_tracer)
2131 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2134 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2135 /* Do we want this tracer to start on bootup? */
2136 tracing_set_tracer(&global_trace, type->name);
2137 default_bootup_tracer = NULL;
2139 apply_trace_boot_options();
2141 /* disable other selftests, since this will break it. */
2142 disable_tracing_selftest("running a tracer");
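/*
 * Illustrative sketch of the registration side (names are made up, and the
 * struct tracer fields beyond .name should be checked against trace.h):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	and then, from an __init function:
 *
 *	register_tracer(&my_tracer);
 */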
2148 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2150 struct trace_buffer *buffer = buf->buffer;
2155 ring_buffer_record_disable(buffer);
2157 /* Make sure all commits have finished */
2159 ring_buffer_reset_cpu(buffer, cpu);
2161 ring_buffer_record_enable(buffer);
2164 void tracing_reset_online_cpus(struct array_buffer *buf)
2166 struct trace_buffer *buffer = buf->buffer;
2171 ring_buffer_record_disable(buffer);
2173 /* Make sure all commits have finished */
2176 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2178 ring_buffer_reset_online_cpus(buffer);
2180 ring_buffer_record_enable(buffer);
2183 /* Must have trace_types_lock held */
2184 void tracing_reset_all_online_cpus_unlocked(void)
2186 struct trace_array *tr;
2188 lockdep_assert_held(&trace_types_lock);
2190 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2191 if (!tr->clear_trace)
2193 tr->clear_trace = false;
2194 tracing_reset_online_cpus(&tr->array_buffer);
2195 #ifdef CONFIG_TRACER_MAX_TRACE
2196 tracing_reset_online_cpus(&tr->max_buffer);
2201 void tracing_reset_all_online_cpus(void)
2203 mutex_lock(&trace_types_lock);
2204 tracing_reset_all_online_cpus_unlocked();
2205 mutex_unlock(&trace_types_lock);
2209 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2210 * is the tgid last observed corresponding to pid=i.
2212 static int *tgid_map;
2214 /* The maximum valid index into tgid_map. */
2215 static size_t tgid_map_max;
2217 #define SAVED_CMDLINES_DEFAULT 128
2218 #define NO_CMDLINE_MAP UINT_MAX
2220 * Preemption must be disabled before acquiring trace_cmdline_lock.
2221 * The various trace_arrays' max_lock must be acquired in a context
2222 * where interrupt is disabled.
2224 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2225 struct saved_cmdlines_buffer {
2226 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2227 unsigned *map_cmdline_to_pid;
2228 unsigned cmdline_num;
2230 char *saved_cmdlines;
2232 static struct saved_cmdlines_buffer *savedcmd;
2234 static inline char *get_saved_cmdlines(int idx)
2236 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2239 static inline void set_cmdline(int idx, const char *cmdline)
2241 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2244 static int allocate_cmdlines_buffer(unsigned int val,
2245 struct saved_cmdlines_buffer *s)
2247 s->map_cmdline_to_pid = kmalloc_array(val,
2248 sizeof(*s->map_cmdline_to_pid),
2250 if (!s->map_cmdline_to_pid)
2253 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2254 if (!s->saved_cmdlines) {
2255 kfree(s->map_cmdline_to_pid);
2260 s->cmdline_num = val;
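	/* NO_CMDLINE_MAP is UINT_MAX, i.e. all bytes 0xff, so a byte-wise memset works */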
2261 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2262 sizeof(s->map_pid_to_cmdline));
2263 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2264 val * sizeof(*s->map_cmdline_to_pid));
2269 static int trace_create_savedcmd(void)
2273 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2277 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2287 int is_tracing_stopped(void)
2289 return global_trace.stop_count;
2293 * tracing_start - quick start of the tracer
2295 * If tracing is enabled but was stopped by tracing_stop,
2296 * this will start the tracer back up.
2298 void tracing_start(void)
2300 struct trace_buffer *buffer;
2301 unsigned long flags;
2303 if (tracing_disabled)
2306 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2307 if (--global_trace.stop_count) {
2308 if (global_trace.stop_count < 0) {
2309 /* Someone screwed up their debugging */
2311 global_trace.stop_count = 0;
2316 /* Prevent the buffers from switching */
2317 arch_spin_lock(&global_trace.max_lock);
2319 buffer = global_trace.array_buffer.buffer;
2321 ring_buffer_record_enable(buffer);
2323 #ifdef CONFIG_TRACER_MAX_TRACE
2324 buffer = global_trace.max_buffer.buffer;
2326 ring_buffer_record_enable(buffer);
2329 arch_spin_unlock(&global_trace.max_lock);
2332 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2335 static void tracing_start_tr(struct trace_array *tr)
2337 struct trace_buffer *buffer;
2338 unsigned long flags;
2340 if (tracing_disabled)
2343 /* If global, we need to also start the max tracer */
2344 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2345 return tracing_start();
2347 raw_spin_lock_irqsave(&tr->start_lock, flags);
2349 if (--tr->stop_count) {
2350 if (tr->stop_count < 0) {
2351 /* Someone screwed up their debugging */
2358 buffer = tr->array_buffer.buffer;
2360 ring_buffer_record_enable(buffer);
2363 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2367 * tracing_stop - quick stop of the tracer
2369 * Light weight way to stop tracing. Use in conjunction with
2372 void tracing_stop(void)
2374 struct trace_buffer *buffer;
2375 unsigned long flags;
2377 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2378 if (global_trace.stop_count++)
2381 /* Prevent the buffers from switching */
2382 arch_spin_lock(&global_trace.max_lock);
2384 buffer = global_trace.array_buffer.buffer;
2386 ring_buffer_record_disable(buffer);
2388 #ifdef CONFIG_TRACER_MAX_TRACE
2389 buffer = global_trace.max_buffer.buffer;
2391 ring_buffer_record_disable(buffer);
2394 arch_spin_unlock(&global_trace.max_lock);
2397 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2400 static void tracing_stop_tr(struct trace_array *tr)
2402 struct trace_buffer *buffer;
2403 unsigned long flags;
2405 /* If global, we need to also stop the max tracer */
2406 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2407 return tracing_stop();
2409 raw_spin_lock_irqsave(&tr->start_lock, flags);
2410 if (tr->stop_count++)
2413 buffer = tr->array_buffer.buffer;
2415 ring_buffer_record_disable(buffer);
2418 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2421 static int trace_save_cmdline(struct task_struct *tsk)
2425 /* treat recording of idle task as a success */
2429 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2432 * It's not the end of the world if we don't get
2433 * the lock, but we also don't want to spin
2434 * nor do we want to disable interrupts,
2435 * so if we miss here, then better luck next time.
2437 * This is called from within the scheduler and wakeup paths, so
2438 * interrupts had better be disabled and the run queue lock held.
2440 lockdep_assert_preemption_disabled();
2441 if (!arch_spin_trylock(&trace_cmdline_lock))
2444 idx = savedcmd->map_pid_to_cmdline[tpid];
2445 if (idx == NO_CMDLINE_MAP) {
2446 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2448 savedcmd->map_pid_to_cmdline[tpid] = idx;
2449 savedcmd->cmdline_idx = idx;
2452 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2453 set_cmdline(idx, tsk->comm);
2455 arch_spin_unlock(&trace_cmdline_lock);
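/*
 * Editorial sketch (not part of the original file): the comm cache is a
 * pair of small tables. A pid is folded into a slot of
 * map_pid_to_cmdline[]; that slot holds an index into the circular
 * saved_cmdlines[] array, and map_cmdline_to_pid[] remembers which pid
 * currently owns the slot so a stale entry can be detected on lookup:
 *
 *	unsigned tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
 *	unsigned idx  = savedcmd->map_pid_to_cmdline[tpid];
 *
 *	if (idx != NO_CMDLINE_MAP &&
 *	    savedcmd->map_cmdline_to_pid[idx] == tsk->pid)
 *		comm = &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
 */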
2460 static void __trace_find_cmdline(int pid, char comm[])
2466 strcpy(comm, "<idle>");
2470 if (WARN_ON_ONCE(pid < 0)) {
2471 strcpy(comm, "<XXX>");
2475 tpid = pid & (PID_MAX_DEFAULT - 1);
2476 map = savedcmd->map_pid_to_cmdline[tpid];
2477 if (map != NO_CMDLINE_MAP) {
2478 tpid = savedcmd->map_cmdline_to_pid[map];
2480 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2484 strcpy(comm, "<...>");
2487 void trace_find_cmdline(int pid, char comm[])
2490 arch_spin_lock(&trace_cmdline_lock);
2492 __trace_find_cmdline(pid, comm);
2494 arch_spin_unlock(&trace_cmdline_lock);
2498 static int *trace_find_tgid_ptr(int pid)
2501 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2502 * if we observe a non-NULL tgid_map then we also observe the correct
 * tgid_map_max.
2505 int *map = smp_load_acquire(&tgid_map);
2507 if (unlikely(!map || pid > tgid_map_max))
2513 int trace_find_tgid(int pid)
2515 int *ptr = trace_find_tgid_ptr(pid);
2517 return ptr ? *ptr : 0;
2520 static int trace_save_tgid(struct task_struct *tsk)
2524 /* treat recording of idle task as a success */
2528 ptr = trace_find_tgid_ptr(tsk->pid);
2536 static bool tracing_record_taskinfo_skip(int flags)
2538 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2540 if (!__this_cpu_read(trace_taskinfo_save))
2546 * tracing_record_taskinfo - record the task info of a task
2548 * @task: task to record
2549 * @flags: TRACE_RECORD_CMDLINE for recording comm
2550 * TRACE_RECORD_TGID for recording tgid
2552 void tracing_record_taskinfo(struct task_struct *task, int flags)
2556 if (tracing_record_taskinfo_skip(flags))
2560 * Record as much task information as possible. If some fail, continue
2561 * to try to record the others.
2563 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2564 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2566 /* If recording any information failed, retry again soon. */
2570 __this_cpu_write(trace_taskinfo_save, false);
2574 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2576 * @prev: previous task during sched_switch
2577 * @next: next task during sched_switch
2578 * @flags: TRACE_RECORD_CMDLINE for recording comm
2579 * TRACE_RECORD_TGID for recording tgid
2581 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2582 struct task_struct *next, int flags)
2586 if (tracing_record_taskinfo_skip(flags))
2590 * Record as much task information as possible. If some fail, continue
2591 * to try to record the others.
2593 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2594 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2595 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2596 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2598 /* If recording any information failed, retry again soon. */
2602 __this_cpu_write(trace_taskinfo_save, false);
2605 /* Helpers to record a specific task information */
2606 void tracing_record_cmdline(struct task_struct *task)
2608 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2611 void tracing_record_tgid(struct task_struct *task)
2613 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2617 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2618 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2619 * simplifies those functions and keeps them in sync.
2621 enum print_line_t trace_handle_return(struct trace_seq *s)
2623 return trace_seq_has_overflowed(s) ?
2624 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2626 EXPORT_SYMBOL_GPL(trace_handle_return);
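/*
 * Editorial example (a minimal sketch, not taken from this file): an
 * event's ->trace() output callback typically builds its line in the
 * iterator's trace_seq and finishes with trace_handle_return(), so a
 * full seq buffer is reported as TRACE_TYPE_PARTIAL_LINE. The event
 * name and the field printed below are hypothetical:
 *
 *	static enum print_line_t
 *	sample_event_trace(struct trace_iterator *iter, int flags,
 *			   struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "sample: cpu=%d\n", iter->cpu);
 *		return trace_handle_return(&iter->seq);
 *	}
 */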
2628 static unsigned short migration_disable_value(void)
2630 #if defined(CONFIG_SMP)
2631 return current->migration_disabled;
2637 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2639 unsigned int trace_flags = irqs_status;
2642 pc = preempt_count();
2645 trace_flags |= TRACE_FLAG_NMI;
2646 if (pc & HARDIRQ_MASK)
2647 trace_flags |= TRACE_FLAG_HARDIRQ;
2648 if (in_serving_softirq())
2649 trace_flags |= TRACE_FLAG_SOFTIRQ;
2650 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2651 trace_flags |= TRACE_FLAG_BH_OFF;
2653 if (tif_need_resched())
2654 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2655 if (test_preempt_need_resched())
2656 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2657 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2658 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
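/*
 * Editorial note (layout sketch, inferred from the return statement
 * above): the packed trace_ctx word is
 *
 *	bits  0- 3: preempt_count depth (clamped to 15)
 *	bits  4- 7: migrate-disable depth (clamped to 15)
 *	bits  8-15: unused
 *	bits 16+  : TRACE_FLAG_* bits (irqs-off, NMI, hard/soft irq, ...)
 *
 * so a consumer can unpack it roughly as:
 *
 *	unsigned int trace_ctx = tracing_gen_ctx_irq_test(irqs_status);
 *	unsigned char flags    = trace_ctx >> 16;
 *	unsigned char counts   = trace_ctx & 0xff;	(low/high nibbles)
 */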
2661 struct ring_buffer_event *
2662 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2665 unsigned int trace_ctx)
2667 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2670 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2671 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2672 static int trace_buffered_event_ref;
2675 * trace_buffered_event_enable - enable buffering events
2677 * When events are being filtered, it is quicker to use a temporary
2678 * buffer to write the event data into if there's a likely chance
2679 * that it will not be committed. The discard of the ring buffer
2680 * is not as fast as committing, and is much slower than copying
 * to the temp buffer.
2683 * When an event is to be filtered, allocate per cpu buffers to
2684 * write the event data into, and if the event is filtered and discarded
2685 * it is simply dropped; otherwise, the entire data is committed
 * to the ring buffer.
2688 void trace_buffered_event_enable(void)
2690 struct ring_buffer_event *event;
2694 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2696 if (trace_buffered_event_ref++)
2699 for_each_tracing_cpu(cpu) {
2700 page = alloc_pages_node(cpu_to_node(cpu),
2701 GFP_KERNEL | __GFP_NORETRY, 0);
2705 event = page_address(page);
2706 memset(event, 0, sizeof(*event));
2708 per_cpu(trace_buffered_event, cpu) = event;
2711 if (cpu == smp_processor_id() &&
2712 __this_cpu_read(trace_buffered_event) !=
2713 per_cpu(trace_buffered_event, cpu))
2720 trace_buffered_event_disable();
2723 static void enable_trace_buffered_event(void *data)
2725 /* Probably not needed, but do it anyway */
2727 this_cpu_dec(trace_buffered_event_cnt);
2730 static void disable_trace_buffered_event(void *data)
2732 this_cpu_inc(trace_buffered_event_cnt);
2736 * trace_buffered_event_disable - disable buffering events
2738 * When a filter is removed, it is faster to not use the buffered
2739 * events, and to commit directly into the ring buffer. Free up
2740 * the temp buffers when there are no more users. This requires
2741 * special synchronization with current events.
2743 void trace_buffered_event_disable(void)
2747 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2749 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2752 if (--trace_buffered_event_ref)
2756 /* For each CPU, set the buffer as used. */
2757 smp_call_function_many(tracing_buffer_mask,
2758 disable_trace_buffered_event, NULL, 1);
2761 /* Wait for all current users to finish */
2764 for_each_tracing_cpu(cpu) {
2765 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2766 per_cpu(trace_buffered_event, cpu) = NULL;
2769 * Make sure trace_buffered_event is NULL before clearing
2770 * trace_buffered_event_cnt.
2775 /* Do the work on each cpu */
2776 smp_call_function_many(tracing_buffer_mask,
2777 enable_trace_buffered_event, NULL, 1);
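/*
 * Editorial sketch (assumed usage, not shown in this file): the buffered
 * event machinery is reference counted and both helpers expect
 * event_mutex to be held, so filter setup/teardown code pairs them as:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		(filter attached)
 *	...
 *	trace_buffered_event_disable();		(filter removed)
 *	mutex_unlock(&event_mutex);
 */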
2781 static struct trace_buffer *temp_buffer;
2783 struct ring_buffer_event *
2784 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2785 struct trace_event_file *trace_file,
2786 int type, unsigned long len,
2787 unsigned int trace_ctx)
2789 struct ring_buffer_event *entry;
2790 struct trace_array *tr = trace_file->tr;
2793 *current_rb = tr->array_buffer.buffer;
2795 if (!tr->no_filter_buffering_ref &&
2796 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2797 preempt_disable_notrace();
2799 * Filtering is on, so try to use the per cpu buffer first.
2800 * This buffer will simulate a ring_buffer_event,
2801 * where the type_len is zero and the array[0] will
2802 * hold the full length.
2803 * (see include/linux/ring_buffer.h for details on
2804 * how the ring_buffer_event is structured).
2806 * Using a temp buffer during filtering and copying it
2807 * on a matched filter is quicker than writing directly
2808 * into the ring buffer and then discarding it when
2809 * it doesn't match. That is because the discard
2810 * requires several atomic operations to get right.
2811 * Copying on a match and doing nothing on a failed match is
2812 * still quicker than writing straight into the ring buffer (no
2813 * copy on match) and having to discard it on a failed match.
2815 if ((entry = __this_cpu_read(trace_buffered_event))) {
2816 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2818 val = this_cpu_inc_return(trace_buffered_event_cnt);
2821 * Preemption is disabled, but interrupts and NMIs
2822 * can still come in now. If that happens after
2823 * the above increment, then it will have to go
2824 * back to the old method of allocating the event
2825 * on the ring buffer, and if the filter fails, it
2826 * will have to call ring_buffer_discard_commit()
2829 * Need to also check the unlikely case that the
2830 * length is bigger than the temp buffer size.
2831 * If that happens, then the reserve is pretty much
2832 * guaranteed to fail, as the ring buffer currently
2833 * only allows events less than a page. But that may
2834 * change in the future, so let the ring buffer reserve
2835 * handle the failure in that case.
2837 if (val == 1 && likely(len <= max_len)) {
2838 trace_event_setup(entry, type, trace_ctx);
2839 entry->array[0] = len;
2840 /* Return with preemption disabled */
2843 this_cpu_dec(trace_buffered_event_cnt);
2845 /* __trace_buffer_lock_reserve() disables preemption */
2846 preempt_enable_notrace();
2849 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2852 * If tracing is off but we have triggers enabled,
2853 * we still need to look at the event data. Use the temp_buffer
2854 * to store the trace event for the trigger to use. It's recursion
2855 * safe and will not be recorded anywhere.
2857 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2858 *current_rb = temp_buffer;
2859 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2864 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2866 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2867 static DEFINE_MUTEX(tracepoint_printk_mutex);
2869 static void output_printk(struct trace_event_buffer *fbuffer)
2871 struct trace_event_call *event_call;
2872 struct trace_event_file *file;
2873 struct trace_event *event;
2874 unsigned long flags;
2875 struct trace_iterator *iter = tracepoint_print_iter;
2877 /* We should never get here if iter is NULL */
2878 if (WARN_ON_ONCE(!iter))
2881 event_call = fbuffer->trace_file->event_call;
2882 if (!event_call || !event_call->event.funcs ||
2883 !event_call->event.funcs->trace)
2886 file = fbuffer->trace_file;
2887 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2888 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2889 !filter_match_preds(file->filter, fbuffer->entry)))
2892 event = &fbuffer->trace_file->event_call->event;
2894 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2895 trace_seq_init(&iter->seq);
2896 iter->ent = fbuffer->entry;
2897 event_call->event.funcs->trace(iter, 0, event);
2898 trace_seq_putc(&iter->seq, 0);
2899 printk("%s", iter->seq.buffer);
2901 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2904 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2905 void *buffer, size_t *lenp,
2908 int save_tracepoint_printk;
2911 mutex_lock(&tracepoint_printk_mutex);
2912 save_tracepoint_printk = tracepoint_printk;
2914 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2917 * This will force exiting early, as tracepoint_printk
2918 * is always zero when tracepoint_print_iter is not allocated.
2920 if (!tracepoint_print_iter)
2921 tracepoint_printk = 0;
2923 if (save_tracepoint_printk == tracepoint_printk)
2926 if (tracepoint_printk)
2927 static_key_enable(&tracepoint_printk_key.key);
2929 static_key_disable(&tracepoint_printk_key.key);
2932 mutex_unlock(&tracepoint_printk_mutex);
2937 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2939 enum event_trigger_type tt = ETT_NONE;
2940 struct trace_event_file *file = fbuffer->trace_file;
2942 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2943 fbuffer->entry, &tt))
2946 if (static_key_false(&tracepoint_printk_key.key))
2947 output_printk(fbuffer);
2949 if (static_branch_unlikely(&trace_event_exports_enabled))
2950 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2952 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2953 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2957 event_triggers_post_call(file, tt);
2960 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
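/*
 * Editorial sketch (illustrative only; the real callers are the probes
 * generated by TRACE_EVENT()): the usual reserve/fill/commit sequence
 * around the helpers above looks like the following, where struct
 * sample_entry and its field are hypothetical:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct sample_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = 42;
 *	trace_event_buffer_commit(&fbuffer);
 */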
2965 * trace_buffer_unlock_commit_regs()
2966 * trace_event_buffer_commit()
2967 * trace_event_raw_event_xxx()
2969 # define STACK_SKIP 3
2971 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2972 struct trace_buffer *buffer,
2973 struct ring_buffer_event *event,
2974 unsigned int trace_ctx,
2975 struct pt_regs *regs)
2977 __buffer_unlock_commit(buffer, event);
2980 * If regs is not set, then skip the necessary functions.
2981 * Note, we can still get here via blktrace, wakeup tracer
2982 * and mmiotrace, but that's ok if they lose a function or
2983 * two. They are not that meaningful.
2985 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2986 ftrace_trace_userstack(tr, buffer, trace_ctx);
2990 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2993 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2994 struct ring_buffer_event *event)
2996 __buffer_unlock_commit(buffer, event);
3000 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3001 parent_ip, unsigned int trace_ctx)
3003 struct trace_event_call *call = &event_function;
3004 struct trace_buffer *buffer = tr->array_buffer.buffer;
3005 struct ring_buffer_event *event;
3006 struct ftrace_entry *entry;
3008 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3012 entry = ring_buffer_event_data(event);
3014 entry->parent_ip = parent_ip;
3016 if (!call_filter_check_discard(call, entry, buffer, event)) {
3017 if (static_branch_unlikely(&trace_function_exports_enabled))
3018 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3019 __buffer_unlock_commit(buffer, event);
3023 #ifdef CONFIG_STACKTRACE
3025 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3026 #define FTRACE_KSTACK_NESTING 4
3028 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3030 struct ftrace_stack {
3031 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3035 struct ftrace_stacks {
3036 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3039 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3040 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3042 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3043 unsigned int trace_ctx,
3044 int skip, struct pt_regs *regs)
3046 struct trace_event_call *call = &event_kernel_stack;
3047 struct ring_buffer_event *event;
3048 unsigned int size, nr_entries;
3049 struct ftrace_stack *fstack;
3050 struct stack_entry *entry;
3054 * Add one for this function and the call to save_stack_trace().
3055 * If regs is set, then these functions will not be in the way.
3057 #ifndef CONFIG_UNWINDER_ORC
3062 preempt_disable_notrace();
3064 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3066 /* This should never happen. If it does, yell once and skip */
3067 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3071 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3072 * interrupt will either see the value pre increment or post
3073 * increment. If the interrupt happens pre increment it will have
3074 * restored the counter when it returns. We just need a barrier to
3075 * keep gcc from moving things around.
3079 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3080 size = ARRAY_SIZE(fstack->calls);
3083 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3086 nr_entries = stack_trace_save(fstack->calls, size, skip);
3089 size = nr_entries * sizeof(unsigned long);
3090 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3091 (sizeof(*entry) - sizeof(entry->caller)) + size,
3095 entry = ring_buffer_event_data(event);
3097 memcpy(&entry->caller, fstack->calls, size);
3098 entry->size = nr_entries;
3100 if (!call_filter_check_discard(call, entry, buffer, event))
3101 __buffer_unlock_commit(buffer, event);
3104 /* Again, don't let gcc optimize things here */
3106 __this_cpu_dec(ftrace_stack_reserve);
3107 preempt_enable_notrace();
3111 static inline void ftrace_trace_stack(struct trace_array *tr,
3112 struct trace_buffer *buffer,
3113 unsigned int trace_ctx,
3114 int skip, struct pt_regs *regs)
3116 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3119 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3122 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3125 struct trace_buffer *buffer = tr->array_buffer.buffer;
3127 if (rcu_is_watching()) {
3128 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3133 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3134 * but if the above rcu_is_watching() failed, then the NMI
3135 * triggered someplace critical, and ct_irq_enter() should
3136 * not be called from NMI.
3138 if (unlikely(in_nmi()))
3141 ct_irq_enter_irqson();
3142 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3143 ct_irq_exit_irqson();
3147 * trace_dump_stack - record a stack back trace in the trace buffer
3148 * @skip: Number of functions to skip (helper handlers)
3150 void trace_dump_stack(int skip)
3152 if (tracing_disabled || tracing_selftest_running)
3155 #ifndef CONFIG_UNWINDER_ORC
3156 /* Skip 1 to skip this function. */
3159 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3160 tracing_gen_ctx(), skip, NULL);
3162 EXPORT_SYMBOL_GPL(trace_dump_stack);
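/*
 * Editorial example (minimal sketch): kernel code that wants a backtrace
 * recorded in the trace buffer rather than printed to the console can do:
 *
 *	if (unlikely(something_odd))		(hypothetical condition)
 *		trace_dump_stack(0);
 *
 * A non-zero @skip drops that many extra callers from the top of the
 * recorded stack.
 */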
3164 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3165 static DEFINE_PER_CPU(int, user_stack_count);
3168 ftrace_trace_userstack(struct trace_array *tr,
3169 struct trace_buffer *buffer, unsigned int trace_ctx)
3171 struct trace_event_call *call = &event_user_stack;
3172 struct ring_buffer_event *event;
3173 struct userstack_entry *entry;
3175 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3179 * NMIs cannot handle page faults, even with fixups.
3180 * Saving the user stack can (and often does) fault.
3182 if (unlikely(in_nmi()))
3186 * prevent recursion, since the user stack tracing may
3187 * trigger other kernel events.
3190 if (__this_cpu_read(user_stack_count))
3193 __this_cpu_inc(user_stack_count);
3195 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3196 sizeof(*entry), trace_ctx);
3198 goto out_drop_count;
3199 entry = ring_buffer_event_data(event);
3201 entry->tgid = current->tgid;
3202 memset(&entry->caller, 0, sizeof(entry->caller));
3204 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3205 if (!call_filter_check_discard(call, entry, buffer, event))
3206 __buffer_unlock_commit(buffer, event);
3209 __this_cpu_dec(user_stack_count);
3213 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3214 static void ftrace_trace_userstack(struct trace_array *tr,
3215 struct trace_buffer *buffer,
3216 unsigned int trace_ctx)
3219 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3221 #endif /* CONFIG_STACKTRACE */
3224 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3225 unsigned long long delta)
3227 entry->bottom_delta_ts = delta & U32_MAX;
3228 entry->top_delta_ts = (delta >> 32);
3231 void trace_last_func_repeats(struct trace_array *tr,
3232 struct trace_func_repeats *last_info,
3233 unsigned int trace_ctx)
3235 struct trace_buffer *buffer = tr->array_buffer.buffer;
3236 struct func_repeats_entry *entry;
3237 struct ring_buffer_event *event;
3240 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3241 sizeof(*entry), trace_ctx);
3245 delta = ring_buffer_event_time_stamp(buffer, event) -
3246 last_info->ts_last_call;
3248 entry = ring_buffer_event_data(event);
3249 entry->ip = last_info->ip;
3250 entry->parent_ip = last_info->parent_ip;
3251 entry->count = last_info->count;
3252 func_repeats_set_delta_ts(entry, delta);
3254 __buffer_unlock_commit(buffer, event);
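/*
 * Editorial note (sketch): func_repeats_set_delta_ts() above splits the
 * 64-bit delta into two 32-bit fields; the output side is expected to
 * reassemble it as:
 *
 *	u64 delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
 */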
3257 /* created for use with alloc_percpu */
3258 struct trace_buffer_struct {
3260 char buffer[4][TRACE_BUF_SIZE];
3263 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3266 * This allows for lockless recording. If we're nested too deeply, then
3267 * this returns NULL.
3269 static char *get_trace_buf(void)
3271 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3273 if (!trace_percpu_buffer || buffer->nesting >= 4)
3278 /* Interrupts must see nesting incremented before we use the buffer */
3280 return &buffer->buffer[buffer->nesting - 1][0];
3283 static void put_trace_buf(void)
3285 /* Don't let the decrement of nesting leak before this */
3287 this_cpu_dec(trace_percpu_buffer->nesting);
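/*
 * Editorial sketch (usage pattern, mirroring trace_vbprintk() below):
 * callers bracket their use of the per-cpu scratch buffer with
 * preemption disabled so the nesting counter stays consistent:
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		... format up to TRACE_BUF_SIZE bytes into buf ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */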
3290 static int alloc_percpu_trace_buffer(void)
3292 struct trace_buffer_struct __percpu *buffers;
3294 if (trace_percpu_buffer)
3297 buffers = alloc_percpu(struct trace_buffer_struct);
3298 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3301 trace_percpu_buffer = buffers;
3305 static int buffers_allocated;
3307 void trace_printk_init_buffers(void)
3309 if (buffers_allocated)
3312 if (alloc_percpu_trace_buffer())
3315 /* trace_printk() is for debug use only. Don't use it in production. */
3318 pr_warn("**********************************************************\n");
3319 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3321 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3323 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3324 pr_warn("** unsafe for production use. **\n");
3326 pr_warn("** If you see this message and you are not debugging **\n");
3327 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3329 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3330 pr_warn("**********************************************************\n");
3332 /* Expand the buffers to set size */
3333 tracing_update_buffers();
3335 buffers_allocated = 1;
3338 * trace_printk_init_buffers() can be called by modules.
3339 * If that happens, then we need to start cmdline recording
3340 * directly here. If the global_trace.array_buffer.buffer is already
3341 * allocated here, then this was called by module code.
3343 if (global_trace.array_buffer.buffer)
3344 tracing_start_cmdline_record();
3346 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3348 void trace_printk_start_comm(void)
3350 /* Start tracing comms if trace printk is set */
3351 if (!buffers_allocated)
3353 tracing_start_cmdline_record();
3356 static void trace_printk_start_stop_comm(int enabled)
3358 if (!buffers_allocated)
3362 tracing_start_cmdline_record();
3364 tracing_stop_cmdline_record();
3368 * trace_vbprintk - write binary msg to tracing buffer
3369 * @ip: The address of the caller
3370 * @fmt: The string format to write to the buffer
3371 * @args: Arguments for @fmt
3373 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3375 struct trace_event_call *call = &event_bprint;
3376 struct ring_buffer_event *event;
3377 struct trace_buffer *buffer;
3378 struct trace_array *tr = &global_trace;
3379 struct bprint_entry *entry;
3380 unsigned int trace_ctx;
3384 if (unlikely(tracing_selftest_running || tracing_disabled))
3387 /* Don't pollute graph traces with trace_vprintk internals */
3388 pause_graph_tracing();
3390 trace_ctx = tracing_gen_ctx();
3391 preempt_disable_notrace();
3393 tbuffer = get_trace_buf();
3399 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3401 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3404 size = sizeof(*entry) + sizeof(u32) * len;
3405 buffer = tr->array_buffer.buffer;
3406 ring_buffer_nest_start(buffer);
3407 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3411 entry = ring_buffer_event_data(event);
3415 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3416 if (!call_filter_check_discard(call, entry, buffer, event)) {
3417 __buffer_unlock_commit(buffer, event);
3418 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3422 ring_buffer_nest_end(buffer);
3427 preempt_enable_notrace();
3428 unpause_graph_tracing();
3432 EXPORT_SYMBOL_GPL(trace_vbprintk);
3436 __trace_array_vprintk(struct trace_buffer *buffer,
3437 unsigned long ip, const char *fmt, va_list args)
3439 struct trace_event_call *call = &event_print;
3440 struct ring_buffer_event *event;
3442 struct print_entry *entry;
3443 unsigned int trace_ctx;
3446 if (tracing_disabled || tracing_selftest_running)
3449 /* Don't pollute graph traces with trace_vprintk internals */
3450 pause_graph_tracing();
3452 trace_ctx = tracing_gen_ctx();
3453 preempt_disable_notrace();
3456 tbuffer = get_trace_buf();
3462 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3464 size = sizeof(*entry) + len + 1;
3465 ring_buffer_nest_start(buffer);
3466 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3470 entry = ring_buffer_event_data(event);
3473 memcpy(&entry->buf, tbuffer, len + 1);
3474 if (!call_filter_check_discard(call, entry, buffer, event)) {
3475 __buffer_unlock_commit(buffer, event);
3476 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3480 ring_buffer_nest_end(buffer);
3484 preempt_enable_notrace();
3485 unpause_graph_tracing();
3491 int trace_array_vprintk(struct trace_array *tr,
3492 unsigned long ip, const char *fmt, va_list args)
3494 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3498 * trace_array_printk - Print a message to a specific instance
3499 * @tr: The instance trace_array descriptor
3500 * @ip: The instruction pointer that this is called from.
3501 * @fmt: The format to print (printf format)
3503 * If a subsystem sets up its own instance, they have the right to
3504 * printk strings into their tracing instance buffer using this
3505 * function. Note, this function will not write into the top level
3506 * buffer (use trace_printk() for that), as writing into the top level
3507 * buffer should only have events that can be individually disabled.
3508 * trace_printk() is only used for debugging a kernel, and should not
3509 * be ever incorporated in normal use.
3511 * trace_array_printk() can be used, as it will not add noise to the
3512 * top level tracing buffer.
3514 * Note, trace_array_init_printk() must be called on @tr before this
3518 int trace_array_printk(struct trace_array *tr,
3519 unsigned long ip, const char *fmt, ...)
3527 /* This is only allowed for created instances */
3528 if (tr == &global_trace)
3531 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3535 ret = trace_array_vprintk(tr, ip, fmt, ap);
3539 EXPORT_SYMBOL_GPL(trace_array_printk);
3542 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3543 * @tr: The trace array to initialize the buffers for
3545 * As trace_array_printk() only writes into instances, they are OK to
3546 * have in the kernel (unlike trace_printk()). This needs to be called
3547 * before trace_array_printk() can be used on a trace_array.
3549 int trace_array_init_printk(struct trace_array *tr)
3554 /* This is only allowed for created instances */
3555 if (tr == &global_trace)
3558 return alloc_percpu_trace_buffer();
3560 EXPORT_SYMBOL_GPL(trace_array_init_printk);
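/*
 * Editorial example (a sketch; the instance name and message are
 * hypothetical, and trace_array_get_by_name() is assumed to take just
 * the instance name in this kernel):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "widgets ready: %d\n", n);
 */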
3563 int trace_array_printk_buf(struct trace_buffer *buffer,
3564 unsigned long ip, const char *fmt, ...)
3569 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3573 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3579 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3581 return trace_array_vprintk(&global_trace, ip, fmt, args);
3583 EXPORT_SYMBOL_GPL(trace_vprintk);
3585 static void trace_iterator_increment(struct trace_iterator *iter)
3587 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3591 ring_buffer_iter_advance(buf_iter);
3594 static struct trace_entry *
3595 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3596 unsigned long *lost_events)
3598 struct ring_buffer_event *event;
3599 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3602 event = ring_buffer_iter_peek(buf_iter, ts);
3604 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3605 (unsigned long)-1 : 0;
3607 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3612 iter->ent_size = ring_buffer_event_length(event);
3613 return ring_buffer_event_data(event);
3619 static struct trace_entry *
3620 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3621 unsigned long *missing_events, u64 *ent_ts)
3623 struct trace_buffer *buffer = iter->array_buffer->buffer;
3624 struct trace_entry *ent, *next = NULL;
3625 unsigned long lost_events = 0, next_lost = 0;
3626 int cpu_file = iter->cpu_file;
3627 u64 next_ts = 0, ts;
3633 * If we are in a per_cpu trace file, don't bother iterating over
3634 * all CPUs; peek at that one CPU directly.
3636 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3637 if (ring_buffer_empty_cpu(buffer, cpu_file))
3639 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3641 *ent_cpu = cpu_file;
3646 for_each_tracing_cpu(cpu) {
3648 if (ring_buffer_empty_cpu(buffer, cpu))
3651 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3654 * Pick the entry with the smallest timestamp:
3656 if (ent && (!next || ts < next_ts)) {
3660 next_lost = lost_events;
3661 next_size = iter->ent_size;
3665 iter->ent_size = next_size;
3668 *ent_cpu = next_cpu;
3674 *missing_events = next_lost;
3679 #define STATIC_FMT_BUF_SIZE 128
3680 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3682 static char *trace_iter_expand_format(struct trace_iterator *iter)
3687 * iter->tr is NULL when used with tp_printk, which makes
3688 * this get called where it is not safe to call krealloc().
3690 if (!iter->tr || iter->fmt == static_fmt_buf)
3693 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3696 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3703 /* Returns true if the string is safe to dereference from an event */
3704 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3707 unsigned long addr = (unsigned long)str;
3708 struct trace_event *trace_event;
3709 struct trace_event_call *event;
3711 /* Ignore strings with no length */
3715 /* OK if part of the event data */
3716 if ((addr >= (unsigned long)iter->ent) &&
3717 (addr < (unsigned long)iter->ent + iter->ent_size))
3720 /* OK if part of the temp seq buffer */
3721 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3722 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3725 /* Core rodata can not be freed */
3726 if (is_kernel_rodata(addr))
3729 if (trace_is_tracepoint_string(str))
3733 * Now this could be a module event, referencing core module
3734 * data, which is OK.
3739 trace_event = ftrace_find_event(iter->ent->type);
3743 event = container_of(trace_event, struct trace_event_call, event);
3744 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3747 /* Would rather have rodata, but this will suffice */
3748 if (within_module_core(addr, event->module))
3754 static const char *show_buffer(struct trace_seq *s)
3756 struct seq_buf *seq = &s->seq;
3758 seq_buf_terminate(seq);
3763 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3765 static int test_can_verify_check(const char *fmt, ...)
3772 * The verifier depends on vsnprintf() modifying the va_list that is
3773 * passed to it, i.e. on it being passed by reference. Some
3774 * architectures (like x86_32) pass it by value, which means that
3775 * vsnprintf() does not modify the caller's va_list, and the verifier
3776 * would then need to be able to understand all the values that
3777 * vsnprintf() can use. If it is passed by value, the verifier is
 * disabled.
3781 vsnprintf(buf, 16, "%d", ap);
3782 ret = va_arg(ap, int);
3788 static void test_can_verify(void)
3790 if (!test_can_verify_check("%d %d", 0, 1)) {
3791 pr_info("trace event string verifier disabled\n");
3792 static_branch_inc(&trace_no_verify);
3797 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3798 * @iter: The iterator that holds the seq buffer and the event being printed
3799 * @fmt: The format used to print the event
3800 * @ap: The va_list holding the data to print from @fmt.
3802 * This writes the data into the @iter->seq buffer using the data from
3803 * @fmt and @ap. If the format has a %s, then the source of the string
3804 * is examined to make sure it is safe to print; otherwise it will
3805 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3808 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3811 const char *p = fmt;
3815 if (WARN_ON_ONCE(!fmt))
3818 if (static_branch_unlikely(&trace_no_verify))
3821 /* Don't bother checking when doing a ftrace_dump() */
3822 if (iter->fmt == static_fmt_buf)
3831 /* We only care about %s and variants */
3832 for (i = 0; p[i]; i++) {
3833 if (i + 1 >= iter->fmt_size) {
3835 * If we can't expand the copy buffer, just print it as is.
3838 if (!trace_iter_expand_format(iter))
3842 if (p[i] == '\\' && p[i+1]) {
3847 /* Need to test cases like %08.*s */
3848 for (j = 1; p[i+j]; j++) {
3849 if (isdigit(p[i+j]) ||
3852 if (p[i+j] == '*') {
3864 /* If no %s found then just print normally */
3868 /* Copy up to the %s, and print that */
3869 strncpy(iter->fmt, p, i);
3870 iter->fmt[i] = '\0';
3871 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3874 * If iter->seq is full, the above call no longer guarantees
3875 * that ap is in sync with fmt processing, and further calls
3876 * to va_arg() can return wrong positional arguments.
3878 * Ensure that ap is no longer used in this case.
3880 if (iter->seq.full) {
3886 len = va_arg(ap, int);
3888 /* The ap now points to the string data of the %s */
3889 str = va_arg(ap, const char *);
3892 * If you hit this warning, it is likely that the
3893 * trace event in question used %s on a string that
3894 * was saved at the time of the event, but may not be
3895 * around when the trace is read. Use __string(),
3896 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3897 * instead. See samples/trace_events/trace-events-sample.h
3900 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3901 "fmt: '%s' current_buffer: '%s'",
3902 fmt, show_buffer(&iter->seq))) {
3905 /* Try to safely read the string */
3907 if (len + 1 > iter->fmt_size)
3908 len = iter->fmt_size - 1;
3911 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3915 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3919 trace_seq_printf(&iter->seq, "(0x%px)", str);
3921 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3923 str = "[UNSAFE-MEMORY]";
3924 strcpy(iter->fmt, "%s");
3926 strncpy(iter->fmt, p + i, j + 1);
3927 iter->fmt[j+1] = '\0';
3930 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3932 trace_seq_printf(&iter->seq, iter->fmt, str);
3938 trace_seq_vprintf(&iter->seq, p, ap);
3941 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3943 const char *p, *new_fmt;
3946 if (WARN_ON_ONCE(!fmt))
3949 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3953 new_fmt = q = iter->fmt;
3955 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3956 if (!trace_iter_expand_format(iter))
3959 q += iter->fmt - new_fmt;
3960 new_fmt = iter->fmt;
3965 /* Replace %p with %px */
3969 } else if (p[0] == 'p' && !isalnum(p[1])) {
3980 #define STATIC_TEMP_BUF_SIZE 128
3981 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3983 /* Find the next real entry, without updating the iterator itself */
3984 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3985 int *ent_cpu, u64 *ent_ts)
3987 /* __find_next_entry will reset ent_size */
3988 int ent_size = iter->ent_size;
3989 struct trace_entry *entry;
3992 * If called from ftrace_dump(), then the iter->temp buffer
3993 * will be the static_temp_buf and not created from kmalloc.
3994 * If the entry size is greater than the buffer, we cannot
3995 * save it. Just return NULL in that case. This is only
3996 * used to add markers when two consecutive events' time
3997 * stamps have a large delta. See trace_print_lat_context().
3999 if (iter->temp == static_temp_buf &&
4000 STATIC_TEMP_BUF_SIZE < ent_size)
4004 * The __find_next_entry() may call peek_next_entry(), which may
4005 * call ring_buffer_peek() that may make the contents of iter->ent
4006 * undefined. Need to copy iter->ent now.
4008 if (iter->ent && iter->ent != iter->temp) {
4009 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4010 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4012 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4017 iter->temp_size = iter->ent_size;
4019 memcpy(iter->temp, iter->ent, iter->ent_size);
4020 iter->ent = iter->temp;
4022 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4023 /* Put back the original ent_size */
4024 iter->ent_size = ent_size;
4029 /* Find the next real entry, and increment the iterator to the next entry */
4030 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4032 iter->ent = __find_next_entry(iter, &iter->cpu,
4033 &iter->lost_events, &iter->ts);
4036 trace_iterator_increment(iter);
4038 return iter->ent ? iter : NULL;
4041 static void trace_consume(struct trace_iterator *iter)
4043 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4044 &iter->lost_events);
4047 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4049 struct trace_iterator *iter = m->private;
4053 WARN_ON_ONCE(iter->leftover);
4057 /* can't go backwards */
4062 ent = trace_find_next_entry_inc(iter);
4066 while (ent && iter->idx < i)
4067 ent = trace_find_next_entry_inc(iter);
4074 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4076 struct ring_buffer_iter *buf_iter;
4077 unsigned long entries = 0;
4080 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4082 buf_iter = trace_buffer_iter(iter, cpu);
4086 ring_buffer_iter_reset(buf_iter);
4089 * We could have the case with the max latency tracers
4090 * that a reset never took place on a cpu. This is evident
4091 * from the timestamp being before the start of the buffer.
4093 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4094 if (ts >= iter->array_buffer->time_start)
4097 ring_buffer_iter_advance(buf_iter);
4100 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4104 * The current tracer is copied to avoid using a global lock
 * all around.
4107 static void *s_start(struct seq_file *m, loff_t *pos)
4109 struct trace_iterator *iter = m->private;
4110 struct trace_array *tr = iter->tr;
4111 int cpu_file = iter->cpu_file;
4117 * copy the tracer to avoid using a global lock all around.
4118 * iter->trace is a copy of current_trace; the pointer to the
4119 * name may be used instead of a strcmp(), as iter->trace->name
4120 * will point to the same string as current_trace->name.
4122 mutex_lock(&trace_types_lock);
4123 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4124 *iter->trace = *tr->current_trace;
4125 mutex_unlock(&trace_types_lock);
4127 #ifdef CONFIG_TRACER_MAX_TRACE
4128 if (iter->snapshot && iter->trace->use_max_tr)
4129 return ERR_PTR(-EBUSY);
4132 if (*pos != iter->pos) {
4137 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4138 for_each_tracing_cpu(cpu)
4139 tracing_iter_reset(iter, cpu);
4141 tracing_iter_reset(iter, cpu_file);
4144 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4149 * If we overflowed the seq_file before, then we want
4150 * to just reuse the trace_seq buffer again.
4156 p = s_next(m, p, &l);
4160 trace_event_read_lock();
4161 trace_access_lock(cpu_file);
4165 static void s_stop(struct seq_file *m, void *p)
4167 struct trace_iterator *iter = m->private;
4169 #ifdef CONFIG_TRACER_MAX_TRACE
4170 if (iter->snapshot && iter->trace->use_max_tr)
4174 trace_access_unlock(iter->cpu_file);
4175 trace_event_read_unlock();
4179 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4180 unsigned long *entries, int cpu)
4182 unsigned long count;
4184 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4186 * If this buffer has skipped entries, then we hold all
4187 * entries for the trace and we need to ignore the
4188 * ones before the time stamp.
4190 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4191 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4192 /* total is the same as the entries */
4196 ring_buffer_overrun_cpu(buf->buffer, cpu);
4201 get_total_entries(struct array_buffer *buf,
4202 unsigned long *total, unsigned long *entries)
4210 for_each_tracing_cpu(cpu) {
4211 get_total_entries_cpu(buf, &t, &e, cpu);
4217 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4219 unsigned long total, entries;
4224 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4229 unsigned long trace_total_entries(struct trace_array *tr)
4231 unsigned long total, entries;
4236 get_total_entries(&tr->array_buffer, &total, &entries);
4241 static void print_lat_help_header(struct seq_file *m)
4243 seq_puts(m, "# _------=> CPU# \n"
4244 "# / _-----=> irqs-off/BH-disabled\n"
4245 "# | / _----=> need-resched \n"
4246 "# || / _---=> hardirq/softirq \n"
4247 "# ||| / _--=> preempt-depth \n"
4248 "# |||| / _-=> migrate-disable \n"
4249 "# ||||| / delay \n"
4250 "# cmd pid |||||| time | caller \n"
4251 "# \\ / |||||| \\ | / \n");
4254 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4256 unsigned long total;
4257 unsigned long entries;
4259 get_total_entries(buf, &total, &entries);
4260 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4261 entries, total, num_online_cpus());
4265 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4268 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4270 print_event_info(buf, m);
4272 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4273 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4276 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4279 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4280 static const char space[] = " ";
4281 int prec = tgid ? 12 : 2;
4283 print_event_info(buf, m);
4285 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4286 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4287 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4288 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4289 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4290 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4291 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4292 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4296 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4298 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4299 struct array_buffer *buf = iter->array_buffer;
4300 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4301 struct tracer *type = iter->trace;
4302 unsigned long entries;
4303 unsigned long total;
4304 const char *name = type->name;
4306 get_total_entries(buf, &total, &entries);
4308 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4310 seq_puts(m, "# -----------------------------------"
4311 "---------------------------------\n");
4312 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4313 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4314 nsecs_to_usecs(data->saved_latency),
4318 preempt_model_none() ? "server" :
4319 preempt_model_voluntary() ? "desktop" :
4320 preempt_model_full() ? "preempt" :
4321 preempt_model_rt() ? "preempt_rt" :
4323 /* These are reserved for later use */
4326 seq_printf(m, " #P:%d)\n", num_online_cpus());
4330 seq_puts(m, "# -----------------\n");
4331 seq_printf(m, "# | task: %.16s-%d "
4332 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4333 data->comm, data->pid,
4334 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4335 data->policy, data->rt_priority);
4336 seq_puts(m, "# -----------------\n");
4338 if (data->critical_start) {
4339 seq_puts(m, "# => started at: ");
4340 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4341 trace_print_seq(m, &iter->seq);
4342 seq_puts(m, "\n# => ended at: ");
4343 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4344 trace_print_seq(m, &iter->seq);
4345 seq_puts(m, "\n#\n");
4351 static void test_cpu_buff_start(struct trace_iterator *iter)
4353 struct trace_seq *s = &iter->seq;
4354 struct trace_array *tr = iter->tr;
4356 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4359 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4362 if (cpumask_available(iter->started) &&
4363 cpumask_test_cpu(iter->cpu, iter->started))
4366 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4369 if (cpumask_available(iter->started))
4370 cpumask_set_cpu(iter->cpu, iter->started);
4372 /* Don't print started cpu buffer for the first entry of the trace */
4374 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4378 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4380 struct trace_array *tr = iter->tr;
4381 struct trace_seq *s = &iter->seq;
4382 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4383 struct trace_entry *entry;
4384 struct trace_event *event;
4388 test_cpu_buff_start(iter);
4390 event = ftrace_find_event(entry->type);
4392 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4393 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4394 trace_print_lat_context(iter);
4396 trace_print_context(iter);
4399 if (trace_seq_has_overflowed(s))
4400 return TRACE_TYPE_PARTIAL_LINE;
4403 return event->funcs->trace(iter, sym_flags, event);
4405 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4407 return trace_handle_return(s);
4410 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4412 struct trace_array *tr = iter->tr;
4413 struct trace_seq *s = &iter->seq;
4414 struct trace_entry *entry;
4415 struct trace_event *event;
4419 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4420 trace_seq_printf(s, "%d %d %llu ",
4421 entry->pid, iter->cpu, iter->ts);
4423 if (trace_seq_has_overflowed(s))
4424 return TRACE_TYPE_PARTIAL_LINE;
4426 event = ftrace_find_event(entry->type);
4428 return event->funcs->raw(iter, 0, event);
4430 trace_seq_printf(s, "%d ?\n", entry->type);
4432 return trace_handle_return(s);
4435 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4437 struct trace_array *tr = iter->tr;
4438 struct trace_seq *s = &iter->seq;
4439 unsigned char newline = '\n';
4440 struct trace_entry *entry;
4441 struct trace_event *event;
4445 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4446 SEQ_PUT_HEX_FIELD(s, entry->pid);
4447 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4448 SEQ_PUT_HEX_FIELD(s, iter->ts);
4449 if (trace_seq_has_overflowed(s))
4450 return TRACE_TYPE_PARTIAL_LINE;
4453 event = ftrace_find_event(entry->type);
4455 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4456 if (ret != TRACE_TYPE_HANDLED)
4460 SEQ_PUT_FIELD(s, newline);
4462 return trace_handle_return(s);
4465 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4467 struct trace_array *tr = iter->tr;
4468 struct trace_seq *s = &iter->seq;
4469 struct trace_entry *entry;
4470 struct trace_event *event;
4474 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4475 SEQ_PUT_FIELD(s, entry->pid);
4476 SEQ_PUT_FIELD(s, iter->cpu);
4477 SEQ_PUT_FIELD(s, iter->ts);
4478 if (trace_seq_has_overflowed(s))
4479 return TRACE_TYPE_PARTIAL_LINE;
4482 event = ftrace_find_event(entry->type);
4483 return event ? event->funcs->binary(iter, 0, event) :
4487 int trace_empty(struct trace_iterator *iter)
4489 struct ring_buffer_iter *buf_iter;
4492 /* If we are looking at one CPU buffer, only check that one */
4493 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4494 cpu = iter->cpu_file;
4495 buf_iter = trace_buffer_iter(iter, cpu);
4497 if (!ring_buffer_iter_empty(buf_iter))
4500 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4506 for_each_tracing_cpu(cpu) {
4507 buf_iter = trace_buffer_iter(iter, cpu);
4509 if (!ring_buffer_iter_empty(buf_iter))
4512 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4520 /* Called with trace_event_read_lock() held. */
4521 enum print_line_t print_trace_line(struct trace_iterator *iter)
4523 struct trace_array *tr = iter->tr;
4524 unsigned long trace_flags = tr->trace_flags;
4525 enum print_line_t ret;
4527 if (iter->lost_events) {
4528 if (iter->lost_events == (unsigned long)-1)
4529 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4532 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4533 iter->cpu, iter->lost_events);
4534 if (trace_seq_has_overflowed(&iter->seq))
4535 return TRACE_TYPE_PARTIAL_LINE;
4538 if (iter->trace && iter->trace->print_line) {
4539 ret = iter->trace->print_line(iter);
4540 if (ret != TRACE_TYPE_UNHANDLED)
4544 if (iter->ent->type == TRACE_BPUTS &&
4545 trace_flags & TRACE_ITER_PRINTK &&
4546 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4547 return trace_print_bputs_msg_only(iter);
4549 if (iter->ent->type == TRACE_BPRINT &&
4550 trace_flags & TRACE_ITER_PRINTK &&
4551 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4552 return trace_print_bprintk_msg_only(iter);
4554 if (iter->ent->type == TRACE_PRINT &&
4555 trace_flags & TRACE_ITER_PRINTK &&
4556 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4557 return trace_print_printk_msg_only(iter);
4559 if (trace_flags & TRACE_ITER_BIN)
4560 return print_bin_fmt(iter);
4562 if (trace_flags & TRACE_ITER_HEX)
4563 return print_hex_fmt(iter);
4565 if (trace_flags & TRACE_ITER_RAW)
4566 return print_raw_fmt(iter);
4568 return print_trace_fmt(iter);
4571 void trace_latency_header(struct seq_file *m)
4573 struct trace_iterator *iter = m->private;
4574 struct trace_array *tr = iter->tr;
4576 /* print nothing if the buffers are empty */
4577 if (trace_empty(iter))
4580 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4581 print_trace_header(m, iter);
4583 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4584 print_lat_help_header(m);
4587 void trace_default_header(struct seq_file *m)
4589 struct trace_iterator *iter = m->private;
4590 struct trace_array *tr = iter->tr;
4591 unsigned long trace_flags = tr->trace_flags;
4593 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4596 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4597 /* print nothing if the buffers are empty */
4598 if (trace_empty(iter))
4600 print_trace_header(m, iter);
4601 if (!(trace_flags & TRACE_ITER_VERBOSE))
4602 print_lat_help_header(m);
4604 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4605 if (trace_flags & TRACE_ITER_IRQ_INFO)
4606 print_func_help_header_irq(iter->array_buffer,
4609 print_func_help_header(iter->array_buffer, m,
4615 static void test_ftrace_alive(struct seq_file *m)
4617 if (!ftrace_is_dead())
4619 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4620 "# MAY BE MISSING FUNCTION EVENTS\n");
4623 #ifdef CONFIG_TRACER_MAX_TRACE
4624 static void show_snapshot_main_help(struct seq_file *m)
4626 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4627 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4628 "# Takes a snapshot of the main buffer.\n"
4629 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4630 "# (Doesn't have to be '2' works with any number that\n"
4631 "# is not a '0' or '1')\n");
4634 static void show_snapshot_percpu_help(struct seq_file *m)
4636 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4637 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4638 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4639 "# Takes a snapshot of the main buffer for this cpu.\n");
4641 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4642 "# Must use main snapshot file to allocate.\n");
4644 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4645 "# (Doesn't have to be '2' works with any number that\n"
4646 "# is not a '0' or '1')\n");
4649 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4651 if (iter->tr->allocated_snapshot)
4652 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4654 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4656 seq_puts(m, "# Snapshot commands:\n");
4657 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4658 show_snapshot_main_help(m);
4660 show_snapshot_percpu_help(m);
4663 /* Should never be called */
4664 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4667 static int s_show(struct seq_file *m, void *v)
4669 struct trace_iterator *iter = v;
4672 if (iter->ent == NULL) {
4674 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4676 test_ftrace_alive(m);
4678 if (iter->snapshot && trace_empty(iter))
4679 print_snapshot_help(m, iter);
4680 else if (iter->trace && iter->trace->print_header)
4681 iter->trace->print_header(m);
4683 trace_default_header(m);
4685 } else if (iter->leftover) {
4687 * If we filled the seq_file buffer earlier, we
4688 * want to just show it now.
4690 ret = trace_print_seq(m, &iter->seq);
4692 /* ret should this time be zero, but you never know */
4693 iter->leftover = ret;
4696 print_trace_line(iter);
4697 ret = trace_print_seq(m, &iter->seq);
4699 * If we overflow the seq_file buffer, then it will
4700 * ask us for this data again at start up.
4702 * ret is 0 if seq_file write succeeded.
4705 iter->leftover = ret;
4712 * Should be used after trace_array_get(), trace_types_lock
4713 * ensures that i_cdev was already initialized.
4715 static inline int tracing_get_cpu(struct inode *inode)
4717 if (inode->i_cdev) /* See trace_create_cpu_file() */
4718 return (long)inode->i_cdev - 1;
4719 return RING_BUFFER_ALL_CPUS;
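/*
 * Editorial note (sketch, assuming the encoding done by
 * trace_create_cpu_file()): per-cpu files stash "cpu + 1" in i_cdev, so
 * a NULL i_cdev still means "all CPUs":
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 */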
4722 static const struct seq_operations tracer_seq_ops = {
4729 static struct trace_iterator *
4730 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4732 struct trace_array *tr = inode->i_private;
4733 struct trace_iterator *iter;
4736 if (tracing_disabled)
4737 return ERR_PTR(-ENODEV);
4739 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4741 return ERR_PTR(-ENOMEM);
4743 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4745 if (!iter->buffer_iter)
4749 * trace_find_next_entry() may need to save off iter->ent.
4750 * It will place it into the iter->temp buffer. As most
4751 * events are less than 128 bytes, allocate a buffer of that size.
4752 * If one is greater, then trace_find_next_entry() will
4753 * allocate a new buffer to adjust for the bigger iter->ent.
4754 * It's not critical if it fails to get allocated here.
4756 iter->temp = kmalloc(128, GFP_KERNEL);
4758 iter->temp_size = 128;
4761 * trace_event_printf() may need to modify the given format
4762 * string to replace %p with %px so that it shows the real address
4763 * instead of a hashed value. However, that is only needed for
4764 * event tracing; other tracers may not need it. Defer the allocation
4765 * until it is needed.
4771 * We make a copy of the current tracer to avoid concurrent
4772 * changes on it while we are reading.
4774 mutex_lock(&trace_types_lock);
4775 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4779 *iter->trace = *tr->current_trace;
4781 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4786 #ifdef CONFIG_TRACER_MAX_TRACE
4787 /* Currently only the top directory has a snapshot */
4788 if (tr->current_trace->print_max || snapshot)
4789 iter->array_buffer = &tr->max_buffer;
4792 iter->array_buffer = &tr->array_buffer;
4793 iter->snapshot = snapshot;
4795 iter->cpu_file = tracing_get_cpu(inode);
4796 mutex_init(&iter->mutex);
4798 /* Notify the tracer early; before we stop tracing. */
4799 if (iter->trace->open)
4800 iter->trace->open(iter);
4802 /* Annotate start of buffers if we had overruns */
4803 if (ring_buffer_overruns(iter->array_buffer->buffer))
4804 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4806 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4807 if (trace_clocks[tr->clock_id].in_ns)
4808 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4811 * If pause-on-trace is enabled, then stop the trace while
4812 * dumping, unless this is the "snapshot" file
4814 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4815 tracing_stop_tr(tr);
4817 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4818 for_each_tracing_cpu(cpu) {
4819 iter->buffer_iter[cpu] =
4820 ring_buffer_read_prepare(iter->array_buffer->buffer,
4823 ring_buffer_read_prepare_sync();
4824 for_each_tracing_cpu(cpu) {
4825 ring_buffer_read_start(iter->buffer_iter[cpu]);
4826 tracing_iter_reset(iter, cpu);
4829 cpu = iter->cpu_file;
4830 iter->buffer_iter[cpu] =
4831 ring_buffer_read_prepare(iter->array_buffer->buffer,
4833 ring_buffer_read_prepare_sync();
4834 ring_buffer_read_start(iter->buffer_iter[cpu]);
4835 tracing_iter_reset(iter, cpu);
4838 mutex_unlock(&trace_types_lock);
4843 mutex_unlock(&trace_types_lock);
4846 kfree(iter->buffer_iter);
4848 seq_release_private(inode, file);
4849 return ERR_PTR(-ENOMEM);
4852 int tracing_open_generic(struct inode *inode, struct file *filp)
4856 ret = tracing_check_open_get_tr(NULL);
4860 filp->private_data = inode->i_private;
4864 bool tracing_is_disabled(void)
4866	return (tracing_disabled) ? true : false;
4870 * Open and update trace_array ref count.
4871 * Must have the current trace_array passed to it.
4873 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4875 struct trace_array *tr = inode->i_private;
4878 ret = tracing_check_open_get_tr(tr);
4882 filp->private_data = inode->i_private;
4887 static int tracing_mark_open(struct inode *inode, struct file *filp)
4889 stream_open(inode, filp);
4890 return tracing_open_generic_tr(inode, filp);
4893 static int tracing_release(struct inode *inode, struct file *file)
4895 struct trace_array *tr = inode->i_private;
4896 struct seq_file *m = file->private_data;
4897 struct trace_iterator *iter;
4900 if (!(file->f_mode & FMODE_READ)) {
4901 trace_array_put(tr);
4905 /* Writes do not use seq_file */
4907 mutex_lock(&trace_types_lock);
4909 for_each_tracing_cpu(cpu) {
4910 if (iter->buffer_iter[cpu])
4911 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4914 if (iter->trace && iter->trace->close)
4915 iter->trace->close(iter);
4917 if (!iter->snapshot && tr->stop_count)
4918 /* reenable tracing if it was previously enabled */
4919 tracing_start_tr(tr);
4921 __trace_array_put(tr);
4923 mutex_unlock(&trace_types_lock);
4925 mutex_destroy(&iter->mutex);
4926 free_cpumask_var(iter->started);
4930 kfree(iter->buffer_iter);
4931 seq_release_private(inode, file);
4936 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4938 struct trace_array *tr = inode->i_private;
4940 trace_array_put(tr);
4944 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4946 struct trace_array *tr = inode->i_private;
4948 trace_array_put(tr);
4950 return single_release(inode, file);
4953 static int tracing_open(struct inode *inode, struct file *file)
4955 struct trace_array *tr = inode->i_private;
4956 struct trace_iterator *iter;
4959 ret = tracing_check_open_get_tr(tr);
4963 /* If this file was open for write, then erase contents */
4964 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4965 int cpu = tracing_get_cpu(inode);
4966 struct array_buffer *trace_buf = &tr->array_buffer;
4968 #ifdef CONFIG_TRACER_MAX_TRACE
4969 if (tr->current_trace->print_max)
4970 trace_buf = &tr->max_buffer;
4973 if (cpu == RING_BUFFER_ALL_CPUS)
4974 tracing_reset_online_cpus(trace_buf);
4976 tracing_reset_cpu(trace_buf, cpu);
4979 if (file->f_mode & FMODE_READ) {
4980 iter = __tracing_open(inode, file, false);
4982 ret = PTR_ERR(iter);
4983 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4984 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4988 trace_array_put(tr);
4994 * Some tracers are not suitable for instance buffers.
4995 * A tracer is always available for the global array (toplevel)
4996 * or if it explicitly states that it is.
4999 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5001 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
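/*
 * Illustrative usage (not part of the original source): instance buffers
 * are created under tracefs, and only tracers that set ->allow_instances
 * (or the top-level array) may be selected there.  A sketch, assuming the
 * default tracefs mount point:
 *
 *	# mkdir /sys/kernel/tracing/instances/foo
 *	# echo function > /sys/kernel/tracing/instances/foo/current_tracer
 */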
5004 /* Find the next tracer that this trace array may use */
5005 static struct tracer *
5006 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5008 while (t && !trace_ok_for_array(t, tr))
5015 t_next(struct seq_file *m, void *v, loff_t *pos)
5017 struct trace_array *tr = m->private;
5018 struct tracer *t = v;
5023 t = get_tracer_for_array(tr, t->next);
5028 static void *t_start(struct seq_file *m, loff_t *pos)
5030 struct trace_array *tr = m->private;
5034 mutex_lock(&trace_types_lock);
5036 t = get_tracer_for_array(tr, trace_types);
5037 for (; t && l < *pos; t = t_next(m, t, &l))
5043 static void t_stop(struct seq_file *m, void *p)
5045 mutex_unlock(&trace_types_lock);
5048 static int t_show(struct seq_file *m, void *v)
5050 struct tracer *t = v;
5055 seq_puts(m, t->name);
5064 static const struct seq_operations show_traces_seq_ops = {
5071 static int show_traces_open(struct inode *inode, struct file *file)
5073 struct trace_array *tr = inode->i_private;
5077 ret = tracing_check_open_get_tr(tr);
5081 ret = seq_open(file, &show_traces_seq_ops);
5083 trace_array_put(tr);
5087 m = file->private_data;
5093 static int show_traces_release(struct inode *inode, struct file *file)
5095 struct trace_array *tr = inode->i_private;
5097 trace_array_put(tr);
5098 return seq_release(inode, file);
5102 tracing_write_stub(struct file *filp, const char __user *ubuf,
5103 size_t count, loff_t *ppos)
5108 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5112 if (file->f_mode & FMODE_READ)
5113 ret = seq_lseek(file, offset, whence);
5115 file->f_pos = ret = 0;
5120 static const struct file_operations tracing_fops = {
5121 .open = tracing_open,
5123 .write = tracing_write_stub,
5124 .llseek = tracing_lseek,
5125 .release = tracing_release,
5128 static const struct file_operations show_traces_fops = {
5129 .open = show_traces_open,
5131 .llseek = seq_lseek,
5132 .release = show_traces_release,
5136 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5137 size_t count, loff_t *ppos)
5139 struct trace_array *tr = file_inode(filp)->i_private;
5143 len = snprintf(NULL, 0, "%*pb\n",
5144 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5145 mask_str = kmalloc(len, GFP_KERNEL);
5149 len = snprintf(mask_str, len, "%*pb\n",
5150 cpumask_pr_args(tr->tracing_cpumask));
5155 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5163 int tracing_set_cpumask(struct trace_array *tr,
5164 cpumask_var_t tracing_cpumask_new)
5171 local_irq_disable();
5172 arch_spin_lock(&tr->max_lock);
5173 for_each_tracing_cpu(cpu) {
5175 * Increase/decrease the disabled counter if we are
5176 * about to flip a bit in the cpumask:
5178 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5179 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5180 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5181 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5183 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5184 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5185 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5186 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5189 arch_spin_unlock(&tr->max_lock);
5192 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
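/*
 * Illustrative usage (not part of the original source): the mask is
 * written as a standard cpumask string, e.g. to restrict tracing to
 * CPUs 0 and 1:
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 */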
5198 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5199 size_t count, loff_t *ppos)
5201 struct trace_array *tr = file_inode(filp)->i_private;
5202 cpumask_var_t tracing_cpumask_new;
5205 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5208 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5212 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5216 free_cpumask_var(tracing_cpumask_new);
5221 free_cpumask_var(tracing_cpumask_new);
5226 static const struct file_operations tracing_cpumask_fops = {
5227 .open = tracing_open_generic_tr,
5228 .read = tracing_cpumask_read,
5229 .write = tracing_cpumask_write,
5230 .release = tracing_release_generic_tr,
5231 .llseek = generic_file_llseek,
5234 static int tracing_trace_options_show(struct seq_file *m, void *v)
5236 struct tracer_opt *trace_opts;
5237 struct trace_array *tr = m->private;
5241 mutex_lock(&trace_types_lock);
5242 tracer_flags = tr->current_trace->flags->val;
5243 trace_opts = tr->current_trace->flags->opts;
5245 for (i = 0; trace_options[i]; i++) {
5246 if (tr->trace_flags & (1 << i))
5247 seq_printf(m, "%s\n", trace_options[i]);
5249 seq_printf(m, "no%s\n", trace_options[i]);
5252 for (i = 0; trace_opts[i].name; i++) {
5253 if (tracer_flags & trace_opts[i].bit)
5254 seq_printf(m, "%s\n", trace_opts[i].name);
5256 seq_printf(m, "no%s\n", trace_opts[i].name);
5258 mutex_unlock(&trace_types_lock);
5263 static int __set_tracer_option(struct trace_array *tr,
5264 struct tracer_flags *tracer_flags,
5265 struct tracer_opt *opts, int neg)
5267 struct tracer *trace = tracer_flags->trace;
5270 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5275 tracer_flags->val &= ~opts->bit;
5277 tracer_flags->val |= opts->bit;
5281 /* Try to assign a tracer specific option */
5282 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5284 struct tracer *trace = tr->current_trace;
5285 struct tracer_flags *tracer_flags = trace->flags;
5286 struct tracer_opt *opts = NULL;
5289 for (i = 0; tracer_flags->opts[i].name; i++) {
5290 opts = &tracer_flags->opts[i];
5292 if (strcmp(cmp, opts->name) == 0)
5293 return __set_tracer_option(tr, trace->flags, opts, neg);
5299 /* Some tracers require overwrite to stay enabled */
5300 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5302 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5308 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5312 if ((mask == TRACE_ITER_RECORD_TGID) ||
5313 (mask == TRACE_ITER_RECORD_CMD))
5314 lockdep_assert_held(&event_mutex);
5316 /* do nothing if flag is already set */
5317 if (!!(tr->trace_flags & mask) == !!enabled)
5320 /* Give the tracer a chance to approve the change */
5321 if (tr->current_trace->flag_changed)
5322 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5326 tr->trace_flags |= mask;
5328 tr->trace_flags &= ~mask;
5330 if (mask == TRACE_ITER_RECORD_CMD)
5331 trace_event_enable_cmd_record(enabled);
5333 if (mask == TRACE_ITER_RECORD_TGID) {
5335 tgid_map_max = pid_max;
5336 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5340 * Pairs with smp_load_acquire() in
5341 * trace_find_tgid_ptr() to ensure that if it observes
5342 * the tgid_map we just allocated then it also observes
5343 * the corresponding tgid_map_max value.
5345 smp_store_release(&tgid_map, map);
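			/*
			 * Illustrative sketch (not part of the original
			 * source) of the acquire side mentioned above; the
			 * reader in trace_find_tgid_ptr() looks roughly like:
			 *
			 *	int *map = smp_load_acquire(&tgid_map);
			 *
			 *	if (unlikely(!map || pid > tgid_map_max))
			 *		return NULL;
			 *	return &map[pid];
			 */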
5348 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5352 trace_event_enable_tgid_record(enabled);
5355 if (mask == TRACE_ITER_EVENT_FORK)
5356 trace_event_follow_fork(tr, enabled);
5358 if (mask == TRACE_ITER_FUNC_FORK)
5359 ftrace_pid_follow_fork(tr, enabled);
5361 if (mask == TRACE_ITER_OVERWRITE) {
5362 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5363 #ifdef CONFIG_TRACER_MAX_TRACE
5364 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5368 if (mask == TRACE_ITER_PRINTK) {
5369 trace_printk_start_stop_comm(enabled);
5370 trace_printk_control(enabled);
5376 int trace_set_options(struct trace_array *tr, char *option)
5381 size_t orig_len = strlen(option);
5384 cmp = strstrip(option);
5386 len = str_has_prefix(cmp, "no");
5392 mutex_lock(&event_mutex);
5393 mutex_lock(&trace_types_lock);
5395 ret = match_string(trace_options, -1, cmp);
5396 /* If no option could be set, test the specific tracer options */
5398 ret = set_tracer_option(tr, cmp, neg);
5400 ret = set_tracer_flag(tr, 1 << ret, !neg);
5402 mutex_unlock(&trace_types_lock);
5403 mutex_unlock(&event_mutex);
5406 * If the first trailing whitespace is replaced with '\0' by strstrip,
5407 * turn it back into a space.
5409 if (orig_len > strlen(option))
5410 option[strlen(option)] = ' ';
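/*
 * Illustrative usage (not part of the original source): options are
 * toggled by name, and a "no" prefix clears them, e.g.:
 *
 *	# echo noprint-parent > /sys/kernel/tracing/trace_options
 *	# echo sym-offset > /sys/kernel/tracing/trace_options
 */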
5415 static void __init apply_trace_boot_options(void)
5417 char *buf = trace_boot_options_buf;
5421 option = strsep(&buf, ",");
5427 trace_set_options(&global_trace, option);
5429 /* Put back the comma to allow this to be called again */
5436 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5437 size_t cnt, loff_t *ppos)
5439 struct seq_file *m = filp->private_data;
5440 struct trace_array *tr = m->private;
5444 if (cnt >= sizeof(buf))
5447 if (copy_from_user(buf, ubuf, cnt))
5452 ret = trace_set_options(tr, buf);
5461 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5463 struct trace_array *tr = inode->i_private;
5466 ret = tracing_check_open_get_tr(tr);
5470 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5472 trace_array_put(tr);
5477 static const struct file_operations tracing_iter_fops = {
5478 .open = tracing_trace_options_open,
5480 .llseek = seq_lseek,
5481 .release = tracing_single_release_tr,
5482 .write = tracing_trace_options_write,
5485 static const char readme_msg[] =
5486 "tracing mini-HOWTO:\n\n"
5487 "# echo 0 > tracing_on : quick way to disable tracing\n"
5488 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5489 " Important files:\n"
5490 " trace\t\t\t- The static contents of the buffer\n"
5491 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5492 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5493 " current_tracer\t- function and latency tracers\n"
5494 " available_tracers\t- list of configured tracers for current_tracer\n"
5495 " error_log\t- error log for failed commands (that support it)\n"
5496 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5497 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5498 " trace_clock\t\t- change the clock used to order events\n"
5499 " local: Per cpu clock but may not be synced across CPUs\n"
5500 " global: Synced across CPUs but slows tracing down.\n"
5501 " counter: Not a clock, but just an increment\n"
5502 " uptime: Jiffy counter from time of boot\n"
5503 " perf: Same clock that perf events use\n"
5504 #ifdef CONFIG_X86_64
5505 " x86-tsc: TSC cycle counter\n"
5507 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5508 " delta: Delta difference against a buffer-wide timestamp\n"
5509 " absolute: Absolute (standalone) timestamp\n"
5510 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5511 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5512 " tracing_cpumask\t- Limit which CPUs to trace\n"
5513 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5514 "\t\t\t Remove sub-buffer with rmdir\n"
5515 " trace_options\t\t- Set format or modify how tracing happens\n"
5516 "\t\t\t Disable an option by prefixing 'no' to the\n"
5517 "\t\t\t option name\n"
5518 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5519 #ifdef CONFIG_DYNAMIC_FTRACE
5520 "\n available_filter_functions - list of functions that can be filtered on\n"
5521 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5522 "\t\t\t functions\n"
5523 "\t accepts: func_full_name or glob-matching-pattern\n"
5524 "\t modules: Can select a group via module\n"
5525 "\t Format: :mod:<module-name>\n"
5526 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5527 "\t triggers: a command to perform when function is hit\n"
5528 "\t Format: <function>:<trigger>[:count]\n"
5529 "\t trigger: traceon, traceoff\n"
5530 "\t\t enable_event:<system>:<event>\n"
5531 "\t\t disable_event:<system>:<event>\n"
5532 #ifdef CONFIG_STACKTRACE
5535 #ifdef CONFIG_TRACER_SNAPSHOT
5540 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5541 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5542 "\t The first one will disable tracing every time do_fault is hit\n"
5543 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5544 "\t The first time do trap is hit and it disables tracing, the\n"
5545 "\t counter will decrement to 2. If tracing is already disabled,\n"
5546 "\t the counter will not decrement. It only decrements when the\n"
5547 "\t trigger did work\n"
5548 "\t To remove trigger without count:\n"
5549 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5550 "\t To remove trigger with a count:\n"
5551 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5552 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5553 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5554 "\t modules: Can select a group via module command :mod:\n"
5555 "\t Does not accept triggers\n"
5556 #endif /* CONFIG_DYNAMIC_FTRACE */
5557 #ifdef CONFIG_FUNCTION_TRACER
5558 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5560 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5563 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5564 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5565 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5566 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5568 #ifdef CONFIG_TRACER_SNAPSHOT
5569 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5570 "\t\t\t snapshot buffer. Read the contents for more\n"
5571 "\t\t\t information\n"
5573 #ifdef CONFIG_STACK_TRACER
5574 " stack_trace\t\t- Shows the max stack trace when active\n"
5575 " stack_max_size\t- Shows current max stack size that was traced\n"
5576 "\t\t\t Write into this file to reset the max size (trigger a\n"
5577 "\t\t\t new trace)\n"
5578 #ifdef CONFIG_DYNAMIC_FTRACE
5579 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5582 #endif /* CONFIG_STACK_TRACER */
5583 #ifdef CONFIG_DYNAMIC_EVENTS
5584 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5585 "\t\t\t Write into this file to define/undefine new trace events.\n"
5587 #ifdef CONFIG_KPROBE_EVENTS
5588 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5589 "\t\t\t Write into this file to define/undefine new trace events.\n"
5591 #ifdef CONFIG_UPROBE_EVENTS
5592 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5593 "\t\t\t Write into this file to define/undefine new trace events.\n"
5595 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5596 "\t accepts: event-definitions (one definition per line)\n"
5597 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5598 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5599 #ifdef CONFIG_HIST_TRIGGERS
5600 "\t s:[synthetic/]<event> <field> [<field>]\n"
5602 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]\n"
5603 "\t -:[<group>/][<event>]\n"
5604 #ifdef CONFIG_KPROBE_EVENTS
5605 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5606 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5608 #ifdef CONFIG_UPROBE_EVENTS
5609 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5611 "\t args: <name>=fetcharg[:type]\n"
5612 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5613 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5614 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5616 "\t $stack<index>, $stack, $retval, $comm,\n"
5618 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5619 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5620 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5621 "\t <type>\\[<array-size>\\]\n"
5622 #ifdef CONFIG_HIST_TRIGGERS
5623 "\t field: <stype> <name>;\n"
5624 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5625 "\t [unsigned] char/int/long\n"
5627 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5628 "\t of the <attached-group>/<attached-event>.\n"
5630 " events/\t\t- Directory containing all trace event subsystems:\n"
5631 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5632 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5633 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5635 " filter\t\t- If set, only events passing filter are traced\n"
5636 " events/<system>/<event>/\t- Directory containing control files for\n"
5638 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5639 " filter\t\t- If set, only events passing filter are traced\n"
5640 " trigger\t\t- If set, a command to perform when event is hit\n"
5641 "\t Format: <trigger>[:count][if <filter>]\n"
5642 "\t trigger: traceon, traceoff\n"
5643 "\t enable_event:<system>:<event>\n"
5644 "\t disable_event:<system>:<event>\n"
5645 #ifdef CONFIG_HIST_TRIGGERS
5646 "\t enable_hist:<system>:<event>\n"
5647 "\t disable_hist:<system>:<event>\n"
5649 #ifdef CONFIG_STACKTRACE
5652 #ifdef CONFIG_TRACER_SNAPSHOT
5655 #ifdef CONFIG_HIST_TRIGGERS
5656 "\t\t hist (see below)\n"
5658 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5659 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5660 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5661 "\t events/block/block_unplug/trigger\n"
5662 "\t The first disables tracing every time block_unplug is hit.\n"
5663 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5664 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5665 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5666 "\t Like function triggers, the counter is only decremented if it\n"
5667 "\t enabled or disabled tracing.\n"
5668 "\t To remove a trigger without a count:\n"
5669 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5670 "\t To remove a trigger with a count:\n"
5671 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5672 "\t Filters can be ignored when removing a trigger.\n"
5673 #ifdef CONFIG_HIST_TRIGGERS
5674 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5675 "\t Format: hist:keys=<field1[,field2,...]>\n"
5676 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5677 "\t [:values=<field1[,field2,...]>]\n"
5678 "\t [:sort=<field1[,field2,...]>]\n"
5679 "\t [:size=#entries]\n"
5680 "\t [:pause][:continue][:clear]\n"
5681 "\t [:name=histname1]\n"
5682 "\t [:<handler>.<action>]\n"
5683 "\t [if <filter>]\n\n"
5684 "\t Note, special fields can be used as well:\n"
5685 "\t common_timestamp - to record current timestamp\n"
5686 "\t common_cpu - to record the CPU the event happened on\n"
5688 "\t A hist trigger variable can be:\n"
5689 "\t - a reference to a field e.g. x=current_timestamp,\n"
5690 "\t - a reference to another variable e.g. y=$x,\n"
5691 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5692 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5694 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5695 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5696 "\t variable reference, field or numeric literal.\n"
5698 "\t When a matching event is hit, an entry is added to a hash\n"
5699 "\t table using the key(s) and value(s) named, and the value of a\n"
5700 "\t sum called 'hitcount' is incremented. Keys and values\n"
5701 "\t correspond to fields in the event's format description. Keys\n"
5702 "\t can be any field, or the special string 'stacktrace'.\n"
5703 "\t Compound keys consisting of up to two fields can be specified\n"
5704 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5705 "\t fields. Sort keys consisting of up to two fields can be\n"
5706 "\t specified using the 'sort' keyword. The sort direction can\n"
5707 "\t be modified by appending '.descending' or '.ascending' to a\n"
5708 "\t sort field. The 'size' parameter can be used to specify more\n"
5709 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5710 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5711 "\t its histogram data will be shared with other triggers of the\n"
5712 "\t same name, and trigger hits will update this common data.\n\n"
5713 "\t Reading the 'hist' file for the event will dump the hash\n"
5714 "\t table in its entirety to stdout. If there are multiple hist\n"
5715 "\t triggers attached to an event, there will be a table for each\n"
5716 "\t trigger in the output. The table displayed for a named\n"
5717 "\t trigger will be the same as any other instance having the\n"
5718 "\t same name. The default format used to display a given field\n"
5719 "\t can be modified by appending any of the following modifiers\n"
5720 "\t to the field name, as applicable:\n\n"
5721 "\t .hex display a number as a hex value\n"
5722 "\t .sym display an address as a symbol\n"
5723 "\t .sym-offset display an address as a symbol and offset\n"
5724 "\t .execname display a common_pid as a program name\n"
5725 "\t .syscall display a syscall id as a syscall name\n"
5726 "\t .log2 display log2 value rather than raw number\n"
5727 "\t .buckets=size display values in groups of size rather than raw number\n"
5728 "\t .usecs display a common_timestamp in microseconds\n\n"
5729 "\t The 'pause' parameter can be used to pause an existing hist\n"
5730 "\t trigger or to start a hist trigger but not log any events\n"
5731 "\t until told to do so. 'continue' can be used to start or\n"
5732 "\t restart a paused hist trigger.\n\n"
5733 "\t The 'clear' parameter will clear the contents of a running\n"
5734 "\t hist trigger and leave its current paused/active state\n"
5736 "\t The enable_hist and disable_hist triggers can be used to\n"
5737 "\t have one event conditionally start and stop another event's\n"
5738 "\t already-attached hist trigger. The syntax is analogous to\n"
5739 "\t the enable_event and disable_event triggers.\n\n"
5740 "\t Hist trigger handlers and actions are executed whenever a\n"
5741 "\t a histogram entry is added or updated. They take the form:\n\n"
5742 "\t <handler>.<action>\n\n"
5743 "\t The available handlers are:\n\n"
5744 "\t onmatch(matching.event) - invoke on addition or update\n"
5745 "\t onmax(var) - invoke if var exceeds current max\n"
5746 "\t onchange(var) - invoke action if var changes\n\n"
5747 "\t The available actions are:\n\n"
5748 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5749 "\t save(field,...) - save current event fields\n"
5750 #ifdef CONFIG_TRACER_SNAPSHOT
5751 "\t snapshot() - snapshot the trace buffer\n\n"
5753 #ifdef CONFIG_SYNTH_EVENTS
5754 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5755 "\t Write into this file to define/undefine new synthetic events.\n"
5756 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5762 tracing_readme_read(struct file *filp, char __user *ubuf,
5763 size_t cnt, loff_t *ppos)
5765 return simple_read_from_buffer(ubuf, cnt, ppos,
5766 readme_msg, strlen(readme_msg));
5769 static const struct file_operations tracing_readme_fops = {
5770 .open = tracing_open_generic,
5771 .read = tracing_readme_read,
5772 .llseek = generic_file_llseek,
5775 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5779 return trace_find_tgid_ptr(pid);
5782 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5786 return trace_find_tgid_ptr(pid);
5789 static void saved_tgids_stop(struct seq_file *m, void *v)
5793 static int saved_tgids_show(struct seq_file *m, void *v)
5795 int *entry = (int *)v;
5796 int pid = entry - tgid_map;
5802 seq_printf(m, "%d %d\n", pid, tgid);
5806 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5807 .start = saved_tgids_start,
5808 .stop = saved_tgids_stop,
5809 .next = saved_tgids_next,
5810 .show = saved_tgids_show,
5813 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5817 ret = tracing_check_open_get_tr(NULL);
5821 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5825 static const struct file_operations tracing_saved_tgids_fops = {
5826 .open = tracing_saved_tgids_open,
5828 .llseek = seq_lseek,
5829 .release = seq_release,
5832 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5834 unsigned int *ptr = v;
5836 if (*pos || m->count)
5841 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5843 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5852 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5858 arch_spin_lock(&trace_cmdline_lock);
5860 v = &savedcmd->map_cmdline_to_pid[0];
5862 v = saved_cmdlines_next(m, v, &l);
5870 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5872 arch_spin_unlock(&trace_cmdline_lock);
5876 static int saved_cmdlines_show(struct seq_file *m, void *v)
5878 char buf[TASK_COMM_LEN];
5879 unsigned int *pid = v;
5881 __trace_find_cmdline(*pid, buf);
5882 seq_printf(m, "%d %s\n", *pid, buf);
5886 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5887 .start = saved_cmdlines_start,
5888 .next = saved_cmdlines_next,
5889 .stop = saved_cmdlines_stop,
5890 .show = saved_cmdlines_show,
5893 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5897 ret = tracing_check_open_get_tr(NULL);
5901 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5904 static const struct file_operations tracing_saved_cmdlines_fops = {
5905 .open = tracing_saved_cmdlines_open,
5907 .llseek = seq_lseek,
5908 .release = seq_release,
5912 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5913 size_t cnt, loff_t *ppos)
5919 arch_spin_lock(&trace_cmdline_lock);
5920 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5921 arch_spin_unlock(&trace_cmdline_lock);
5924 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5927 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5929 kfree(s->saved_cmdlines);
5930 kfree(s->map_cmdline_to_pid);
5934 static int tracing_resize_saved_cmdlines(unsigned int val)
5936 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5938 s = kmalloc(sizeof(*s), GFP_KERNEL);
5942 if (allocate_cmdlines_buffer(val, s) < 0) {
5948 arch_spin_lock(&trace_cmdline_lock);
5949 savedcmd_temp = savedcmd;
5951 arch_spin_unlock(&trace_cmdline_lock);
5953 free_saved_cmdlines_buffer(savedcmd_temp);
5959 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5960 size_t cnt, loff_t *ppos)
5965 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5969	/* must have at least 1 entry and no more than PID_MAX_DEFAULT */
5970 if (!val || val > PID_MAX_DEFAULT)
5973 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5982 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5983 .open = tracing_open_generic,
5984 .read = tracing_saved_cmdlines_size_read,
5985 .write = tracing_saved_cmdlines_size_write,
5988 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5989 static union trace_eval_map_item *
5990 update_eval_map(union trace_eval_map_item *ptr)
5992 if (!ptr->map.eval_string) {
5993 if (ptr->tail.next) {
5994 ptr = ptr->tail.next;
5995 /* Set ptr to the next real item (skip head) */
6003 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6005 union trace_eval_map_item *ptr = v;
6008 * Paranoid! If ptr points to end, we don't want to increment past it.
6009 * This really should never happen.
6012 ptr = update_eval_map(ptr);
6013 if (WARN_ON_ONCE(!ptr))
6017 ptr = update_eval_map(ptr);
6022 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6024 union trace_eval_map_item *v;
6027 mutex_lock(&trace_eval_mutex);
6029 v = trace_eval_maps;
6033 while (v && l < *pos) {
6034 v = eval_map_next(m, v, &l);
6040 static void eval_map_stop(struct seq_file *m, void *v)
6042 mutex_unlock(&trace_eval_mutex);
6045 static int eval_map_show(struct seq_file *m, void *v)
6047 union trace_eval_map_item *ptr = v;
6049 seq_printf(m, "%s %ld (%s)\n",
6050 ptr->map.eval_string, ptr->map.eval_value,
6056 static const struct seq_operations tracing_eval_map_seq_ops = {
6057 .start = eval_map_start,
6058 .next = eval_map_next,
6059 .stop = eval_map_stop,
6060 .show = eval_map_show,
6063 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6067 ret = tracing_check_open_get_tr(NULL);
6071 return seq_open(filp, &tracing_eval_map_seq_ops);
6074 static const struct file_operations tracing_eval_map_fops = {
6075 .open = tracing_eval_map_open,
6077 .llseek = seq_lseek,
6078 .release = seq_release,
6081 static inline union trace_eval_map_item *
6082 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6084 /* Return tail of array given the head */
6085 return ptr + ptr->head.length + 1;
6089 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6092 struct trace_eval_map **stop;
6093 struct trace_eval_map **map;
6094 union trace_eval_map_item *map_array;
6095 union trace_eval_map_item *ptr;
6100 * The trace_eval_maps contains the map plus a head and tail item,
6101 * where the head holds the module and length of array, and the
6102 * tail holds a pointer to the next list.
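	 *
	 * Illustrative layout (not part of the original comment) of one such
	 * allocation for a module providing N maps:
	 *
	 *	map_array[0]		head (module pointer + length N)
	 *	map_array[1..N]		the trace_eval_map entries themselves
	 *	map_array[N + 1]	tail (next pointer, chaining to the next block)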
6104 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6106 pr_warn("Unable to allocate trace eval mapping\n");
6110 mutex_lock(&trace_eval_mutex);
6112 if (!trace_eval_maps)
6113 trace_eval_maps = map_array;
6115 ptr = trace_eval_maps;
6117 ptr = trace_eval_jmp_to_tail(ptr);
6118 if (!ptr->tail.next)
6120 ptr = ptr->tail.next;
6123 ptr->tail.next = map_array;
6125 map_array->head.mod = mod;
6126 map_array->head.length = len;
6129 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6130 map_array->map = **map;
6133 memset(map_array, 0, sizeof(*map_array));
6135 mutex_unlock(&trace_eval_mutex);
6138 static void trace_create_eval_file(struct dentry *d_tracer)
6140 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6141 NULL, &tracing_eval_map_fops);
6144 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6145 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6146 static inline void trace_insert_eval_map_file(struct module *mod,
6147 struct trace_eval_map **start, int len) { }
6148 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6150 static void trace_insert_eval_map(struct module *mod,
6151 struct trace_eval_map **start, int len)
6153 struct trace_eval_map **map;
6160 trace_event_eval_update(map, len);
6162 trace_insert_eval_map_file(mod, start, len);
6166 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6167 size_t cnt, loff_t *ppos)
6169 struct trace_array *tr = filp->private_data;
6170 char buf[MAX_TRACER_SIZE+2];
6173 mutex_lock(&trace_types_lock);
6174 r = sprintf(buf, "%s\n", tr->current_trace->name);
6175 mutex_unlock(&trace_types_lock);
6177 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6180 int tracer_init(struct tracer *t, struct trace_array *tr)
6182 tracing_reset_online_cpus(&tr->array_buffer);
6186 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6190 for_each_tracing_cpu(cpu)
6191 per_cpu_ptr(buf->data, cpu)->entries = val;
6194 #ifdef CONFIG_TRACER_MAX_TRACE
6195 /* resize @tr's buffer to the size of @size_tr's entries */
6196 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6197 struct array_buffer *size_buf, int cpu_id)
6201 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6202 for_each_tracing_cpu(cpu) {
6203 ret = ring_buffer_resize(trace_buf->buffer,
6204 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6207 per_cpu_ptr(trace_buf->data, cpu)->entries =
6208 per_cpu_ptr(size_buf->data, cpu)->entries;
6211 ret = ring_buffer_resize(trace_buf->buffer,
6212 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6214 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6215 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6220 #endif /* CONFIG_TRACER_MAX_TRACE */
6222 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6223 unsigned long size, int cpu)
6228 * If kernel or user changes the size of the ring buffer
6229 * we use the size that was given, and we can forget about
6230 * expanding it later.
6232 ring_buffer_expanded = true;
6234 /* May be called before buffers are initialized */
6235 if (!tr->array_buffer.buffer)
6238 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6242 #ifdef CONFIG_TRACER_MAX_TRACE
6243 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6244 !tr->current_trace->use_max_tr)
6247 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6249 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6250 &tr->array_buffer, cpu);
6253 * AARGH! We are left with different
6254 * size max buffer!!!!
6255 * The max buffer is our "snapshot" buffer.
6256 * When a tracer needs a snapshot (one of the
6257 * latency tracers), it swaps the max buffer
6258			 * with the saved snapshot. We succeeded in
6259			 * updating the size of the main buffer, but failed to
6260 * update the size of the max buffer. But when we tried
6261 * to reset the main buffer to the original size, we
6262 * failed there too. This is very unlikely to
6263 * happen, but if it does, warn and kill all
6267 tracing_disabled = 1;
6272 if (cpu == RING_BUFFER_ALL_CPUS)
6273 set_buffer_entries(&tr->max_buffer, size);
6275 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6278 #endif /* CONFIG_TRACER_MAX_TRACE */
6280 if (cpu == RING_BUFFER_ALL_CPUS)
6281 set_buffer_entries(&tr->array_buffer, size);
6283 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6288 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6289 unsigned long size, int cpu_id)
6293 mutex_lock(&trace_types_lock);
6295 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6296 /* make sure, this cpu is enabled in the mask */
6297 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6303 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6308 mutex_unlock(&trace_types_lock);
6315 * tracing_update_buffers - used by tracing facility to expand ring buffers
6317 * To save memory when tracing is never used on a system that has it
6318 * configured in, the ring buffers are set to a minimum size. Once
6319 * a user starts to use the tracing facility, they need to grow
6320 * to their default size.
6322 * This function is to be called when a tracer is about to be used.
6324 int tracing_update_buffers(void)
6328 mutex_lock(&trace_types_lock);
6329 if (!ring_buffer_expanded)
6330 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6331 RING_BUFFER_ALL_CPUS);
6332 mutex_unlock(&trace_types_lock);
6337 struct trace_option_dentry;
6340 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6343 * Used to clear out the tracer before deletion of an instance.
6344 * Must have trace_types_lock held.
6346 static void tracing_set_nop(struct trace_array *tr)
6348 if (tr->current_trace == &nop_trace)
6351 tr->current_trace->enabled--;
6353 if (tr->current_trace->reset)
6354 tr->current_trace->reset(tr);
6356 tr->current_trace = &nop_trace;
6359 static bool tracer_options_updated;
6361 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6363 /* Only enable if the directory has been created already. */
6367 /* Only create trace option files after update_tracer_options finish */
6368 if (!tracer_options_updated)
6371 create_trace_option_files(tr, t);
6374 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6377 #ifdef CONFIG_TRACER_MAX_TRACE
6382 mutex_lock(&trace_types_lock);
6384 if (!ring_buffer_expanded) {
6385 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6386 RING_BUFFER_ALL_CPUS);
6392 for (t = trace_types; t; t = t->next) {
6393 if (strcmp(t->name, buf) == 0)
6400 if (t == tr->current_trace)
6403 #ifdef CONFIG_TRACER_SNAPSHOT
6404 if (t->use_max_tr) {
6405 local_irq_disable();
6406 arch_spin_lock(&tr->max_lock);
6407 if (tr->cond_snapshot)
6409 arch_spin_unlock(&tr->max_lock);
6415 /* Some tracers won't work on kernel command line */
6416 if (system_state < SYSTEM_RUNNING && t->noboot) {
6417 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6422 /* Some tracers are only allowed for the top level buffer */
6423 if (!trace_ok_for_array(t, tr)) {
6428 /* If trace pipe files are being read, we can't change the tracer */
6429 if (tr->trace_ref) {
6434 trace_branch_disable();
6436 tr->current_trace->enabled--;
6438 if (tr->current_trace->reset)
6439 tr->current_trace->reset(tr);
6441 #ifdef CONFIG_TRACER_MAX_TRACE
6442 had_max_tr = tr->current_trace->use_max_tr;
6444 /* Current trace needs to be nop_trace before synchronize_rcu */
6445 tr->current_trace = &nop_trace;
6447 if (had_max_tr && !t->use_max_tr) {
6449 * We need to make sure that the update_max_tr sees that
6450 * current_trace changed to nop_trace to keep it from
6451 * swapping the buffers after we resize it.
6452		 * The update_max_tr is called with interrupts disabled,
6453		 * so a synchronize_rcu() is sufficient.
6459 if (t->use_max_tr && !tr->allocated_snapshot) {
6460 ret = tracing_alloc_snapshot_instance(tr);
6465 tr->current_trace = &nop_trace;
6469 ret = tracer_init(t, tr);
6474 tr->current_trace = t;
6475 tr->current_trace->enabled++;
6476 trace_branch_enable(tr);
6478 mutex_unlock(&trace_types_lock);
6484 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6485 size_t cnt, loff_t *ppos)
6487 struct trace_array *tr = filp->private_data;
6488 char buf[MAX_TRACER_SIZE+1];
6495 if (cnt > MAX_TRACER_SIZE)
6496 cnt = MAX_TRACER_SIZE;
6498 if (copy_from_user(buf, ubuf, cnt))
6505 err = tracing_set_tracer(tr, name);
6515 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6516 size_t cnt, loff_t *ppos)
6521 r = snprintf(buf, sizeof(buf), "%ld\n",
6522 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6523 if (r > sizeof(buf))
6525 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6529 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6530 size_t cnt, loff_t *ppos)
6535 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6545 tracing_thresh_read(struct file *filp, char __user *ubuf,
6546 size_t cnt, loff_t *ppos)
6548 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6552 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6553 size_t cnt, loff_t *ppos)
6555 struct trace_array *tr = filp->private_data;
6558 mutex_lock(&trace_types_lock);
6559 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6563 if (tr->current_trace->update_thresh) {
6564 ret = tr->current_trace->update_thresh(tr);
6571 mutex_unlock(&trace_types_lock);
6576 #ifdef CONFIG_TRACER_MAX_TRACE
6579 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6580 size_t cnt, loff_t *ppos)
6582 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6586 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6587 size_t cnt, loff_t *ppos)
6589 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6594 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6596 struct trace_array *tr = inode->i_private;
6597 struct trace_iterator *iter;
6600 ret = tracing_check_open_get_tr(tr);
6604 mutex_lock(&trace_types_lock);
6606 /* create a buffer to store the information to pass to userspace */
6607 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6610 __trace_array_put(tr);
6614 trace_seq_init(&iter->seq);
6615 iter->trace = tr->current_trace;
6617 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6622 /* trace pipe does not show start of buffer */
6623 cpumask_setall(iter->started);
6625 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6626 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6628 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6629 if (trace_clocks[tr->clock_id].in_ns)
6630 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6633 iter->array_buffer = &tr->array_buffer;
6634 iter->cpu_file = tracing_get_cpu(inode);
6635 mutex_init(&iter->mutex);
6636 filp->private_data = iter;
6638 if (iter->trace->pipe_open)
6639 iter->trace->pipe_open(iter);
6641 nonseekable_open(inode, filp);
6645 mutex_unlock(&trace_types_lock);
6650 __trace_array_put(tr);
6651 mutex_unlock(&trace_types_lock);
6655 static int tracing_release_pipe(struct inode *inode, struct file *file)
6657 struct trace_iterator *iter = file->private_data;
6658 struct trace_array *tr = inode->i_private;
6660 mutex_lock(&trace_types_lock);
6664 if (iter->trace->pipe_close)
6665 iter->trace->pipe_close(iter);
6667 mutex_unlock(&trace_types_lock);
6669 free_cpumask_var(iter->started);
6671 mutex_destroy(&iter->mutex);
6674 trace_array_put(tr);
6680 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6682 struct trace_array *tr = iter->tr;
6684 /* Iterators are static, they should be filled or empty */
6685 if (trace_buffer_iter(iter, iter->cpu_file))
6686 return EPOLLIN | EPOLLRDNORM;
6688 if (tr->trace_flags & TRACE_ITER_BLOCK)
6690 * Always select as readable when in blocking mode
6692 return EPOLLIN | EPOLLRDNORM;
6694 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6695 filp, poll_table, iter->tr->buffer_percent);
6699 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6701 struct trace_iterator *iter = filp->private_data;
6703 return trace_poll(iter, filp, poll_table);
6706 /* Must be called with iter->mutex held. */
6707 static int tracing_wait_pipe(struct file *filp)
6709 struct trace_iterator *iter = filp->private_data;
6712 while (trace_empty(iter)) {
6714 if ((filp->f_flags & O_NONBLOCK)) {
6719 * We block until we read something and tracing is disabled.
6720 * We still block if tracing is disabled, but we have never
6721 * read anything. This allows a user to cat this file, and
6722 * then enable tracing. But after we have read something,
6723 * we give an EOF when tracing is again disabled.
6725 * iter->pos will be 0 if we haven't read anything.
6727 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6730 mutex_unlock(&iter->mutex);
6732 ret = wait_on_pipe(iter, 0);
6734 mutex_lock(&iter->mutex);
6747 tracing_read_pipe(struct file *filp, char __user *ubuf,
6748 size_t cnt, loff_t *ppos)
6750 struct trace_iterator *iter = filp->private_data;
6754	 * Avoid more than one consumer on a single file descriptor.
6755	 * This is just a matter of trace coherency; the ring buffer itself is protected.
6758 mutex_lock(&iter->mutex);
6760 /* return any leftover data */
6761 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6765 trace_seq_init(&iter->seq);
6767 if (iter->trace->read) {
6768 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6774 sret = tracing_wait_pipe(filp);
6778 /* stop when tracing is finished */
6779 if (trace_empty(iter)) {
6784 if (cnt >= PAGE_SIZE)
6785 cnt = PAGE_SIZE - 1;
6787 /* reset all but tr, trace, and overruns */
6788 trace_iterator_reset(iter);
6789 cpumask_clear(iter->started);
6790 trace_seq_init(&iter->seq);
6792 trace_event_read_lock();
6793 trace_access_lock(iter->cpu_file);
6794 while (trace_find_next_entry_inc(iter) != NULL) {
6795 enum print_line_t ret;
6796 int save_len = iter->seq.seq.len;
6798 ret = print_trace_line(iter);
6799 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6801			 * If one print_trace_line() fills the entire trace_seq in one shot,
6802			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6803			 * In this case, we need to consume it; otherwise, the loop will peek
6804			 * at this event next time, resulting in an infinite loop.
6806 if (save_len == 0) {
6808 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6809 trace_consume(iter);
6813 /* In other cases, don't print partial lines */
6814 iter->seq.seq.len = save_len;
6817 if (ret != TRACE_TYPE_NO_CONSUME)
6818 trace_consume(iter);
6820 if (trace_seq_used(&iter->seq) >= cnt)
6824 * Setting the full flag means we reached the trace_seq buffer
6825 * size and we should leave by partial output condition above.
6826 * One of the trace_seq_* functions is not used properly.
6828 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6831 trace_access_unlock(iter->cpu_file);
6832 trace_event_read_unlock();
6834 /* Now copy what we have to the user */
6835 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6836 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6837 trace_seq_init(&iter->seq);
6840 * If there was nothing to send to user, in spite of consuming trace
6841 * entries, go back to wait for more entries.
6847 mutex_unlock(&iter->mutex);
6852 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6855 __free_page(spd->pages[idx]);
6859 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6865 /* Seq buffer is page-sized, exactly what we need. */
6867 save_len = iter->seq.seq.len;
6868 ret = print_trace_line(iter);
6870 if (trace_seq_has_overflowed(&iter->seq)) {
6871 iter->seq.seq.len = save_len;
6876 * This should not be hit, because it should only
6877 * be set if the iter->seq overflowed. But check it
6878 * anyway to be safe.
6880 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6881 iter->seq.seq.len = save_len;
6885 count = trace_seq_used(&iter->seq) - save_len;
6888 iter->seq.seq.len = save_len;
6892 if (ret != TRACE_TYPE_NO_CONSUME)
6893 trace_consume(iter);
6895 if (!trace_find_next_entry_inc(iter)) {
6905 static ssize_t tracing_splice_read_pipe(struct file *filp,
6907 struct pipe_inode_info *pipe,
6911 struct page *pages_def[PIPE_DEF_BUFFERS];
6912 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6913 struct trace_iterator *iter = filp->private_data;
6914 struct splice_pipe_desc spd = {
6916 .partial = partial_def,
6917 .nr_pages = 0, /* This gets updated below. */
6918 .nr_pages_max = PIPE_DEF_BUFFERS,
6919 .ops = &default_pipe_buf_ops,
6920 .spd_release = tracing_spd_release_pipe,
6926 if (splice_grow_spd(pipe, &spd))
6929 mutex_lock(&iter->mutex);
6931 if (iter->trace->splice_read) {
6932 ret = iter->trace->splice_read(iter, filp,
6933 ppos, pipe, len, flags);
6938 ret = tracing_wait_pipe(filp);
6942 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6947 trace_event_read_lock();
6948 trace_access_lock(iter->cpu_file);
6950 /* Fill as many pages as possible. */
6951 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6952 spd.pages[i] = alloc_page(GFP_KERNEL);
6956 rem = tracing_fill_pipe_page(rem, iter);
6958 /* Copy the data into the page, so we can start over. */
6959 ret = trace_seq_to_buffer(&iter->seq,
6960 page_address(spd.pages[i]),
6961 trace_seq_used(&iter->seq));
6963 __free_page(spd.pages[i]);
6966 spd.partial[i].offset = 0;
6967 spd.partial[i].len = trace_seq_used(&iter->seq);
6969 trace_seq_init(&iter->seq);
6972 trace_access_unlock(iter->cpu_file);
6973 trace_event_read_unlock();
6974 mutex_unlock(&iter->mutex);
6979 ret = splice_to_pipe(pipe, &spd);
6983 splice_shrink_spd(&spd);
6987 mutex_unlock(&iter->mutex);
6992 tracing_entries_read(struct file *filp, char __user *ubuf,
6993 size_t cnt, loff_t *ppos)
6995 struct inode *inode = file_inode(filp);
6996 struct trace_array *tr = inode->i_private;
6997 int cpu = tracing_get_cpu(inode);
7002 mutex_lock(&trace_types_lock);
7004 if (cpu == RING_BUFFER_ALL_CPUS) {
7005 int cpu, buf_size_same;
7010 /* check if all cpu sizes are same */
7011 for_each_tracing_cpu(cpu) {
7012 /* fill in the size from first enabled cpu */
7014 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7015 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7021 if (buf_size_same) {
7022 if (!ring_buffer_expanded)
7023 r = sprintf(buf, "%lu (expanded: %lu)\n",
7025 trace_buf_size >> 10);
7027 r = sprintf(buf, "%lu\n", size >> 10);
7029 r = sprintf(buf, "X\n");
7031 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7033 mutex_unlock(&trace_types_lock);
7035 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7040 tracing_entries_write(struct file *filp, const char __user *ubuf,
7041 size_t cnt, loff_t *ppos)
7043 struct inode *inode = file_inode(filp);
7044 struct trace_array *tr = inode->i_private;
7048 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7052 /* must have at least 1 entry */
7056 /* value is in KB */
7058 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
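/*
 * Illustrative usage (not part of the original source): the written value
 * is interpreted in KB per CPU, e.g. to give each CPU a 4 MB buffer:
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 */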
7068 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7069 size_t cnt, loff_t *ppos)
7071 struct trace_array *tr = filp->private_data;
7074 unsigned long size = 0, expanded_size = 0;
7076 mutex_lock(&trace_types_lock);
7077 for_each_tracing_cpu(cpu) {
7078 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7079 if (!ring_buffer_expanded)
7080 expanded_size += trace_buf_size >> 10;
7082 if (ring_buffer_expanded)
7083 r = sprintf(buf, "%lu\n", size);
7085 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7086 mutex_unlock(&trace_types_lock);
7088 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7092 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7093 size_t cnt, loff_t *ppos)
7096	 * There is no need to read what the user has written; this function
7097	 * just makes sure that there is no error when "echo" is used
7106 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7108 struct trace_array *tr = inode->i_private;
7110 /* disable tracing ? */
7111 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7112 tracer_tracing_off(tr);
7113 /* resize the ring buffer to 0 */
7114 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7116 trace_array_put(tr);
7122 tracing_mark_write(struct file *filp, const char __user *ubuf,
7123 size_t cnt, loff_t *fpos)
7125 struct trace_array *tr = filp->private_data;
7126 struct ring_buffer_event *event;
7127 enum event_trigger_type tt = ETT_NONE;
7128 struct trace_buffer *buffer;
7129 struct print_entry *entry;
7134 /* Used in tracing_mark_raw_write() as well */
7135 #define FAULTED_STR "<faulted>"
7136 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7138 if (tracing_disabled)
7141 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7144 if (cnt > TRACE_BUF_SIZE)
7145 cnt = TRACE_BUF_SIZE;
7147 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7149 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7151 /* If less than "<faulted>", then make sure we can still add that */
7152 if (cnt < FAULTED_SIZE)
7153 size += FAULTED_SIZE - cnt;
7155 buffer = tr->array_buffer.buffer;
7156 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7158 if (unlikely(!event))
7159 /* Ring buffer disabled, return as if not open for write */
7162 entry = ring_buffer_event_data(event);
7163 entry->ip = _THIS_IP_;
7165 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7167 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7173 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7174 /* do not add \n before testing triggers, but add \0 */
7175 entry->buf[cnt] = '\0';
7176 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7179 if (entry->buf[cnt - 1] != '\n') {
7180 entry->buf[cnt] = '\n';
7181 entry->buf[cnt + 1] = '\0';
7183 entry->buf[cnt] = '\0';
7185 if (static_branch_unlikely(&trace_marker_exports_enabled))
7186 ftrace_exports(event, TRACE_EXPORT_MARKER);
7187 __buffer_unlock_commit(buffer, event);
7190 event_triggers_post_call(tr->trace_marker_file, tt);
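/*
 * Illustrative userspace sketch (not part of the original source): any
 * write to the trace_marker file ends up as a TRACE_PRINT entry in the
 * ring buffer, e.g.:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "hello world\n", 12);
 *		close(fd);
 *	}
 */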
7195 /* Limit it for now to 3K (including tag) */
7196 #define RAW_DATA_MAX_SIZE (1024*3)
7199 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7200 size_t cnt, loff_t *fpos)
7202 struct trace_array *tr = filp->private_data;
7203 struct ring_buffer_event *event;
7204 struct trace_buffer *buffer;
7205 struct raw_data_entry *entry;
7210 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7212 if (tracing_disabled)
7215 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7218 /* The marker must at least have a tag id */
7219 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7222 if (cnt > TRACE_BUF_SIZE)
7223 cnt = TRACE_BUF_SIZE;
7225 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7227 size = sizeof(*entry) + cnt;
7228 if (cnt < FAULT_SIZE_ID)
7229 size += FAULT_SIZE_ID - cnt;
7231 buffer = tr->array_buffer.buffer;
7232 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7235 /* Ring buffer disabled, return as if not open for write */
7238 entry = ring_buffer_event_data(event);
7240 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7243 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7248 __buffer_unlock_commit(buffer, event);
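/*
 * The raw marker expects binary records whose first sizeof(int) bytes are a
 * user-chosen tag id, followed by an arbitrary payload.  A rough user-space
 * sketch (identifiers illustrative only):
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	int fd = open("trace_marker_raw", O_WRONLY);
 *	write(fd, &rec, sizeof(rec));
 */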
7253 static int tracing_clock_show(struct seq_file *m, void *v)
7255 struct trace_array *tr = m->private;
7258 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7260 "%s%s%s%s", i ? " " : "",
7261 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7262 i == tr->clock_id ? "]" : "");
7268 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7272 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7273 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7276 if (i == ARRAY_SIZE(trace_clocks))
7279 mutex_lock(&trace_types_lock);
7283 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7286 * New clock may not be consistent with the previous clock.
7287 * Reset the buffer so that it doesn't have incomparable timestamps.
7289 tracing_reset_online_cpus(&tr->array_buffer);
7291 #ifdef CONFIG_TRACER_MAX_TRACE
7292 if (tr->max_buffer.buffer)
7293 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7294 tracing_reset_online_cpus(&tr->max_buffer);
7297 mutex_unlock(&trace_types_lock);
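/*
 * From user space this is the "trace_clock" file: reading it lists the
 * available clocks with the current one in brackets, and writing a name
 * switches clocks (which, as noted above, resets the buffer).  E.g.
 * (output illustrative):
 *
 *	# cat trace_clock
 *	[local] global counter ...
 *	# echo global > trace_clock
 */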
7302 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7303 size_t cnt, loff_t *fpos)
7305 struct seq_file *m = filp->private_data;
7306 struct trace_array *tr = m->private;
7308 const char *clockstr;
7311 if (cnt >= sizeof(buf))
7314 if (copy_from_user(buf, ubuf, cnt))
7319 clockstr = strstrip(buf);
7321 ret = tracing_set_clock(tr, clockstr);
7330 static int tracing_clock_open(struct inode *inode, struct file *file)
7332 struct trace_array *tr = inode->i_private;
7335 ret = tracing_check_open_get_tr(tr);
7339 ret = single_open(file, tracing_clock_show, inode->i_private);
7341 trace_array_put(tr);
7346 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7348 struct trace_array *tr = m->private;
7350 mutex_lock(&trace_types_lock);
7352 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7353 seq_puts(m, "delta [absolute]\n");
7354 else
7355 seq_puts(m, "[delta] absolute\n");
7357 mutex_unlock(&trace_types_lock);
7362 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7364 struct trace_array *tr = inode->i_private;
7367 ret = tracing_check_open_get_tr(tr);
7371 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7373 trace_array_put(tr);
7378 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7380 if (rbe == this_cpu_read(trace_buffered_event))
7381 return ring_buffer_time_stamp(buffer);
7383 return ring_buffer_event_time_stamp(buffer, rbe);
7387 * Set or disable using the per CPU trace_buffered_event when possible.
7389 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7393 mutex_lock(&trace_types_lock);
7395 if (set && tr->no_filter_buffering_ref++)
7399 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7404 --tr->no_filter_buffering_ref;
7407 mutex_unlock(&trace_types_lock);
7412 struct ftrace_buffer_info {
7413 struct trace_iterator iter;
7415 unsigned int spare_cpu;
7419 #ifdef CONFIG_TRACER_SNAPSHOT
7420 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7422 struct trace_array *tr = inode->i_private;
7423 struct trace_iterator *iter;
7427 ret = tracing_check_open_get_tr(tr);
7431 if (file->f_mode & FMODE_READ) {
7432 iter = __tracing_open(inode, file, true);
7434 ret = PTR_ERR(iter);
7436 /* Writes still need the seq_file to hold the private data */
7438 m = kzalloc(sizeof(*m), GFP_KERNEL);
7441 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7449 iter->array_buffer = &tr->max_buffer;
7450 iter->cpu_file = tracing_get_cpu(inode);
7452 file->private_data = m;
7456 trace_array_put(tr);
7462 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7465 struct seq_file *m = filp->private_data;
7466 struct trace_iterator *iter = m->private;
7467 struct trace_array *tr = iter->tr;
7471 ret = tracing_update_buffers();
7475 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7479 mutex_lock(&trace_types_lock);
7481 if (tr->current_trace->use_max_tr) {
7486 local_irq_disable();
7487 arch_spin_lock(&tr->max_lock);
7488 if (tr->cond_snapshot)
7490 arch_spin_unlock(&tr->max_lock);
7497 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7501 if (tr->allocated_snapshot)
7505 /* Only allow per-cpu swap if the ring buffer supports it */
7506 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7507 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7512 if (tr->allocated_snapshot)
7513 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7514 &tr->array_buffer, iter->cpu_file);
7516 ret = tracing_alloc_snapshot_instance(tr);
7519 local_irq_disable();
7520 /* Now, we're going to swap */
7521 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7522 update_max_tr(tr, current, smp_processor_id(), NULL);
7524 update_max_tr_single(tr, current, iter->cpu_file);
7528 if (tr->allocated_snapshot) {
7529 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7530 tracing_reset_online_cpus(&tr->max_buffer);
7532 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7542 mutex_unlock(&trace_types_lock);
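/*
 * The val handling above follows the documented "snapshot" file semantics:
 * writing 0 frees the snapshot buffer (rejected on the per-cpu files),
 * writing 1 allocates the snapshot buffer if needed and swaps it with the
 * live buffer, and any other value (conventionally 2) just clears the
 * snapshot buffer's contents.
 */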
7546 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7548 struct seq_file *m = file->private_data;
7551 ret = tracing_release(inode, file);
7553 if (file->f_mode & FMODE_READ)
7556 /* If write only, the seq_file is just a stub */
7564 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7565 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7566 size_t count, loff_t *ppos);
7567 static int tracing_buffers_release(struct inode *inode, struct file *file);
7568 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7569 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7571 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7573 struct ftrace_buffer_info *info;
7576 /* The following checks for tracefs lockdown */
7577 ret = tracing_buffers_open(inode, filp);
7581 info = filp->private_data;
7583 if (info->iter.trace->use_max_tr) {
7584 tracing_buffers_release(inode, filp);
7588 info->iter.snapshot = true;
7589 info->iter.array_buffer = &info->iter.tr->max_buffer;
7594 #endif /* CONFIG_TRACER_SNAPSHOT */
7597 static const struct file_operations tracing_thresh_fops = {
7598 .open = tracing_open_generic,
7599 .read = tracing_thresh_read,
7600 .write = tracing_thresh_write,
7601 .llseek = generic_file_llseek,
7604 #ifdef CONFIG_TRACER_MAX_TRACE
7605 static const struct file_operations tracing_max_lat_fops = {
7606 .open = tracing_open_generic,
7607 .read = tracing_max_lat_read,
7608 .write = tracing_max_lat_write,
7609 .llseek = generic_file_llseek,
7613 static const struct file_operations set_tracer_fops = {
7614 .open = tracing_open_generic,
7615 .read = tracing_set_trace_read,
7616 .write = tracing_set_trace_write,
7617 .llseek = generic_file_llseek,
7620 static const struct file_operations tracing_pipe_fops = {
7621 .open = tracing_open_pipe,
7622 .poll = tracing_poll_pipe,
7623 .read = tracing_read_pipe,
7624 .splice_read = tracing_splice_read_pipe,
7625 .release = tracing_release_pipe,
7626 .llseek = no_llseek,
7629 static const struct file_operations tracing_entries_fops = {
7630 .open = tracing_open_generic_tr,
7631 .read = tracing_entries_read,
7632 .write = tracing_entries_write,
7633 .llseek = generic_file_llseek,
7634 .release = tracing_release_generic_tr,
7637 static const struct file_operations tracing_total_entries_fops = {
7638 .open = tracing_open_generic_tr,
7639 .read = tracing_total_entries_read,
7640 .llseek = generic_file_llseek,
7641 .release = tracing_release_generic_tr,
7644 static const struct file_operations tracing_free_buffer_fops = {
7645 .open = tracing_open_generic_tr,
7646 .write = tracing_free_buffer_write,
7647 .release = tracing_free_buffer_release,
7650 static const struct file_operations tracing_mark_fops = {
7651 .open = tracing_mark_open,
7652 .write = tracing_mark_write,
7653 .release = tracing_release_generic_tr,
7656 static const struct file_operations tracing_mark_raw_fops = {
7657 .open = tracing_mark_open,
7658 .write = tracing_mark_raw_write,
7659 .release = tracing_release_generic_tr,
7662 static const struct file_operations trace_clock_fops = {
7663 .open = tracing_clock_open,
7665 .llseek = seq_lseek,
7666 .release = tracing_single_release_tr,
7667 .write = tracing_clock_write,
7670 static const struct file_operations trace_time_stamp_mode_fops = {
7671 .open = tracing_time_stamp_mode_open,
7673 .llseek = seq_lseek,
7674 .release = tracing_single_release_tr,
7677 #ifdef CONFIG_TRACER_SNAPSHOT
7678 static const struct file_operations snapshot_fops = {
7679 .open = tracing_snapshot_open,
7681 .write = tracing_snapshot_write,
7682 .llseek = tracing_lseek,
7683 .release = tracing_snapshot_release,
7686 static const struct file_operations snapshot_raw_fops = {
7687 .open = snapshot_raw_open,
7688 .read = tracing_buffers_read,
7689 .release = tracing_buffers_release,
7690 .splice_read = tracing_buffers_splice_read,
7691 .llseek = no_llseek,
7694 #endif /* CONFIG_TRACER_SNAPSHOT */
7697 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7698 * @filp: The active open file structure
7699 * @ubuf: The userspace provided buffer to read the value from
7700 * @cnt: The maximum number of bytes to read
7701 * @ppos: The current "file" position
7703 * This function implements the write interface for a struct trace_min_max_param.
7704 * The filp->private_data must point to a trace_min_max_param structure that
7705 * defines where to write the value, the min and the max acceptable values,
7706 * and a lock to protect the write.
7709 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7711 struct trace_min_max_param *param = filp->private_data;
7718 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7723 mutex_lock(param->lock);
7725 if (param->min && val < *param->min)
7728 if (param->max && val > *param->max)
7735 mutex_unlock(param->lock);
7744 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7745 * @filp: The active open file structure
7746 * @ubuf: The userspace provided buffer to read value into
7747 * @cnt: The maximum number of bytes to read
7748 * @ppos: The current "file" position
7750 * This function implements the read interface for a struct trace_min_max_param.
7751 * The filp->private_data must point to a trace_min_max_param struct with valid
7752 * data.
7755 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7757 struct trace_min_max_param *param = filp->private_data;
7758 char buf[U64_STR_SIZE];
7767 if (cnt > sizeof(buf))
7770 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7772 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7775 const struct file_operations trace_min_max_fops = {
7776 .open = tracing_open_generic,
7777 .read = trace_min_max_read,
7778 .write = trace_min_max_write,
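/*
 * Sketch of how a tracer might wire a u64 knob to these fops.  The lock,
 * min and max fields match the uses above; the "val" field and all the
 * identifiers below are illustrative assumptions:
 *
 *	static u64 my_val, my_min = 1, my_max = 1000;
 *	static struct trace_min_max_param my_param = {
 *		.lock = &my_mutex,
 *		.val  = &my_val,
 *		.min  = &my_min,
 *		.max  = &my_max,
 *	};
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */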
7781 #define TRACING_LOG_ERRS_MAX 8
7782 #define TRACING_LOG_LOC_MAX 128
7784 #define CMD_PREFIX " Command: "
7787 const char **errs; /* ptr to loc-specific array of err strings */
7788 u8 type; /* index into errs -> specific err string */
7789 u16 pos; /* caret position */
7793 struct tracing_log_err {
7794 struct list_head list;
7795 struct err_info info;
7796 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7797 char *cmd; /* what caused err */
7800 static DEFINE_MUTEX(tracing_err_log_lock);
7802 static struct tracing_log_err *alloc_tracing_log_err(int len)
7804 struct tracing_log_err *err;
7806 err = kzalloc(sizeof(*err), GFP_KERNEL);
7808 return ERR_PTR(-ENOMEM);
7810 err->cmd = kzalloc(len, GFP_KERNEL);
7813 return ERR_PTR(-ENOMEM);
7819 static void free_tracing_log_err(struct tracing_log_err *err)
7825 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7828 struct tracing_log_err *err;
7831 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7832 err = alloc_tracing_log_err(len);
7833 if (PTR_ERR(err) != -ENOMEM)
7834 tr->n_err_log_entries++;
7838 cmd = kzalloc(len, GFP_KERNEL);
7840 return ERR_PTR(-ENOMEM);
7841 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7844 list_del(&err->list);
7850 * err_pos - find the position of a string within a command for error careting
7851 * @cmd: The tracing command that caused the error
7852 * @str: The string to position the caret at within @cmd
7854 * Finds the position of the first occurrence of @str within @cmd. The
7855 * return value can be passed to tracing_log_err() for caret placement
7858 * Returns the index within @cmd of the first occurrence of @str or 0
7859 * if @str was not found.
7861 unsigned int err_pos(char *cmd, const char *str)
7865 if (WARN_ON(!strlen(cmd)))
7868 found = strstr(cmd, str);
7876 * tracing_log_err - write an error to the tracing error log
7877 * @tr: The associated trace array for the error (NULL for top level array)
7878 * @loc: A string describing where the error occurred
7879 * @cmd: The tracing command that caused the error
7880 * @errs: The array of loc-specific static error strings
7881 * @type: The index into errs[], which produces the specific static err string
7882 * @pos: The position the caret should be placed in the cmd
7884 * Writes an error into tracing/error_log of the form:
7886 * <loc>: error: <text>
7890 * tracing/error_log is a small log file containing the last
7891 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7892 * unless there has been a tracing error, and the error log can be
7893 * cleared and have its memory freed by writing the empty string in
7894 * truncation mode to it i.e. echo > tracing/error_log.
7896 * NOTE: the @errs array along with the @type param are used to
7897 * produce a static error string - this string is not copied and saved
7898 * when the error is logged - only a pointer to it is saved. See
7899 * existing callers for examples of how static strings are typically
7900 * defined for use with tracing_log_err().
7902 void tracing_log_err(struct trace_array *tr,
7903 const char *loc, const char *cmd,
7904 const char **errs, u8 type, u16 pos)
7906 struct tracing_log_err *err;
7912 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7914 mutex_lock(&tracing_err_log_lock);
7915 err = get_tracing_log_err(tr, len);
7916 if (PTR_ERR(err) == -ENOMEM) {
7917 mutex_unlock(&tracing_err_log_lock);
7921 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7922 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7924 err->info.errs = errs;
7925 err->info.type = type;
7926 err->info.pos = pos;
7927 err->info.ts = local_clock();
7929 list_add_tail(&err->list, &tr->err_log);
7930 mutex_unlock(&tracing_err_log_lock);
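/*
 * Typical caller pattern (a sketch only; real callers live in other tracing
 * files and define their own loc-specific error-string arrays):
 *
 *	static const char *cmd_errs[] = { "field not found", "too many args" };
 *	...
 *	tracing_log_err(tr, "my_cmd", cmd, cmd_errs, 0,
 *			err_pos(cmd, "bad_field"));
 */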
7933 static void clear_tracing_err_log(struct trace_array *tr)
7935 struct tracing_log_err *err, *next;
7937 mutex_lock(&tracing_err_log_lock);
7938 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7939 list_del(&err->list);
7940 free_tracing_log_err(err);
7943 tr->n_err_log_entries = 0;
7944 mutex_unlock(&tracing_err_log_lock);
7947 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7949 struct trace_array *tr = m->private;
7951 mutex_lock(&tracing_err_log_lock);
7953 return seq_list_start(&tr->err_log, *pos);
7956 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7958 struct trace_array *tr = m->private;
7960 return seq_list_next(v, &tr->err_log, pos);
7963 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7965 mutex_unlock(&tracing_err_log_lock);
7968 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7972 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7974 for (i = 0; i < pos; i++)
7979 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7981 struct tracing_log_err *err = v;
7984 const char *err_text = err->info.errs[err->info.type];
7985 u64 sec = err->info.ts;
7988 nsec = do_div(sec, NSEC_PER_SEC);
7989 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7990 err->loc, err_text);
7991 seq_printf(m, "%s", err->cmd);
7992 tracing_err_log_show_pos(m, err->info.pos);
7998 static const struct seq_operations tracing_err_log_seq_ops = {
7999 .start = tracing_err_log_seq_start,
8000 .next = tracing_err_log_seq_next,
8001 .stop = tracing_err_log_seq_stop,
8002 .show = tracing_err_log_seq_show
8005 static int tracing_err_log_open(struct inode *inode, struct file *file)
8007 struct trace_array *tr = inode->i_private;
8010 ret = tracing_check_open_get_tr(tr);
8014 /* If this file was opened for write, then erase contents */
8015 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8016 clear_tracing_err_log(tr);
8018 if (file->f_mode & FMODE_READ) {
8019 ret = seq_open(file, &tracing_err_log_seq_ops);
8021 struct seq_file *m = file->private_data;
8024 trace_array_put(tr);
8030 static ssize_t tracing_err_log_write(struct file *file,
8031 const char __user *buffer,
8032 size_t count, loff_t *ppos)
8037 static int tracing_err_log_release(struct inode *inode, struct file *file)
8039 struct trace_array *tr = inode->i_private;
8041 trace_array_put(tr);
8043 if (file->f_mode & FMODE_READ)
8044 seq_release(inode, file);
8049 static const struct file_operations tracing_err_log_fops = {
8050 .open = tracing_err_log_open,
8051 .write = tracing_err_log_write,
8053 .llseek = seq_lseek,
8054 .release = tracing_err_log_release,
8057 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8059 struct trace_array *tr = inode->i_private;
8060 struct ftrace_buffer_info *info;
8063 ret = tracing_check_open_get_tr(tr);
8067 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8069 trace_array_put(tr);
8073 mutex_lock(&trace_types_lock);
8076 info->iter.cpu_file = tracing_get_cpu(inode);
8077 info->iter.trace = tr->current_trace;
8078 info->iter.array_buffer = &tr->array_buffer;
8080 /* Force reading ring buffer for first read */
8081 info->read = (unsigned int)-1;
8083 filp->private_data = info;
8087 mutex_unlock(&trace_types_lock);
8089 ret = nonseekable_open(inode, filp);
8091 trace_array_put(tr);
8097 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8099 struct ftrace_buffer_info *info = filp->private_data;
8100 struct trace_iterator *iter = &info->iter;
8102 return trace_poll(iter, filp, poll_table);
8106 tracing_buffers_read(struct file *filp, char __user *ubuf,
8107 size_t count, loff_t *ppos)
8109 struct ftrace_buffer_info *info = filp->private_data;
8110 struct trace_iterator *iter = &info->iter;
8117 #ifdef CONFIG_TRACER_MAX_TRACE
8118 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8123 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8125 if (IS_ERR(info->spare)) {
8126 ret = PTR_ERR(info->spare);
8129 info->spare_cpu = iter->cpu_file;
8135 /* Do we have previous read data to read? */
8136 if (info->read < PAGE_SIZE)
8140 trace_access_lock(iter->cpu_file);
8141 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8145 trace_access_unlock(iter->cpu_file);
8148 if (trace_empty(iter)) {
8149 if ((filp->f_flags & O_NONBLOCK))
8152 ret = wait_on_pipe(iter, 0);
8163 size = PAGE_SIZE - info->read;
8167 ret = copy_to_user(ubuf, info->spare + info->read, size);
8179 static int tracing_buffers_release(struct inode *inode, struct file *file)
8181 struct ftrace_buffer_info *info = file->private_data;
8182 struct trace_iterator *iter = &info->iter;
8184 mutex_lock(&trace_types_lock);
8186 iter->tr->trace_ref--;
8188 __trace_array_put(iter->tr);
8191 /* Make sure the waiters see the new wait_index */
8194 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8197 ring_buffer_free_read_page(iter->array_buffer->buffer,
8198 info->spare_cpu, info->spare);
8201 mutex_unlock(&trace_types_lock);
8207 struct trace_buffer *buffer;
8210 refcount_t refcount;
8213 static void buffer_ref_release(struct buffer_ref *ref)
8215 if (!refcount_dec_and_test(&ref->refcount))
8217 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8221 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8222 struct pipe_buffer *buf)
8224 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8226 buffer_ref_release(ref);
8230 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8231 struct pipe_buffer *buf)
8233 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8235 if (refcount_read(&ref->refcount) > INT_MAX/2)
8238 refcount_inc(&ref->refcount);
8242 /* Pipe buffer operations for a buffer. */
8243 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8244 .release = buffer_pipe_buf_release,
8245 .get = buffer_pipe_buf_get,
8249 * Callback from splice_to_pipe(), if we need to release some pages
8250 * at the end of the spd in case we error'ed out in filling the pipe.
8252 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8254 struct buffer_ref *ref =
8255 (struct buffer_ref *)spd->partial[i].private;
8257 buffer_ref_release(ref);
8258 spd->partial[i].private = 0;
8262 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8263 struct pipe_inode_info *pipe, size_t len,
8266 struct ftrace_buffer_info *info = file->private_data;
8267 struct trace_iterator *iter = &info->iter;
8268 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8269 struct page *pages_def[PIPE_DEF_BUFFERS];
8270 struct splice_pipe_desc spd = {
8272 .partial = partial_def,
8273 .nr_pages_max = PIPE_DEF_BUFFERS,
8274 .ops = &buffer_pipe_buf_ops,
8275 .spd_release = buffer_spd_release,
8277 struct buffer_ref *ref;
8281 #ifdef CONFIG_TRACER_MAX_TRACE
8282 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8286 if (*ppos & (PAGE_SIZE - 1))
8289 if (len & (PAGE_SIZE - 1)) {
8290 if (len < PAGE_SIZE)
8295 if (splice_grow_spd(pipe, &spd))
8299 trace_access_lock(iter->cpu_file);
8300 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8302 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8306 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8312 refcount_set(&ref->refcount, 1);
8313 ref->buffer = iter->array_buffer->buffer;
8314 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8315 if (IS_ERR(ref->page)) {
8316 ret = PTR_ERR(ref->page);
8321 ref->cpu = iter->cpu_file;
8323 r = ring_buffer_read_page(ref->buffer, &ref->page,
8324 len, iter->cpu_file, 1);
8326 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8332 page = virt_to_page(ref->page);
8334 spd.pages[i] = page;
8335 spd.partial[i].len = PAGE_SIZE;
8336 spd.partial[i].offset = 0;
8337 spd.partial[i].private = (unsigned long)ref;
8341 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8344 trace_access_unlock(iter->cpu_file);
8347 /* did we read anything? */
8348 if (!spd.nr_pages) {
8355 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8358 wait_index = READ_ONCE(iter->wait_index);
8360 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8364 /* No need to wait after waking up when tracing is off */
8365 if (!tracer_tracing_is_on(iter->tr))
8368 /* Make sure we see the new wait_index */
8370 if (wait_index != iter->wait_index)
8376 ret = splice_to_pipe(pipe, &spd);
8378 splice_shrink_spd(&spd);
8383 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8384 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8386 struct ftrace_buffer_info *info = file->private_data;
8387 struct trace_iterator *iter = &info->iter;
8390 return -ENOIOCTLCMD;
8392 mutex_lock(&trace_types_lock);
8395 /* Make sure the waiters see the new wait_index */
8398 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8400 mutex_unlock(&trace_types_lock);
8404 static const struct file_operations tracing_buffers_fops = {
8405 .open = tracing_buffers_open,
8406 .read = tracing_buffers_read,
8407 .poll = tracing_buffers_poll,
8408 .release = tracing_buffers_release,
8409 .splice_read = tracing_buffers_splice_read,
8410 .unlocked_ioctl = tracing_buffers_ioctl,
8411 .llseek = no_llseek,
8415 tracing_stats_read(struct file *filp, char __user *ubuf,
8416 size_t count, loff_t *ppos)
8418 struct inode *inode = file_inode(filp);
8419 struct trace_array *tr = inode->i_private;
8420 struct array_buffer *trace_buf = &tr->array_buffer;
8421 int cpu = tracing_get_cpu(inode);
8422 struct trace_seq *s;
8424 unsigned long long t;
8425 unsigned long usec_rem;
8427 s = kmalloc(sizeof(*s), GFP_KERNEL);
8433 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8434 trace_seq_printf(s, "entries: %ld\n", cnt);
8436 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8437 trace_seq_printf(s, "overrun: %ld\n", cnt);
8439 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8440 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8442 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8443 trace_seq_printf(s, "bytes: %ld\n", cnt);
8445 if (trace_clocks[tr->clock_id].in_ns) {
8446 /* local or global for trace_clock */
8447 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8448 usec_rem = do_div(t, USEC_PER_SEC);
8449 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8452 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8453 usec_rem = do_div(t, USEC_PER_SEC);
8454 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8456 /* counter or tsc mode for trace_clock */
8457 trace_seq_printf(s, "oldest event ts: %llu\n",
8458 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8460 trace_seq_printf(s, "now ts: %llu\n",
8461 ring_buffer_time_stamp(trace_buf->buffer));
8464 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8465 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8467 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8468 trace_seq_printf(s, "read events: %ld\n", cnt);
8470 count = simple_read_from_buffer(ubuf, count, ppos,
8471 s->buffer, trace_seq_used(s));
8478 static const struct file_operations tracing_stats_fops = {
8479 .open = tracing_open_generic_tr,
8480 .read = tracing_stats_read,
8481 .llseek = generic_file_llseek,
8482 .release = tracing_release_generic_tr,
8485 #ifdef CONFIG_DYNAMIC_FTRACE
8488 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8489 size_t cnt, loff_t *ppos)
8495 /* 256 should be plenty to hold the amount needed */
8496 buf = kmalloc(256, GFP_KERNEL);
8500 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8501 ftrace_update_tot_cnt,
8502 ftrace_number_of_pages,
8503 ftrace_number_of_groups);
8505 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8510 static const struct file_operations tracing_dyn_info_fops = {
8511 .open = tracing_open_generic,
8512 .read = tracing_read_dyn_info,
8513 .llseek = generic_file_llseek,
8515 #endif /* CONFIG_DYNAMIC_FTRACE */
8517 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8519 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8520 struct trace_array *tr, struct ftrace_probe_ops *ops,
8523 tracing_snapshot_instance(tr);
8527 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8528 struct trace_array *tr, struct ftrace_probe_ops *ops,
8531 struct ftrace_func_mapper *mapper = data;
8535 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8545 tracing_snapshot_instance(tr);
8549 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8550 struct ftrace_probe_ops *ops, void *data)
8552 struct ftrace_func_mapper *mapper = data;
8555 seq_printf(m, "%ps:", (void *)ip);
8557 seq_puts(m, "snapshot");
8560 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8563 seq_printf(m, ":count=%ld\n", *count);
8565 seq_puts(m, ":unlimited\n");
8571 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8572 unsigned long ip, void *init_data, void **data)
8574 struct ftrace_func_mapper *mapper = *data;
8577 mapper = allocate_ftrace_func_mapper();
8583 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8587 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8588 unsigned long ip, void *data)
8590 struct ftrace_func_mapper *mapper = data;
8595 free_ftrace_func_mapper(mapper, NULL);
8599 ftrace_func_mapper_remove_ip(mapper, ip);
8602 static struct ftrace_probe_ops snapshot_probe_ops = {
8603 .func = ftrace_snapshot,
8604 .print = ftrace_snapshot_print,
8607 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8608 .func = ftrace_count_snapshot,
8609 .print = ftrace_snapshot_print,
8610 .init = ftrace_snapshot_init,
8611 .free = ftrace_snapshot_free,
8615 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8616 char *glob, char *cmd, char *param, int enable)
8618 struct ftrace_probe_ops *ops;
8619 void *count = (void *)-1;
8626 /* hash funcs only work with set_ftrace_filter */
8630 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8633 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8638 number = strsep(&param, ":");
8640 if (!strlen(number))
8644 * We use the callback data field (which is a pointer)
8645 * as our counter.
8647 ret = kstrtoul(number, 0, (unsigned long *)&count);
8652 ret = tracing_alloc_snapshot_instance(tr);
8656 ret = register_ftrace_function_probe(glob, tr, ops, count);
8659 return ret < 0 ? ret : 0;
8662 static struct ftrace_func_command ftrace_snapshot_cmd = {
8664 .func = ftrace_trace_snapshot_callback,
8667 static __init int register_snapshot_cmd(void)
8669 return register_ftrace_command(&ftrace_snapshot_cmd);
8672 static inline __init int register_snapshot_cmd(void) { return 0; }
8673 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8675 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8677 if (WARN_ON(!tr->dir))
8678 return ERR_PTR(-ENODEV);
8680 /* Top directory uses NULL as the parent */
8681 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8684 /* All sub buffers have a descriptor */
8688 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8690 struct dentry *d_tracer;
8693 return tr->percpu_dir;
8695 d_tracer = tracing_get_dentry(tr);
8696 if (IS_ERR(d_tracer))
8699 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8701 MEM_FAIL(!tr->percpu_dir,
8702 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8704 return tr->percpu_dir;
8707 static struct dentry *
8708 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8709 void *data, long cpu, const struct file_operations *fops)
8711 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8713 if (ret) /* See tracing_get_cpu() */
8714 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8719 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8721 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8722 struct dentry *d_cpu;
8723 char cpu_dir[30]; /* 30 characters should be more than enough */
8728 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8729 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8731 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8735 /* per cpu trace_pipe */
8736 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8737 tr, cpu, &tracing_pipe_fops);
8740 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8741 tr, cpu, &tracing_fops);
8743 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8744 tr, cpu, &tracing_buffers_fops);
8746 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8747 tr, cpu, &tracing_stats_fops);
8749 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8750 tr, cpu, &tracing_entries_fops);
8752 #ifdef CONFIG_TRACER_SNAPSHOT
8753 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8754 tr, cpu, &snapshot_fops);
8756 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8757 tr, cpu, &snapshot_raw_fops);
8761 #ifdef CONFIG_FTRACE_SELFTEST
8762 /* Let selftest have access to static functions in this file */
8763 #include "trace_selftest.c"
8767 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8770 struct trace_option_dentry *topt = filp->private_data;
8773 if (topt->flags->val & topt->opt->bit)
8778 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8782 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8785 struct trace_option_dentry *topt = filp->private_data;
8789 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8793 if (val != 0 && val != 1)
8796 if (!!(topt->flags->val & topt->opt->bit) != val) {
8797 mutex_lock(&trace_types_lock);
8798 ret = __set_tracer_option(topt->tr, topt->flags,
8800 mutex_unlock(&trace_types_lock);
8811 static const struct file_operations trace_options_fops = {
8812 .open = tracing_open_generic,
8813 .read = trace_options_read,
8814 .write = trace_options_write,
8815 .llseek = generic_file_llseek,
8819 * In order to pass in both the trace_array descriptor as well as the index
8820 * to the flag that the trace option file represents, the trace_array
8821 * has a character array of trace_flags_index[], which holds the index
8822 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8823 * The address of this character array is passed to the flag option file
8824 * read/write callbacks.
8826 * In order to extract both the index and the trace_array descriptor,
8827 * get_tr_index() uses the following algorithm.
8829 *   idx = *ptr;
8831 * As the pointer itself contains the address of the index (remember
8832 * index[1] == 1).
8834 * Then to get the trace_array descriptor, by subtracting that index
8835 * from the ptr, we get to the start of the index itself.
8837 * ptr - idx == &index[0]
8839 * Then a simple container_of() from that pointer gets us to the
8840 * trace_array descriptor.
8842 static void get_tr_index(void *data, struct trace_array **ptr,
8843 unsigned int *pindex)
8845 *pindex = *(unsigned char *)data;
8847 *ptr = container_of(data - *pindex, struct trace_array,
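/*
 * Worked example of the scheme described above: if @data points at
 * tr->trace_flags_index[3], then *pindex == 3 and (data - 3) is
 * &tr->trace_flags_index[0], from which container_of() recovers the
 * enclosing trace_array.
 */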
8852 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8855 void *tr_index = filp->private_data;
8856 struct trace_array *tr;
8860 get_tr_index(tr_index, &tr, &index);
8862 if (tr->trace_flags & (1 << index))
8867 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8871 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8874 void *tr_index = filp->private_data;
8875 struct trace_array *tr;
8880 get_tr_index(tr_index, &tr, &index);
8882 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8886 if (val != 0 && val != 1)
8889 mutex_lock(&event_mutex);
8890 mutex_lock(&trace_types_lock);
8891 ret = set_tracer_flag(tr, 1 << index, val);
8892 mutex_unlock(&trace_types_lock);
8893 mutex_unlock(&event_mutex);
8903 static const struct file_operations trace_options_core_fops = {
8904 .open = tracing_open_generic,
8905 .read = trace_options_core_read,
8906 .write = trace_options_core_write,
8907 .llseek = generic_file_llseek,
8910 struct dentry *trace_create_file(const char *name,
8912 struct dentry *parent,
8914 const struct file_operations *fops)
8918 ret = tracefs_create_file(name, mode, parent, data, fops);
8920 pr_warn("Could not create tracefs '%s' entry\n", name);
8926 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8928 struct dentry *d_tracer;
8933 d_tracer = tracing_get_dentry(tr);
8934 if (IS_ERR(d_tracer))
8937 tr->options = tracefs_create_dir("options", d_tracer);
8939 pr_warn("Could not create tracefs directory 'options'\n");
8947 create_trace_option_file(struct trace_array *tr,
8948 struct trace_option_dentry *topt,
8949 struct tracer_flags *flags,
8950 struct tracer_opt *opt)
8952 struct dentry *t_options;
8954 t_options = trace_options_init_dentry(tr);
8958 topt->flags = flags;
8962 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8963 t_options, topt, &trace_options_fops);
8968 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8970 struct trace_option_dentry *topts;
8971 struct trace_options *tr_topts;
8972 struct tracer_flags *flags;
8973 struct tracer_opt *opts;
8980 flags = tracer->flags;
8982 if (!flags || !flags->opts)
8986 * If this is an instance, only create flags for tracers
8987 * the instance may have.
8989 if (!trace_ok_for_array(tracer, tr))
8992 for (i = 0; i < tr->nr_topts; i++) {
8993 /* Make sure there's no duplicate flags. */
8994 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9000 for (cnt = 0; opts[cnt].name; cnt++)
9003 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9007 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9014 tr->topts = tr_topts;
9015 tr->topts[tr->nr_topts].tracer = tracer;
9016 tr->topts[tr->nr_topts].topts = topts;
9019 for (cnt = 0; opts[cnt].name; cnt++) {
9020 create_trace_option_file(tr, &topts[cnt], flags,
9022 MEM_FAIL(topts[cnt].entry == NULL,
9023 "Failed to create trace option: %s",
9028 static struct dentry *
9029 create_trace_option_core_file(struct trace_array *tr,
9030 const char *option, long index)
9032 struct dentry *t_options;
9034 t_options = trace_options_init_dentry(tr);
9038 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9039 (void *)&tr->trace_flags_index[index],
9040 &trace_options_core_fops);
9043 static void create_trace_options_dir(struct trace_array *tr)
9045 struct dentry *t_options;
9046 bool top_level = tr == &global_trace;
9049 t_options = trace_options_init_dentry(tr);
9053 for (i = 0; trace_options[i]; i++) {
9055 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9056 create_trace_option_core_file(tr, trace_options[i], i);
9061 rb_simple_read(struct file *filp, char __user *ubuf,
9062 size_t cnt, loff_t *ppos)
9064 struct trace_array *tr = filp->private_data;
9068 r = tracer_tracing_is_on(tr);
9069 r = sprintf(buf, "%d\n", r);
9071 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9075 rb_simple_write(struct file *filp, const char __user *ubuf,
9076 size_t cnt, loff_t *ppos)
9078 struct trace_array *tr = filp->private_data;
9079 struct trace_buffer *buffer = tr->array_buffer.buffer;
9083 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9088 mutex_lock(&trace_types_lock);
9089 if (!!val == tracer_tracing_is_on(tr)) {
9090 val = 0; /* do nothing */
9092 tracer_tracing_on(tr);
9093 if (tr->current_trace->start)
9094 tr->current_trace->start(tr);
9096 tracer_tracing_off(tr);
9097 if (tr->current_trace->stop)
9098 tr->current_trace->stop(tr);
9099 /* Wake up any waiters */
9100 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9102 mutex_unlock(&trace_types_lock);
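/*
 * This backs the per-instance "tracing_on" file, e.g.:
 *
 *	# echo 0 > tracing_on	# stop recording (calls the tracer's ->stop())
 *	# echo 1 > tracing_on	# resume recording (calls the tracer's ->start())
 *
 * Turning tracing off also wakes any readers blocked on the ring buffer.
 */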
9110 static const struct file_operations rb_simple_fops = {
9111 .open = tracing_open_generic_tr,
9112 .read = rb_simple_read,
9113 .write = rb_simple_write,
9114 .release = tracing_release_generic_tr,
9115 .llseek = default_llseek,
9119 buffer_percent_read(struct file *filp, char __user *ubuf,
9120 size_t cnt, loff_t *ppos)
9122 struct trace_array *tr = filp->private_data;
9126 r = tr->buffer_percent;
9127 r = sprintf(buf, "%d\n", r);
9129 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9133 buffer_percent_write(struct file *filp, const char __user *ubuf,
9134 size_t cnt, loff_t *ppos)
9136 struct trace_array *tr = filp->private_data;
9140 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9150 tr->buffer_percent = val;
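/*
 * buffer_percent is the watermark used when readers block on the buffer
 * (see the wait_on_pipe() callers above): 0 means wake as soon as any data
 * is present, 100 means wait until the buffer is full, and values in
 * between wait for that percentage to fill (default 50, set in
 * init_tracer_tracefs() below).
 */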
9157 static const struct file_operations buffer_percent_fops = {
9158 .open = tracing_open_generic_tr,
9159 .read = buffer_percent_read,
9160 .write = buffer_percent_write,
9161 .release = tracing_release_generic_tr,
9162 .llseek = default_llseek,
9165 static struct dentry *trace_instance_dir;
9168 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9171 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9173 enum ring_buffer_flags rb_flags;
9175 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9179 buf->buffer = ring_buffer_alloc(size, rb_flags);
9183 buf->data = alloc_percpu(struct trace_array_cpu);
9185 ring_buffer_free(buf->buffer);
9190 /* Allocate the first page for all buffers */
9191 set_buffer_entries(&tr->array_buffer,
9192 ring_buffer_size(tr->array_buffer.buffer, 0));
9197 static void free_trace_buffer(struct array_buffer *buf)
9200 ring_buffer_free(buf->buffer);
9202 free_percpu(buf->data);
9207 static int allocate_trace_buffers(struct trace_array *tr, int size)
9211 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9215 #ifdef CONFIG_TRACER_MAX_TRACE
9216 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9217 allocate_snapshot ? size : 1);
9218 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9219 free_trace_buffer(&tr->array_buffer);
9222 tr->allocated_snapshot = allocate_snapshot;
9225 * Only the top level trace array gets its snapshot allocated
9226 * from the kernel command line.
9228 allocate_snapshot = false;
9234 static void free_trace_buffers(struct trace_array *tr)
9239 free_trace_buffer(&tr->array_buffer);
9241 #ifdef CONFIG_TRACER_MAX_TRACE
9242 free_trace_buffer(&tr->max_buffer);
9246 static void init_trace_flags_index(struct trace_array *tr)
9250 /* Used by the trace options files */
9251 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9252 tr->trace_flags_index[i] = i;
9255 static void __update_tracer_options(struct trace_array *tr)
9259 for (t = trace_types; t; t = t->next)
9260 add_tracer_options(tr, t);
9263 static void update_tracer_options(struct trace_array *tr)
9265 mutex_lock(&trace_types_lock);
9266 tracer_options_updated = true;
9267 __update_tracer_options(tr);
9268 mutex_unlock(&trace_types_lock);
9271 /* Must have trace_types_lock held */
9272 struct trace_array *trace_array_find(const char *instance)
9274 struct trace_array *tr, *found = NULL;
9276 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9277 if (tr->name && strcmp(tr->name, instance) == 0) {
9286 struct trace_array *trace_array_find_get(const char *instance)
9288 struct trace_array *tr;
9290 mutex_lock(&trace_types_lock);
9291 tr = trace_array_find(instance);
9294 mutex_unlock(&trace_types_lock);
9299 static int trace_array_create_dir(struct trace_array *tr)
9303 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9307 ret = event_trace_add_tracer(tr->dir, tr);
9309 tracefs_remove(tr->dir);
9313 init_tracer_tracefs(tr, tr->dir);
9314 __update_tracer_options(tr);
9319 static struct trace_array *trace_array_create(const char *name)
9321 struct trace_array *tr;
9325 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9327 return ERR_PTR(ret);
9329 tr->name = kstrdup(name, GFP_KERNEL);
9333 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9336 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9338 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9340 raw_spin_lock_init(&tr->start_lock);
9342 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9344 tr->current_trace = &nop_trace;
9346 INIT_LIST_HEAD(&tr->systems);
9347 INIT_LIST_HEAD(&tr->events);
9348 INIT_LIST_HEAD(&tr->hist_vars);
9349 INIT_LIST_HEAD(&tr->err_log);
9351 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9354 if (ftrace_allocate_ftrace_ops(tr) < 0)
9357 ftrace_init_trace_array(tr);
9359 init_trace_flags_index(tr);
9361 if (trace_instance_dir) {
9362 ret = trace_array_create_dir(tr);
9366 __trace_early_add_events(tr);
9368 list_add(&tr->list, &ftrace_trace_arrays);
9375 ftrace_free_ftrace_ops(tr);
9376 free_trace_buffers(tr);
9377 free_cpumask_var(tr->tracing_cpumask);
9381 return ERR_PTR(ret);
9384 static int instance_mkdir(const char *name)
9386 struct trace_array *tr;
9389 mutex_lock(&event_mutex);
9390 mutex_lock(&trace_types_lock);
9393 if (trace_array_find(name))
9396 tr = trace_array_create(name);
9398 ret = PTR_ERR_OR_ZERO(tr);
9401 mutex_unlock(&trace_types_lock);
9402 mutex_unlock(&event_mutex);
9407 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9408 * @name: The name of the trace array to be looked up/created.
9410 * Returns pointer to trace array with given name.
9411 * NULL, if it cannot be created.
9413 * NOTE: This function increments the reference counter associated with the
9414 * trace array returned. This makes sure it cannot be freed while in use.
9415 * Use trace_array_put() once the trace array is no longer needed.
9416 * If the trace_array is to be freed, trace_array_destroy() needs to
9417 * be called after the trace_array_put(), or simply let user space delete
9418 * it from the tracefs instances directory. But until the
9419 * trace_array_put() is called, user space can not delete it.
9422 struct trace_array *trace_array_get_by_name(const char *name)
9424 struct trace_array *tr;
9426 mutex_lock(&event_mutex);
9427 mutex_lock(&trace_types_lock);
9429 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9430 if (tr->name && strcmp(tr->name, name) == 0)
9434 tr = trace_array_create(name);
9442 mutex_unlock(&trace_types_lock);
9443 mutex_unlock(&event_mutex);
9446 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
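/*
 * Rough module-side usage (a sketch): look up or create an instance, use it,
 * then drop the reference, destroying it only if this caller created it and
 * wants it removed:
 *
 *	struct trace_array *tr = trace_array_get_by_name("my-instance");
 *	if (tr) {
 *		...
 *		trace_array_put(tr);
 *		trace_array_destroy(tr);
 *	}
 */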
9448 static int __remove_instance(struct trace_array *tr)
9452 /* Reference counter for a newly created trace array = 1. */
9453 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9456 list_del(&tr->list);
9458 /* Disable all the flags that were enabled coming in */
9459 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9460 if ((1 << i) & ZEROED_TRACE_FLAGS)
9461 set_tracer_flag(tr, 1 << i, 0);
9464 tracing_set_nop(tr);
9465 clear_ftrace_function_probes(tr);
9466 event_trace_del_tracer(tr);
9467 ftrace_clear_pids(tr);
9468 ftrace_destroy_function_files(tr);
9469 tracefs_remove(tr->dir);
9470 free_percpu(tr->last_func_repeats);
9471 free_trace_buffers(tr);
9473 for (i = 0; i < tr->nr_topts; i++) {
9474 kfree(tr->topts[i].topts);
9478 free_cpumask_var(tr->tracing_cpumask);
9485 int trace_array_destroy(struct trace_array *this_tr)
9487 struct trace_array *tr;
9493 mutex_lock(&event_mutex);
9494 mutex_lock(&trace_types_lock);
9498 /* Making sure trace array exists before destroying it. */
9499 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9500 if (tr == this_tr) {
9501 ret = __remove_instance(tr);
9506 mutex_unlock(&trace_types_lock);
9507 mutex_unlock(&event_mutex);
9511 EXPORT_SYMBOL_GPL(trace_array_destroy);
9513 static int instance_rmdir(const char *name)
9515 struct trace_array *tr;
9518 mutex_lock(&event_mutex);
9519 mutex_lock(&trace_types_lock);
9522 tr = trace_array_find(name);
9524 ret = __remove_instance(tr);
9526 mutex_unlock(&trace_types_lock);
9527 mutex_unlock(&event_mutex);
9532 static __init void create_trace_instances(struct dentry *d_tracer)
9534 struct trace_array *tr;
9536 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9539 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9542 mutex_lock(&event_mutex);
9543 mutex_lock(&trace_types_lock);
9545 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9548 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9549 "Failed to create instance directory\n"))
9553 mutex_unlock(&trace_types_lock);
9554 mutex_unlock(&event_mutex);
9558 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9560 struct trace_event_file *file;
9563 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9564 tr, &show_traces_fops);
9566 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9567 tr, &set_tracer_fops);
9569 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9570 tr, &tracing_cpumask_fops);
9572 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9573 tr, &tracing_iter_fops);
9575 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9578 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9579 tr, &tracing_pipe_fops);
9581 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9582 tr, &tracing_entries_fops);
9584 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9585 tr, &tracing_total_entries_fops);
9587 trace_create_file("free_buffer", 0200, d_tracer,
9588 tr, &tracing_free_buffer_fops);
9590 trace_create_file("trace_marker", 0220, d_tracer,
9591 tr, &tracing_mark_fops);
9593 file = __find_event_file(tr, "ftrace", "print");
9594 if (file && file->dir)
9595 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9596 file, &event_trigger_fops);
9597 tr->trace_marker_file = file;
9599 trace_create_file("trace_marker_raw", 0220, d_tracer,
9600 tr, &tracing_mark_raw_fops);
9602 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9605 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9606 tr, &rb_simple_fops);
9608 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9609 &trace_time_stamp_mode_fops);
9611 tr->buffer_percent = 50;
9613 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9614 tr, &buffer_percent_fops);
9616 create_trace_options_dir(tr);
9618 #ifdef CONFIG_TRACER_MAX_TRACE
9619 trace_create_maxlat_file(tr, d_tracer);
9622 if (ftrace_create_function_files(tr, d_tracer))
9623 MEM_FAIL(1, "Could not allocate function filter files");
9625 #ifdef CONFIG_TRACER_SNAPSHOT
9626 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9627 tr, &snapshot_fops);
9630 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9631 tr, &tracing_err_log_fops);
9633 for_each_tracing_cpu(cpu)
9634 tracing_init_tracefs_percpu(tr, cpu);
9636 ftrace_init_tracefs(tr, d_tracer);
9639 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9641 struct vfsmount *mnt;
9642 struct file_system_type *type;
9645 * To maintain backward compatibility for tools that mount
9646 * debugfs to get to the tracing facility, tracefs is automatically
9647 * mounted to the debugfs/tracing directory.
9649 type = get_fs_type("tracefs");
9652 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9653 put_filesystem(type);
9662 * tracing_init_dentry - initialize top level trace array
9664 * This is called when creating files or directories in the tracing
9665 * directory. It is called via fs_initcall() by any of the boot up code
9666 * and expects to return the dentry of the top level tracing directory.
9668 int tracing_init_dentry(void)
9670 struct trace_array *tr = &global_trace;
9672 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9673 pr_warn("Tracing disabled due to lockdown\n");
9677 /* The top level trace array uses NULL as parent */
9681 if (WARN_ON(!tracefs_initialized()))
9685 * As there may still be users that expect the tracing
9686 * files to exist in debugfs/tracing, we must automount
9687 * the tracefs file system there, so older tools still
9688 * work with the newer kernel.
9690 tr->dir = debugfs_create_automount("tracing", NULL,
9691 trace_automount, NULL);
9696 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9697 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9699 static struct workqueue_struct *eval_map_wq __initdata;
9700 static struct work_struct eval_map_work __initdata;
9701 static struct work_struct tracerfs_init_work __initdata;
9703 static void __init eval_map_work_func(struct work_struct *work)
9707 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9708 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9711 static int __init trace_eval_init(void)
9713 INIT_WORK(&eval_map_work, eval_map_work_func);
9715 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9717 pr_err("Unable to allocate eval_map_wq\n");
9719 eval_map_work_func(&eval_map_work);
9723 queue_work(eval_map_wq, &eval_map_work);
9727 subsys_initcall(trace_eval_init);
9729 static int __init trace_eval_sync(void)
9731 /* Make sure the eval map updates are finished */
9733 destroy_workqueue(eval_map_wq);
9737 late_initcall_sync(trace_eval_sync);
9740 #ifdef CONFIG_MODULES
9741 static void trace_module_add_evals(struct module *mod)
9743 if (!mod->num_trace_evals)
9747 * Modules with bad taint do not have events created, do
9748 * not bother with enums either.
9750 if (trace_module_has_bad_taint(mod))
9753 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9756 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9757 static void trace_module_remove_evals(struct module *mod)
9759 union trace_eval_map_item *map;
9760 union trace_eval_map_item **last = &trace_eval_maps;
9762 if (!mod->num_trace_evals)
9765 mutex_lock(&trace_eval_mutex);
9767 map = trace_eval_maps;
9770 if (map->head.mod == mod)
9772 map = trace_eval_jmp_to_tail(map);
9773 last = &map->tail.next;
9774 map = map->tail.next;
9779 *last = trace_eval_jmp_to_tail(map)->tail.next;
9782 mutex_unlock(&trace_eval_mutex);
9785 static inline void trace_module_remove_evals(struct module *mod) { }
9786 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9788 static int trace_module_notify(struct notifier_block *self,
9789 unsigned long val, void *data)
9791 struct module *mod = data;
9794 case MODULE_STATE_COMING:
9795 trace_module_add_evals(mod);
9797 case MODULE_STATE_GOING:
9798 trace_module_remove_evals(mod);
9805 static struct notifier_block trace_module_nb = {
9806 .notifier_call = trace_module_notify,
9809 #endif /* CONFIG_MODULES */
9811 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9816 init_tracer_tracefs(&global_trace, NULL);
9817 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9819 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9820 &global_trace, &tracing_thresh_fops);
9822 trace_create_file("README", TRACE_MODE_READ, NULL,
9823 NULL, &tracing_readme_fops);
9825 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9826 NULL, &tracing_saved_cmdlines_fops);
9828 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9829 NULL, &tracing_saved_cmdlines_size_fops);
9831 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9832 NULL, &tracing_saved_tgids_fops);
9834 trace_create_eval_file(NULL);
9836 #ifdef CONFIG_MODULES
9837 register_module_notifier(&trace_module_nb);
9840 #ifdef CONFIG_DYNAMIC_FTRACE
9841 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9842 NULL, &tracing_dyn_info_fops);
9845 create_trace_instances(NULL);
9847 update_tracer_options(&global_trace);
9850 static __init int tracer_init_tracefs(void)
9854 trace_access_lock_init();
9856 ret = tracing_init_dentry();
9861 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
9862 queue_work(eval_map_wq, &tracerfs_init_work);
9864 tracer_init_tracefs_work_func(NULL);
9867 rv_init_interface();
9872 fs_initcall(tracer_init_tracefs);
9874 static int trace_panic_handler(struct notifier_block *this,
9875 unsigned long event, void *unused)
9877 if (ftrace_dump_on_oops)
9878 ftrace_dump(ftrace_dump_on_oops);
9882 static struct notifier_block trace_panic_notifier = {
9883 .notifier_call = trace_panic_handler,
9885 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9888 static int trace_die_handler(struct notifier_block *self,
9894 if (ftrace_dump_on_oops)
9895 ftrace_dump(ftrace_dump_on_oops);
9903 static struct notifier_block trace_die_notifier = {
9904 .notifier_call = trace_die_handler,
9909 * printk is set to max of 1024, we really don't need it that big.
9910 * Nothing should be printing 1000 characters anyway.
9912 #define TRACE_MAX_PRINT 1000
9915 * Define here KERN_TRACE so that we have one place to modify
9916 * it if we decide to change what log level the ftrace dump
9919 #define KERN_TRACE KERN_EMERG
9922 trace_printk_seq(struct trace_seq *s)
9924 /* Probably should print a warning here. */
9925 if (s->seq.len >= TRACE_MAX_PRINT)
9926 s->seq.len = TRACE_MAX_PRINT;
9929 * More paranoid code. Although the buffer size is set to
9930 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9931 * an extra layer of protection.
9933 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9934 s->seq.len = s->seq.size - 1;
9936 /* should be zero ended, but we are paranoid. */
9937 s->buffer[s->seq.len] = 0;
9939 printk(KERN_TRACE "%s", s->buffer);

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate the start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Cannot use kmalloc for iter->temp and iter->fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
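
/*
 * Besides ftrace_dump() below, this iterator setup is intended for other
 * crash-time readers of the global buffer (the kdb "ftdump" command, for
 * instance), which is why iter->temp and iter->fmt point at the static
 * buffers rather than being allocated.
 */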

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill in all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
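
/*
 * Besides the panic/die notifiers above, a dump can be triggered by hand
 * for testing with the magic SysRq "z" key (when CONFIG_MAGIC_SYSRQ is
 * enabled), for example:
 *
 *	# echo z > /proc/sysrq-trigger
 *
 * Tracing is turned off by the dump and stays off until it is re-enabled
 * with "echo 1 > tracing_on".
 */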

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
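
/*
 * trace_parse_run_command() gives write handlers a simple line-oriented
 * protocol: the user buffer is split on '\n', anything after a '#' is
 * dropped as a comment, and each remaining line is passed to createfn().
 * The dynamic event interfaces are typical users; for example (probe name
 * and target function purely illustrative):
 *
 *	# echo 'p:myprobe do_sys_openat2' >> /sys/kernel/tracing/kprobe_events
 *
 * results in a single createfn() call with that one line.
 */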

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
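
/*
 * The error labels above unwind strictly in the reverse order of the
 * allocations, so a failure at any step releases only what was actually
 * set up before it.
 */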

void __init ftrace_boot_snapshot(void)
{
	if (snapshot_at_boot) {
		tracing_snapshot();
		internal_trace_puts("** Boot snapshot taken **\n");
	}
}
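
/*
 * snapshot_at_boot is set by the "ftrace_boot_snapshot" kernel command
 * line option, so the boot-time trace can be preserved before user space
 * starts overwriting the ring buffer.
 */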

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}
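
/*
 * Both early_trace_init() and trace_init() are called directly from
 * start_kernel(): the former early enough that the ring buffer and
 * trace_printk() are usable as soon as possible, the latter once the
 * event infrastructure can be brought up.
 */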

__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer selected at boot lives in an init section.
	 * This function is called from a late initcall; if the boot
	 * tracer was never registered, clear the pointer here to prevent
	 * later registration from accessing the buffer that is about to
	 * be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
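
/*
 * default_bootup_tracer is whatever the "ftrace=" kernel command line
 * option selected; if no tracer by that name was ever registered, the
 * pointer is dropped here so nothing dereferences the soon-to-be-freed
 * init data.
 */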

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
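
/*
 * The default can still be overridden after boot through the trace_clock
 * file, for example to go back to the per-cpu "local" clock on a machine
 * where this warning fired:
 *
 *	# echo local > /sys/kernel/tracing/trace_clock
 */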

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);