1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
49 #include "trace_output.h"
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
55 bool ring_buffer_expanded;
58 * We need to change this state when a selftest is running.
59 * A selftest will look into the ring-buffer to count the
60 * entries inserted during the selftest, although some concurrent
61 * insertions into the ring-buffer such as trace_printk could occur
62 * at the same time, giving false positive or negative results.
64 static bool __read_mostly tracing_selftest_running;
67 * If a tracer is running, we do not want to run SELFTEST.
69 bool __read_mostly tracing_selftest_disabled;
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets
100 static int tracing_disabled = 1;
102 cpumask_var_t __read_mostly tracing_buffer_mask;
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
110 * capturing traces that lead to crashes and outputting them to a
113 * It is default off, but you can enable it with either specifying
114 * "ftrace_dump_on_oops" in the kernel command line, or setting
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set 1 if you want to dump buffers of all CPUs
117 * Set 2 if you want to dump the buffer of the CPU that triggered oops
120 enum ftrace_dump_mode ftrace_dump_on_oops;
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
129 unsigned long length;
132 union trace_eval_map_item;
134 struct trace_eval_map_tail {
136 * "end" is first and points to NULL as it must be different
137 * than "mod" or "eval_string"
139 union trace_eval_map_item *next;
140 const char *end; /* points to NULL */
143 static DEFINE_MUTEX(trace_eval_mutex);
146 * The trace_eval_maps are saved in an array with two extra elements,
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
150 * pointer to the next array of saved eval_map items.
152 union trace_eval_map_item {
153 struct trace_eval_map map;
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
158 static union trace_eval_map_item *trace_eval_maps;
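
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * walking the chained eval map arrays laid out as described above. Each
 * block starts with a head element (count in head.length), followed by
 * that many map elements, and ends with a tail element whose tail.next
 * points to the next block or NULL.
 */
static void example_walk_eval_maps(union trace_eval_map_item *ptr)
{
	while (ptr) {
		unsigned long len = ptr->head.length;
		union trace_eval_map_item *map = ptr + 1;
		unsigned long i;

		for (i = 0; i < len; i++)
			pr_info("eval %s = %lu\n", map[i].map.eval_string,
				map[i].map.eval_value);

		/* The tail element sits right after the last map. */
		ptr = map[len].tail.next;
	}
}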
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
162 static void ftrace_trace_userstack(struct ring_buffer *buffer,
163 unsigned long flags, int pc);
165 #define MAX_TRACER_SIZE 100
166 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
167 static char *default_bootup_tracer;
169 static bool allocate_snapshot;
171 static int __init set_cmdline_ftrace(char *str)
173 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
174 default_bootup_tracer = bootup_tracer_buf;
175 /* We are using ftrace early, expand it */
176 ring_buffer_expanded = true;
179 __setup("ftrace=", set_cmdline_ftrace);
181 static int __init set_ftrace_dump_on_oops(char *str)
183 if (*str++ != '=' || !*str) {
184 ftrace_dump_on_oops = DUMP_ALL;
188 if (!strcmp("orig_cpu", str)) {
189 ftrace_dump_on_oops = DUMP_ORIG;
195 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
197 static int __init stop_trace_on_warning(char *str)
199 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
200 __disable_trace_on_warning = 1;
203 __setup("traceoff_on_warning", stop_trace_on_warning);
205 static int __init boot_alloc_snapshot(char *str)
207 allocate_snapshot = true;
208 /* We also need the main ring buffer expanded */
209 ring_buffer_expanded = true;
212 __setup("alloc_snapshot", boot_alloc_snapshot);
215 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217 static int __init set_trace_boot_options(char *str)
219 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
222 __setup("trace_options=", set_trace_boot_options);
224 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
225 static char *trace_boot_clock __initdata;
227 static int __init set_trace_boot_clock(char *str)
229 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
230 trace_boot_clock = trace_boot_clock_buf;
233 __setup("trace_clock=", set_trace_boot_clock);
235 static int __init set_tracepoint_printk(char *str)
237 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
238 tracepoint_printk = 1;
241 __setup("tp_printk", set_tracepoint_printk);
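
/*
 * Example (illustrative, not from the original source): a kernel command
 * line exercising the __setup() handlers above might contain
 *
 *	ftrace=function ftrace_dump_on_oops=orig_cpu traceoff_on_warning
 *	alloc_snapshot trace_options=sym-addr trace_clock=global tp_printk
 *
 * Each token is routed to the matching handler registered with __setup(),
 * which records the request for use later in boot.
 */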
243 unsigned long long ns2usecs(u64 nsec)
250 /* trace_flags holds trace_options default values */
251 #define TRACE_DEFAULT_FLAGS \
252 (FUNCTION_DEFAULT_FLAGS | \
253 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
254 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
255 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
256 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
258 /* trace_options that are only supported by global_trace */
259 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
260 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
262 /* trace_flags that are default zero for instances */
263 #define ZEROED_TRACE_FLAGS \
264 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
267 * The global_trace is the descriptor that holds the top-level tracing
268 * buffers for the live tracing.
270 static struct trace_array global_trace = {
271 .trace_flags = TRACE_DEFAULT_FLAGS,
274 LIST_HEAD(ftrace_trace_arrays);
276 int trace_array_get(struct trace_array *this_tr)
278 struct trace_array *tr;
281 mutex_lock(&trace_types_lock);
282 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
289 mutex_unlock(&trace_types_lock);
294 static void __trace_array_put(struct trace_array *this_tr)
296 WARN_ON(!this_tr->ref);
300 void trace_array_put(struct trace_array *this_tr)
302 mutex_lock(&trace_types_lock);
303 __trace_array_put(this_tr);
304 mutex_unlock(&trace_types_lock);
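
/*
 * Illustrative sketch (not part of the original file): the reference
 * pairing expected of callers that look up a trace_array and must keep
 * it alive while using it. example_use_trace_array() is hypothetical.
 */
static void example_use_trace_array(struct trace_array *tr)
{
	if (trace_array_get(tr) < 0)
		return;

	/* ... safely use @tr here; it cannot go away underneath us ... */

	trace_array_put(tr);
}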
307 int call_filter_check_discard(struct trace_event_call *call, void *rec,
308 struct ring_buffer *buffer,
309 struct ring_buffer_event *event)
311 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
312 !filter_match_preds(call->filter, rec)) {
313 __trace_event_discard_commit(buffer, event);
320 void trace_free_pid_list(struct trace_pid_list *pid_list)
322 vfree(pid_list->pids);
327 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
328 * @filtered_pids: The list of pids to check
329 * @search_pid: The PID to find in @filtered_pids
331 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
334 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
337 * If pid_max changed after filtered_pids was created, we
338 * by default ignore all pids greater than the previous pid_max.
340 if (search_pid >= filtered_pids->pid_max)
343 return test_bit(search_pid, filtered_pids->pids);
347 * trace_ignore_this_task - should a task be ignored for tracing
348 * @filtered_pids: The list of pids to check
349 * @task: The task that should be ignored if not filtered
351 * Checks if @task should be traced or not from @filtered_pids.
352 * Returns true if @task should *NOT* be traced.
353 * Returns false if @task should be traced.
356 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
359 * Return false, because if filtered_pids does not exist,
360 * all pids are good to trace.
365 return !trace_find_filtered_pid(filtered_pids, task->pid);
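
/*
 * Illustrative sketch (hypothetical caller): consulting the pid filter
 * before recording anything for a task. example_record_event() stands in
 * for whatever would actually write to the ring buffer.
 */
static void example_maybe_trace(struct trace_pid_list *filtered_pids,
				struct task_struct *task)
{
	if (trace_ignore_this_task(filtered_pids, task))
		return;

	/* example_record_event(task); */
}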
369 * trace_filter_add_remove_task - Add or remove a task from a pid_list
370 * @pid_list: The list to modify
371 * @self: The current task for fork or NULL for exit
372 * @task: The task to add or remove
374 * If adding a task and @self is defined, the task is only added if @self
375 * is also included in @pid_list. This happens on fork and tasks should
376 * only be added when the parent is listed. If @self is NULL, then the
377 * @task pid will be removed from the list, which would happen on exit
380 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
381 struct task_struct *self,
382 struct task_struct *task)
387 /* For forks, we only add if the forking task is listed */
389 if (!trace_find_filtered_pid(pid_list, self->pid))
393 /* Sorry, but we don't support pid_max changing after setting */
394 if (task->pid >= pid_list->pid_max)
397 /* "self" is set for forks, and NULL for exits */
399 set_bit(task->pid, pid_list->pids);
401 clear_bit(task->pid, pid_list->pids);
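
/*
 * Illustrative sketch (hypothetical fork/exit hooks): keeping a pid list
 * in sync using trace_filter_add_remove_task(), as described above.
 */
static void example_on_fork(struct trace_pid_list *pid_list,
			    struct task_struct *self,
			    struct task_struct *task)
{
	/* The child is only added if the forking task is listed. */
	trace_filter_add_remove_task(pid_list, self, task);
}

static void example_on_exit(struct trace_pid_list *pid_list,
			    struct task_struct *task)
{
	/* A NULL @self removes @task from the list. */
	trace_filter_add_remove_task(pid_list, NULL, task);
}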
405 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
406 * @pid_list: The pid list to show
407 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
408 * @pos: The position of the file
410 * This is used by the seq_file "next" operation to iterate the pids
411 * listed in a trace_pid_list structure.
413 * Returns the pid+1 as we want to display pid of zero, but NULL would
414 * stop the iteration.
416 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
418 unsigned long pid = (unsigned long)v;
422 /* pid already is +1 of the actual previous bit */
423 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
425 /* Return pid + 1 to allow zero to be represented */
426 if (pid < pid_list->pid_max)
427 return (void *)(pid + 1);
433 * trace_pid_start - Used for seq_file to start reading pid lists
434 * @pid_list: The pid list to show
435 * @pos: The position of the file
437 * This is used by seq_file "start" operation to start the iteration
440 * Returns the pid+1 as we want to display pid of zero, but NULL would
441 * stop the iteration.
443 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
448 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
449 if (pid >= pid_list->pid_max)
452 /* Return pid + 1 so that zero can be the exit value */
453 for (pid++; pid && l < *pos;
454 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
460 * trace_pid_show - show the current pid in seq_file processing
461 * @m: The seq_file structure to write into
462 * @v: A void pointer of the pid (+1) value to display
464 * Can be directly used by seq_file operations to display the current
467 int trace_pid_show(struct seq_file *m, void *v)
469 unsigned long pid = (unsigned long)v - 1;
471 seq_printf(m, "%lu\n", pid);
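
/*
 * Illustrative sketch (an assumption, not in the original file): wiring
 * the trace_pid_* helpers above into a set of seq_file operations. The
 * example_seq_pid_list pointer and the empty stop callback are
 * placeholders for the sketch.
 */
static struct trace_pid_list *example_seq_pid_list;

static void *example_pids_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(example_seq_pid_list, pos);
}

static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(example_seq_pid_list, v, pos);
}

static void example_pids_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pids_seq_ops = {
	.start	= example_pids_start,
	.next	= example_pids_next,
	.stop	= example_pids_stop,
	.show	= trace_pid_show,
};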
475 /* 128 should be much more than enough */
476 #define PID_BUF_SIZE 127
478 int trace_pid_write(struct trace_pid_list *filtered_pids,
479 struct trace_pid_list **new_pid_list,
480 const char __user *ubuf, size_t cnt)
482 struct trace_pid_list *pid_list;
483 struct trace_parser parser;
491 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
495 * Always recreate a new array. The write is an all or nothing
496 * operation. Always create a new array when adding new pids by
497 * the user. If the operation fails, then the current list is
500 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
502 trace_parser_put(&parser);
506 pid_list->pid_max = READ_ONCE(pid_max);
508 /* Only truncating will shrink pid_max */
509 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
510 pid_list->pid_max = filtered_pids->pid_max;
512 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
513 if (!pid_list->pids) {
514 trace_parser_put(&parser);
520 /* copy the current bits to the new max */
521 for_each_set_bit(pid, filtered_pids->pids,
522 filtered_pids->pid_max) {
523 set_bit(pid, pid_list->pids);
532 ret = trace_get_user(&parser, ubuf, cnt, &pos);
533 if (ret < 0 || !trace_parser_loaded(&parser))
541 if (kstrtoul(parser.buffer, 0, &val))
543 if (val >= pid_list->pid_max)
548 set_bit(pid, pid_list->pids);
551 trace_parser_clear(&parser);
554 trace_parser_put(&parser);
557 trace_free_pid_list(pid_list);
562 /* Cleared the list of pids */
563 trace_free_pid_list(pid_list);
568 *new_pid_list = pid_list;
573 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
577 /* Early boot up does not have a buffer yet */
579 return trace_clock_local();
581 ts = ring_buffer_time_stamp(buf->buffer, cpu);
582 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
587 u64 ftrace_now(int cpu)
589 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
593 * tracing_is_enabled - Show if global_trace has been disabled
595 * Shows if the global trace has been enabled or not. It uses the
596 * mirror flag "buffer_disabled" to be used in fast paths such as for
597 * the irqsoff tracer. But it may be inaccurate due to races. If you
598 * need to know the accurate state, use tracing_is_on() which is a little
599 * slower, but accurate.
601 int tracing_is_enabled(void)
604 * For quick access (irqsoff uses this in fast path), just
605 * return the mirror variable of the state of the ring buffer.
606 * It's a little racy, but we don't really care.
609 return !global_trace.buffer_disabled;
613 * trace_buf_size is the size in bytes that is allocated
614 * for a buffer. Note, the number of bytes is always rounded
617 * This number is purposely set to a low number of 16384.
618 * If the dump on oops happens, it will be much appreciated
619 * not to have to wait for all that output. Anyway, this is
620 * configurable at both boot time and run time.
622 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
624 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
626 /* trace_types holds a link list of available tracers. */
627 static struct tracer *trace_types __read_mostly;
630 * trace_types_lock is used to protect the trace_types list.
632 DEFINE_MUTEX(trace_types_lock);
635 * serialize the access of the ring buffer
637 * ring buffer serializes readers, but it is low level protection.
638 * The validity of events (which are returned by ring_buffer_peek() etc.)
639 * is not protected by the ring buffer.
641 * The content of events may become garbage if we allow another process to
642 * consume these events concurrently:
643 * A) the page of the consumed events may become a normal page
644 * (not a reader page) in the ring buffer, and this page will be
645 * rewritten by the events producer.
646 * B) The page of the consumed events may become a page for splice_read,
647 * and this page will be returned to the system.
649 * These primitives allow multiple processes to access different cpu ring buffers
652 * These primitives don't distinguish read-only and read-consume access.
653 * Multiple read-only accesses are also serialized.
657 static DECLARE_RWSEM(all_cpu_access_lock);
658 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
660 static inline void trace_access_lock(int cpu)
662 if (cpu == RING_BUFFER_ALL_CPUS) {
663 /* gain it for accessing the whole ring buffer. */
664 down_write(&all_cpu_access_lock);
666 /* gain it for accessing a cpu ring buffer. */
668 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
669 down_read(&all_cpu_access_lock);
671 /* Secondly block other access to this @cpu ring buffer. */
672 mutex_lock(&per_cpu(cpu_access_lock, cpu));
676 static inline void trace_access_unlock(int cpu)
678 if (cpu == RING_BUFFER_ALL_CPUS) {
679 up_write(&all_cpu_access_lock);
681 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
682 up_read(&all_cpu_access_lock);
686 static inline void trace_access_lock_init(void)
690 for_each_possible_cpu(cpu)
691 mutex_init(&per_cpu(cpu_access_lock, cpu));
696 static DEFINE_MUTEX(access_lock);
698 static inline void trace_access_lock(int cpu)
701 mutex_lock(&access_lock);
704 static inline void trace_access_unlock(int cpu)
707 mutex_unlock(&access_lock);
710 static inline void trace_access_lock_init(void)
716 #ifdef CONFIG_STACKTRACE
717 static void __ftrace_trace_stack(struct ring_buffer *buffer,
719 int skip, int pc, struct pt_regs *regs);
720 static inline void ftrace_trace_stack(struct trace_array *tr,
721 struct ring_buffer *buffer,
723 int skip, int pc, struct pt_regs *regs);
726 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
728 int skip, int pc, struct pt_regs *regs)
731 static inline void ftrace_trace_stack(struct trace_array *tr,
732 struct ring_buffer *buffer,
734 int skip, int pc, struct pt_regs *regs)
740 static __always_inline void
741 trace_event_setup(struct ring_buffer_event *event,
742 int type, unsigned long flags, int pc)
744 struct trace_entry *ent = ring_buffer_event_data(event);
746 tracing_generic_entry_update(ent, flags, pc);
750 static __always_inline struct ring_buffer_event *
751 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
754 unsigned long flags, int pc)
756 struct ring_buffer_event *event;
758 event = ring_buffer_lock_reserve(buffer, len);
760 trace_event_setup(event, type, flags, pc);
765 void tracer_tracing_on(struct trace_array *tr)
767 if (tr->trace_buffer.buffer)
768 ring_buffer_record_on(tr->trace_buffer.buffer);
770 * This flag is looked at when buffers haven't been allocated
771 * yet, or by some tracers (like irqsoff), that just want to
772 * know if the ring buffer has been disabled, but it can handle
773 * races where it gets disabled but we still do a record.
774 * As the check is in the fast path of the tracers, it is more
775 * important to be fast than accurate.
777 tr->buffer_disabled = 0;
778 /* Make the flag seen by readers */
783 * tracing_on - enable tracing buffers
785 * This function enables tracing buffers that may have been
786 * disabled with tracing_off.
788 void tracing_on(void)
790 tracer_tracing_on(&global_trace);
792 EXPORT_SYMBOL_GPL(tracing_on);
795 static __always_inline void
796 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
798 __this_cpu_write(trace_taskinfo_save, true);
800 /* If this is the temp buffer, we need to commit fully */
801 if (this_cpu_read(trace_buffered_event) == event) {
802 /* Length is in event->array[0] */
803 ring_buffer_write(buffer, event->array[0], &event->array[1]);
804 /* Release the temp buffer */
805 this_cpu_dec(trace_buffered_event_cnt);
807 ring_buffer_unlock_commit(buffer, event);
811 * __trace_puts - write a constant string into the trace buffer.
812 * @ip: The address of the caller
813 * @str: The constant string to write
814 * @size: The size of the string.
816 int __trace_puts(unsigned long ip, const char *str, int size)
818 struct ring_buffer_event *event;
819 struct ring_buffer *buffer;
820 struct print_entry *entry;
821 unsigned long irq_flags;
825 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
828 pc = preempt_count();
830 if (unlikely(tracing_selftest_running || tracing_disabled))
833 alloc = sizeof(*entry) + size + 2; /* possible \n added */
835 local_save_flags(irq_flags);
836 buffer = global_trace.trace_buffer.buffer;
837 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
842 entry = ring_buffer_event_data(event);
845 memcpy(&entry->buf, str, size);
847 /* Add a newline if necessary */
848 if (entry->buf[size - 1] != '\n') {
849 entry->buf[size] = '\n';
850 entry->buf[size + 1] = '\0';
852 entry->buf[size] = '\0';
854 __buffer_unlock_commit(buffer, event);
855 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
859 EXPORT_SYMBOL_GPL(__trace_puts);
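
/*
 * Illustrative sketch: __trace_puts() is normally reached through the
 * trace_puts() macro, which picks __trace_puts() or __trace_bputs()
 * depending on whether the string is a constant. example_checkpoint()
 * is hypothetical.
 */
static void example_checkpoint(void)
{
	trace_puts("example: reached checkpoint\n");
}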
862 * __trace_bputs - write the pointer to a constant string into trace buffer
863 * @ip: The address of the caller
864 * @str: The constant string to write to the buffer
866 int __trace_bputs(unsigned long ip, const char *str)
868 struct ring_buffer_event *event;
869 struct ring_buffer *buffer;
870 struct bputs_entry *entry;
871 unsigned long irq_flags;
872 int size = sizeof(struct bputs_entry);
875 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
878 pc = preempt_count();
880 if (unlikely(tracing_selftest_running || tracing_disabled))
883 local_save_flags(irq_flags);
884 buffer = global_trace.trace_buffer.buffer;
885 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
890 entry = ring_buffer_event_data(event);
894 __buffer_unlock_commit(buffer, event);
895 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
899 EXPORT_SYMBOL_GPL(__trace_bputs);
901 #ifdef CONFIG_TRACER_SNAPSHOT
902 void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
904 struct tracer *tracer = tr->current_trace;
908 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
909 internal_trace_puts("*** snapshot is being ignored ***\n");
913 if (!tr->allocated_snapshot) {
914 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
915 internal_trace_puts("*** stopping trace here! ***\n");
920 /* Note, snapshot can not be used when the tracer uses it */
921 if (tracer->use_max_tr) {
922 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
923 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
927 local_irq_save(flags);
928 update_max_tr(tr, current, smp_processor_id(), cond_data);
929 local_irq_restore(flags);
932 void tracing_snapshot_instance(struct trace_array *tr)
934 tracing_snapshot_instance_cond(tr, NULL);
938 * tracing_snapshot - take a snapshot of the current buffer.
940 * This causes a swap between the snapshot buffer and the current live
941 * tracing buffer. You can use this to take snapshots of the live
942 * trace when some condition is triggered, but continue to trace.
944 * Note, make sure to allocate the snapshot with either
945 * a tracing_snapshot_alloc(), or by doing it manually
946 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
948 * If the snapshot buffer is not allocated, it will stop tracing.
949 * Basically making a permanent snapshot.
951 void tracing_snapshot(void)
953 struct trace_array *tr = &global_trace;
955 tracing_snapshot_instance(tr);
957 EXPORT_SYMBOL_GPL(tracing_snapshot);
960 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
961 * @tr: The tracing instance to snapshot
962 * @cond_data: The data to be tested conditionally, and possibly saved
964 * This is the same as tracing_snapshot() except that the snapshot is
965 * conditional - the snapshot will only happen if the
966 * cond_snapshot.update() implementation receiving the cond_data
967 * returns true, which means that the trace array's cond_snapshot
968 * update() operation used the cond_data to determine whether the
969 * snapshot should be taken, and if it was, presumably saved it along
972 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
974 tracing_snapshot_instance_cond(tr, cond_data);
976 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
979 * tracing_snapshot_cond_data - get the user data associated with a snapshot
980 * @tr: The tracing instance
982 * When the user enables a conditional snapshot using
983 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
984 * with the snapshot. This accessor is used to retrieve it.
986 * Should not be called from cond_snapshot.update(), since it takes
987 * the tr->max_lock lock, which the code calling
988 * cond_snapshot.update() has already done.
990 * Returns the cond_data associated with the trace array's snapshot.
992 void *tracing_cond_snapshot_data(struct trace_array *tr)
994 void *cond_data = NULL;
996 arch_spin_lock(&tr->max_lock);
998 if (tr->cond_snapshot)
999 cond_data = tr->cond_snapshot->cond_data;
1001 arch_spin_unlock(&tr->max_lock);
1005 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1007 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1008 struct trace_buffer *size_buf, int cpu_id);
1009 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1011 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1015 if (!tr->allocated_snapshot) {
1017 /* allocate spare buffer */
1018 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1019 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1023 tr->allocated_snapshot = true;
1029 static void free_snapshot(struct trace_array *tr)
1032 * We don't free the ring buffer; instead, we resize it because
1033 * the max_tr ring buffer has some state (e.g. ring->clock) and
1034 * we want to preserve it.
1036 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1037 set_buffer_entries(&tr->max_buffer, 1);
1038 tracing_reset_online_cpus(&tr->max_buffer);
1039 tr->allocated_snapshot = false;
1043 * tracing_alloc_snapshot - allocate snapshot buffer.
1045 * This only allocates the snapshot buffer if it isn't already
1046 * allocated - it doesn't also take a snapshot.
1048 * This is meant to be used in cases where the snapshot buffer needs
1049 * to be set up for events that can't sleep but need to be able to
1050 * trigger a snapshot.
1052 int tracing_alloc_snapshot(void)
1054 struct trace_array *tr = &global_trace;
1057 ret = tracing_alloc_snapshot_instance(tr);
1062 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1065 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1067 * This is similar to tracing_snapshot(), but it will allocate the
1068 * snapshot buffer if it isn't already allocated. Use this only
1069 * where it is safe to sleep, as the allocation may sleep.
1071 * This causes a swap between the snapshot buffer and the current live
1072 * tracing buffer. You can use this to take snapshots of the live
1073 * trace when some condition is triggered, but continue to trace.
1075 void tracing_snapshot_alloc(void)
1079 ret = tracing_alloc_snapshot();
1085 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
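
/*
 * Illustrative sketch (hypothetical caller): allocate the snapshot buffer
 * once from a context that may sleep, then take snapshots later from a
 * context that cannot.
 */
static int example_prepare_snapshot(void)
{
	/* May sleep: do this during setup. */
	return tracing_alloc_snapshot();
}

static void example_on_interesting_event(void)
{
	/* Safe once the buffer is allocated. */
	tracing_snapshot();
}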
1088 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1089 * @tr: The tracing instance
1090 * @cond_data: User data to associate with the snapshot
1091 * @update: Implementation of the cond_snapshot update function
1093 * Check whether the conditional snapshot for the given instance has
1094 * already been enabled, or if the current tracer is already using a
1095 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1096 * save the cond_data and update function inside.
1098 * Returns 0 if successful, error otherwise.
1100 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1101 cond_update_fn_t update)
1103 struct cond_snapshot *cond_snapshot;
1106 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1110 cond_snapshot->cond_data = cond_data;
1111 cond_snapshot->update = update;
1113 mutex_lock(&trace_types_lock);
1115 ret = tracing_alloc_snapshot_instance(tr);
1119 if (tr->current_trace->use_max_tr) {
1125 * The cond_snapshot can only change to NULL without the
1126 * trace_types_lock. We don't care if we race with it going
1127 * to NULL, but we want to make sure that it's not set to
1128 * something other than NULL when we get here, which we can
1129 * do safely with only holding the trace_types_lock and not
1130 * having to take the max_lock.
1132 if (tr->cond_snapshot) {
1137 arch_spin_lock(&tr->max_lock);
1138 tr->cond_snapshot = cond_snapshot;
1139 arch_spin_unlock(&tr->max_lock);
1141 mutex_unlock(&trace_types_lock);
1146 mutex_unlock(&trace_types_lock);
1147 kfree(cond_snapshot);
1150 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1153 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1154 * @tr: The tracing instance
1156 * Check whether the conditional snapshot for the given instance is
1157 * enabled; if so, free the cond_snapshot associated with it,
1158 * otherwise return -EINVAL.
1160 * Returns 0 if successful, error otherwise.
1162 int tracing_snapshot_cond_disable(struct trace_array *tr)
1166 arch_spin_lock(&tr->max_lock);
1168 if (!tr->cond_snapshot)
1171 kfree(tr->cond_snapshot);
1172 tr->cond_snapshot = NULL;
1175 arch_spin_unlock(&tr->max_lock);
1179 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
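
/*
 * Illustrative sketch (hypothetical user of the conditional snapshot
 * API): the update callback decides, per call site, whether the snapshot
 * should actually be taken. struct example_cond and its fields are
 * assumptions for the sketch.
 */
struct example_cond {
	unsigned long threshold;
	unsigned long seen;
};

static bool example_cond_update(struct trace_array *tr, void *cond_data)
{
	struct example_cond *cond = cond_data;

	/* Only snapshot once the counter crosses the threshold. */
	return ++cond->seen >= cond->threshold;
}

static int example_enable_cond_snapshot(struct trace_array *tr,
					struct example_cond *cond)
{
	int ret;

	ret = tracing_snapshot_cond_enable(tr, cond, example_cond_update);
	if (ret)
		return ret;

	/* Later, at points of interest: */
	tracing_snapshot_cond(tr, cond);
	return 0;
}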
1181 void tracing_snapshot(void)
1183 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1185 EXPORT_SYMBOL_GPL(tracing_snapshot);
1186 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1188 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1190 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1191 int tracing_alloc_snapshot(void)
1193 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1196 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1197 void tracing_snapshot_alloc(void)
1202 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1203 void *tracing_cond_snapshot_data(struct trace_array *tr)
1207 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1208 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1212 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1213 int tracing_snapshot_cond_disable(struct trace_array *tr)
1217 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1218 #endif /* CONFIG_TRACER_SNAPSHOT */
1220 void tracer_tracing_off(struct trace_array *tr)
1222 if (tr->trace_buffer.buffer)
1223 ring_buffer_record_off(tr->trace_buffer.buffer);
1225 * This flag is looked at when buffers haven't been allocated
1226 * yet, or by some tracers (like irqsoff), that just want to
1227 * know if the ring buffer has been disabled, but it can handle
1228 * races where it gets disabled but we still do a record.
1229 * As the check is in the fast path of the tracers, it is more
1230 * important to be fast than accurate.
1232 tr->buffer_disabled = 1;
1233 /* Make the flag seen by readers */
1238 * tracing_off - turn off tracing buffers
1240 * This function stops the tracing buffers from recording data.
1241 * It does not disable any overhead the tracers themselves may
1242 * be causing. This function simply causes all recording to
1243 * the ring buffers to fail.
1245 void tracing_off(void)
1247 tracer_tracing_off(&global_trace);
1249 EXPORT_SYMBOL_GPL(tracing_off);
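
/*
 * Illustrative sketch (hypothetical caller): bracketing a region of
 * interest with tracing_on()/tracing_off() so that only that window is
 * recorded into the ring buffer.
 */
static void example_trace_window(void)
{
	tracing_on();
	/* ... run the code being investigated ... */
	tracing_off();
}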
1251 void disable_trace_on_warning(void)
1253 if (__disable_trace_on_warning)
1258 * tracer_tracing_is_on - show real state of ring buffer enabled
1259 * @tr : the trace array to know if ring buffer is enabled
1261 * Shows real state of the ring buffer if it is enabled or not.
1263 bool tracer_tracing_is_on(struct trace_array *tr)
1265 if (tr->trace_buffer.buffer)
1266 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1267 return !tr->buffer_disabled;
1271 * tracing_is_on - show state of ring buffers enabled
1273 int tracing_is_on(void)
1275 return tracer_tracing_is_on(&global_trace);
1277 EXPORT_SYMBOL_GPL(tracing_is_on);
1279 static int __init set_buf_size(char *str)
1281 unsigned long buf_size;
1285 buf_size = memparse(str, &str);
1286 /* nr_entries can not be zero */
1289 trace_buf_size = buf_size;
1292 __setup("trace_buf_size=", set_buf_size);
1294 static int __init set_tracing_thresh(char *str)
1296 unsigned long threshold;
1301 ret = kstrtoul(str, 0, &threshold);
1304 tracing_thresh = threshold * 1000;
1307 __setup("tracing_thresh=", set_tracing_thresh);
1309 unsigned long nsecs_to_usecs(unsigned long nsecs)
1311 return nsecs / 1000;
1315 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1316 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1317 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1318 * of strings in the order that the evals (enum) were defined.
1323 /* These must match the bit positions in trace_iterator_flags */
1324 static const char *trace_options[] = {
1332 int in_ns; /* is this clock in nanoseconds? */
1333 } trace_clocks[] = {
1334 { trace_clock_local, "local", 1 },
1335 { trace_clock_global, "global", 1 },
1336 { trace_clock_counter, "counter", 0 },
1337 { trace_clock_jiffies, "uptime", 0 },
1338 { trace_clock, "perf", 1 },
1339 { ktime_get_mono_fast_ns, "mono", 1 },
1340 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1341 { ktime_get_boot_fast_ns, "boot", 1 },
1345 bool trace_clock_in_ns(struct trace_array *tr)
1347 if (trace_clocks[tr->clock_id].in_ns)
1354 * trace_parser_get_init - gets the buffer for trace parser
1356 int trace_parser_get_init(struct trace_parser *parser, int size)
1358 memset(parser, 0, sizeof(*parser));
1360 parser->buffer = kmalloc(size, GFP_KERNEL);
1361 if (!parser->buffer)
1364 parser->size = size;
1369 * trace_parser_put - frees the buffer for trace parser
1371 void trace_parser_put(struct trace_parser *parser)
1373 kfree(parser->buffer);
1374 parser->buffer = NULL;
1378 * trace_get_user - reads the user input string separated by space
1379 * (matched by isspace(ch))
1381 * For each string found the 'struct trace_parser' is updated,
1382 * and the function returns.
1384 * Returns number of bytes read.
1386 * See kernel/trace/trace.h for 'struct trace_parser' details.
1388 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1389 size_t cnt, loff_t *ppos)
1396 trace_parser_clear(parser);
1398 ret = get_user(ch, ubuf++);
1406 * The parser is not finished with the last write,
1407 * continue reading the user input without skipping spaces.
1409 if (!parser->cont) {
1410 /* skip white space */
1411 while (cnt && isspace(ch)) {
1412 ret = get_user(ch, ubuf++);
1421 /* only spaces were written */
1422 if (isspace(ch) || !ch) {
1429 /* read the non-space input */
1430 while (cnt && !isspace(ch) && ch) {
1431 if (parser->idx < parser->size - 1)
1432 parser->buffer[parser->idx++] = ch;
1437 ret = get_user(ch, ubuf++);
1444 /* We either got finished input or we have to wait for another call. */
1445 if (isspace(ch) || !ch) {
1446 parser->buffer[parser->idx] = 0;
1447 parser->cont = false;
1448 } else if (parser->idx < parser->size - 1) {
1449 parser->cont = true;
1450 parser->buffer[parser->idx++] = ch;
1451 /* Make sure the parsed string always terminates with '\0'. */
1452 parser->buffer[parser->idx] = 0;
1465 /* TODO add a seq_buf_to_buffer() */
1466 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1470 if (trace_seq_used(s) <= s->seq.readpos)
1473 len = trace_seq_used(s) - s->seq.readpos;
1476 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1478 s->seq.readpos += cnt;
1482 unsigned long __read_mostly tracing_thresh;
1484 #ifdef CONFIG_TRACER_MAX_TRACE
1486 * Copy the new maximum trace into the separate maximum-trace
1487 * structure. (this way the maximum trace is permanently saved,
1488 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1491 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1493 struct trace_buffer *trace_buf = &tr->trace_buffer;
1494 struct trace_buffer *max_buf = &tr->max_buffer;
1495 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1496 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1499 max_buf->time_start = data->preempt_timestamp;
1501 max_data->saved_latency = tr->max_latency;
1502 max_data->critical_start = data->critical_start;
1503 max_data->critical_end = data->critical_end;
1505 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1506 max_data->pid = tsk->pid;
1508 * If tsk == current, then use current_uid(), as that does not use
1509 * RCU. The irq tracer can be called out of RCU scope.
1512 max_data->uid = current_uid();
1514 max_data->uid = task_uid(tsk);
1516 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1517 max_data->policy = tsk->policy;
1518 max_data->rt_priority = tsk->rt_priority;
1520 /* record this task's comm */
1521 tracing_record_cmdline(tsk);
1525 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1527 * @tsk: the task with the latency
1528 * @cpu: The cpu that initiated the trace.
1529 * @cond_data: User data associated with a conditional snapshot
1531 * Flip the buffers between the @tr and the max_tr and record information
1532 * about which task was the cause of this latency.
1535 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1541 WARN_ON_ONCE(!irqs_disabled());
1543 if (!tr->allocated_snapshot) {
1544 /* Only the nop tracer should hit this when disabling */
1545 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1549 arch_spin_lock(&tr->max_lock);
1551 /* Inherit the recordable setting from trace_buffer */
1552 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1553 ring_buffer_record_on(tr->max_buffer.buffer);
1555 ring_buffer_record_off(tr->max_buffer.buffer);
1557 #ifdef CONFIG_TRACER_SNAPSHOT
1558 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1561 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1563 __update_max_tr(tr, tsk, cpu);
1566 arch_spin_unlock(&tr->max_lock);
1570 * update_max_tr_single - only copy one trace over, and reset the rest
1572 * @tsk: task with the latency
1573 * @cpu: the cpu of the buffer to copy.
1575 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1578 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1585 WARN_ON_ONCE(!irqs_disabled());
1586 if (!tr->allocated_snapshot) {
1587 /* Only the nop tracer should hit this when disabling */
1588 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1592 arch_spin_lock(&tr->max_lock);
1594 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1596 if (ret == -EBUSY) {
1598 * We failed to swap the buffer due to a commit taking
1599 * place on this CPU. We fail to record, but we reset
1600 * the max trace buffer (no one writes directly to it)
1601 * and flag that it failed.
1603 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1604 "Failed to swap buffers due to commit in progress\n");
1607 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1609 __update_max_tr(tr, tsk, cpu);
1610 arch_spin_unlock(&tr->max_lock);
1612 #endif /* CONFIG_TRACER_MAX_TRACE */
1614 static int wait_on_pipe(struct trace_iterator *iter, int full)
1616 /* Iterators are static, they should be filled or empty */
1617 if (trace_buffer_iter(iter, iter->cpu_file))
1620 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1624 #ifdef CONFIG_FTRACE_STARTUP_TEST
1625 static bool selftests_can_run;
1627 struct trace_selftests {
1628 struct list_head list;
1629 struct tracer *type;
1632 static LIST_HEAD(postponed_selftests);
1634 static int save_selftest(struct tracer *type)
1636 struct trace_selftests *selftest;
1638 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1642 selftest->type = type;
1643 list_add(&selftest->list, &postponed_selftests);
1647 static int run_tracer_selftest(struct tracer *type)
1649 struct trace_array *tr = &global_trace;
1650 struct tracer *saved_tracer = tr->current_trace;
1653 if (!type->selftest || tracing_selftest_disabled)
1657 * If a tracer registers early in boot up (before scheduling is
1658 * initialized and such), then do not run its selftests yet.
1659 * Instead, run it a little later in the boot process.
1661 if (!selftests_can_run)
1662 return save_selftest(type);
1665 * Run a selftest on this tracer.
1666 * Here we reset the trace buffer, and set the current
1667 * tracer to be this tracer. The tracer can then run some
1668 * internal tracing to verify that everything is in order.
1669 * If we fail, we do not register this tracer.
1671 tracing_reset_online_cpus(&tr->trace_buffer);
1673 tr->current_trace = type;
1675 #ifdef CONFIG_TRACER_MAX_TRACE
1676 if (type->use_max_tr) {
1677 /* If we expanded the buffers, make sure the max is expanded too */
1678 if (ring_buffer_expanded)
1679 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1680 RING_BUFFER_ALL_CPUS);
1681 tr->allocated_snapshot = true;
1685 /* the test is responsible for initializing and enabling */
1686 pr_info("Testing tracer %s: ", type->name);
1687 ret = type->selftest(type, tr);
1688 /* the test is responsible for resetting too */
1689 tr->current_trace = saved_tracer;
1691 printk(KERN_CONT "FAILED!\n");
1692 /* Add the warning after printing 'FAILED' */
1696 /* Only reset on passing, to avoid touching corrupted buffers */
1697 tracing_reset_online_cpus(&tr->trace_buffer);
1699 #ifdef CONFIG_TRACER_MAX_TRACE
1700 if (type->use_max_tr) {
1701 tr->allocated_snapshot = false;
1703 /* Shrink the max buffer again */
1704 if (ring_buffer_expanded)
1705 ring_buffer_resize(tr->max_buffer.buffer, 1,
1706 RING_BUFFER_ALL_CPUS);
1710 printk(KERN_CONT "PASSED\n");
1714 static __init int init_trace_selftests(void)
1716 struct trace_selftests *p, *n;
1717 struct tracer *t, **last;
1720 selftests_can_run = true;
1722 mutex_lock(&trace_types_lock);
1724 if (list_empty(&postponed_selftests))
1727 pr_info("Running postponed tracer tests:\n");
1729 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1730 /* This loop can take minutes when sanitizers are enabled, so
1731 * let's make sure we allow RCU processing.
1734 ret = run_tracer_selftest(p->type);
1735 /* If the test fails, then warn and remove from available_tracers */
1737 WARN(1, "tracer: %s failed selftest, disabling\n",
1739 last = &trace_types;
1740 for (t = trace_types; t; t = t->next) {
1753 mutex_unlock(&trace_types_lock);
1757 core_initcall(init_trace_selftests);
1759 static inline int run_tracer_selftest(struct tracer *type)
1763 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1765 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1767 static void __init apply_trace_boot_options(void);
1770 * register_tracer - register a tracer with the ftrace system.
1771 * @type: the plugin for the tracer
1773 * Register a new plugin tracer.
1775 int __init register_tracer(struct tracer *type)
1781 pr_info("Tracer must have a name\n");
1785 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1786 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1790 mutex_lock(&trace_types_lock);
1792 tracing_selftest_running = true;
1794 for (t = trace_types; t; t = t->next) {
1795 if (strcmp(type->name, t->name) == 0) {
1797 pr_info("Tracer %s already registered\n",
1804 if (!type->set_flag)
1805 type->set_flag = &dummy_set_flag;
1807 /* allocate a dummy tracer_flags */
1808 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1813 type->flags->val = 0;
1814 type->flags->opts = dummy_tracer_opt;
1816 if (!type->flags->opts)
1817 type->flags->opts = dummy_tracer_opt;
1819 /* store the tracer for __set_tracer_option */
1820 type->flags->trace = type;
1822 ret = run_tracer_selftest(type);
1826 type->next = trace_types;
1828 add_tracer_options(&global_trace, type);
1831 tracing_selftest_running = false;
1832 mutex_unlock(&trace_types_lock);
1834 if (ret || !default_bootup_tracer)
1837 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1840 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1841 /* Do we want this tracer to start on bootup? */
1842 tracing_set_tracer(&global_trace, type->name);
1843 default_bootup_tracer = NULL;
1845 apply_trace_boot_options();
1847 /* disable other selftests, since this will break it. */
1848 tracing_selftest_disabled = true;
1849 #ifdef CONFIG_FTRACE_STARTUP_TEST
1850 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
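
/*
 * Illustrative sketch (not a real tracer): the minimal shape of a tracer
 * that register_tracer() accepts. The callbacks here do nothing; the name
 * "example" and the registration point are hypothetical.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static int __init example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
/* core_initcall(example_tracer_register); -- where a real tracer would hook in */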
1858 void tracing_reset(struct trace_buffer *buf, int cpu)
1860 struct ring_buffer *buffer = buf->buffer;
1865 ring_buffer_record_disable(buffer);
1867 /* Make sure all commits have finished */
1869 ring_buffer_reset_cpu(buffer, cpu);
1871 ring_buffer_record_enable(buffer);
1874 void tracing_reset_online_cpus(struct trace_buffer *buf)
1876 struct ring_buffer *buffer = buf->buffer;
1882 ring_buffer_record_disable(buffer);
1884 /* Make sure all commits have finished */
1887 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1889 for_each_online_cpu(cpu)
1890 ring_buffer_reset_cpu(buffer, cpu);
1892 ring_buffer_record_enable(buffer);
1895 /* Must have trace_types_lock held */
1896 void tracing_reset_all_online_cpus(void)
1898 struct trace_array *tr;
1900 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1901 if (!tr->clear_trace)
1903 tr->clear_trace = false;
1904 tracing_reset_online_cpus(&tr->trace_buffer);
1905 #ifdef CONFIG_TRACER_MAX_TRACE
1906 tracing_reset_online_cpus(&tr->max_buffer);
1911 static int *tgid_map;
1913 #define SAVED_CMDLINES_DEFAULT 128
1914 #define NO_CMDLINE_MAP UINT_MAX
1915 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1916 struct saved_cmdlines_buffer {
1917 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1918 unsigned *map_cmdline_to_pid;
1919 unsigned cmdline_num;
1921 char *saved_cmdlines;
1923 static struct saved_cmdlines_buffer *savedcmd;
1925 /* temporarily disable recording */
1926 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1928 static inline char *get_saved_cmdlines(int idx)
1930 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1933 static inline void set_cmdline(int idx, const char *cmdline)
1935 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1938 static int allocate_cmdlines_buffer(unsigned int val,
1939 struct saved_cmdlines_buffer *s)
1941 s->map_cmdline_to_pid = kmalloc_array(val,
1942 sizeof(*s->map_cmdline_to_pid),
1944 if (!s->map_cmdline_to_pid)
1947 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1948 if (!s->saved_cmdlines) {
1949 kfree(s->map_cmdline_to_pid);
1954 s->cmdline_num = val;
1955 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1956 sizeof(s->map_pid_to_cmdline));
1957 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1958 val * sizeof(*s->map_cmdline_to_pid));
1963 static int trace_create_savedcmd(void)
1967 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1971 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1981 int is_tracing_stopped(void)
1983 return global_trace.stop_count;
1987 * tracing_start - quick start of the tracer
1989 * If tracing is enabled but was stopped by tracing_stop,
1990 * this will start the tracer back up.
1992 void tracing_start(void)
1994 struct ring_buffer *buffer;
1995 unsigned long flags;
1997 if (tracing_disabled)
2000 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2001 if (--global_trace.stop_count) {
2002 if (global_trace.stop_count < 0) {
2003 /* Someone screwed up their debugging */
2005 global_trace.stop_count = 0;
2010 /* Prevent the buffers from switching */
2011 arch_spin_lock(&global_trace.max_lock);
2013 buffer = global_trace.trace_buffer.buffer;
2015 ring_buffer_record_enable(buffer);
2017 #ifdef CONFIG_TRACER_MAX_TRACE
2018 buffer = global_trace.max_buffer.buffer;
2020 ring_buffer_record_enable(buffer);
2023 arch_spin_unlock(&global_trace.max_lock);
2026 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2029 static void tracing_start_tr(struct trace_array *tr)
2031 struct ring_buffer *buffer;
2032 unsigned long flags;
2034 if (tracing_disabled)
2037 /* If global, we need to also start the max tracer */
2038 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2039 return tracing_start();
2041 raw_spin_lock_irqsave(&tr->start_lock, flags);
2043 if (--tr->stop_count) {
2044 if (tr->stop_count < 0) {
2045 /* Someone screwed up their debugging */
2052 buffer = tr->trace_buffer.buffer;
2054 ring_buffer_record_enable(buffer);
2057 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2061 * tracing_stop - quick stop of the tracer
2063 * Light weight way to stop tracing. Use in conjunction with
2066 void tracing_stop(void)
2068 struct ring_buffer *buffer;
2069 unsigned long flags;
2071 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2072 if (global_trace.stop_count++)
2075 /* Prevent the buffers from switching */
2076 arch_spin_lock(&global_trace.max_lock);
2078 buffer = global_trace.trace_buffer.buffer;
2080 ring_buffer_record_disable(buffer);
2082 #ifdef CONFIG_TRACER_MAX_TRACE
2083 buffer = global_trace.max_buffer.buffer;
2085 ring_buffer_record_disable(buffer);
2088 arch_spin_unlock(&global_trace.max_lock);
2091 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2094 static void tracing_stop_tr(struct trace_array *tr)
2096 struct ring_buffer *buffer;
2097 unsigned long flags;
2099 /* If global, we need to also stop the max tracer */
2100 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2101 return tracing_stop();
2103 raw_spin_lock_irqsave(&tr->start_lock, flags);
2104 if (tr->stop_count++)
2107 buffer = tr->trace_buffer.buffer;
2109 ring_buffer_record_disable(buffer);
2112 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2115 static int trace_save_cmdline(struct task_struct *tsk)
2119 /* treat recording of idle task as a success */
2123 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2127 * It's not the end of the world if we don't get
2128 * the lock, but we also don't want to spin
2129 * nor do we want to disable interrupts,
2130 * so if we miss here, then better luck next time.
2132 if (!arch_spin_trylock(&trace_cmdline_lock))
2135 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2136 if (idx == NO_CMDLINE_MAP) {
2137 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2140 * Check whether the cmdline buffer at idx has a pid
2141 * mapped. We are going to overwrite that entry so we
2142 * need to clear the map_pid_to_cmdline. Otherwise we
2143 * would read the new comm for the old pid.
2145 pid = savedcmd->map_cmdline_to_pid[idx];
2146 if (pid != NO_CMDLINE_MAP)
2147 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2149 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2150 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2152 savedcmd->cmdline_idx = idx;
2155 set_cmdline(idx, tsk->comm);
2157 arch_spin_unlock(&trace_cmdline_lock);
2162 static void __trace_find_cmdline(int pid, char comm[])
2167 strcpy(comm, "<idle>");
2171 if (WARN_ON_ONCE(pid < 0)) {
2172 strcpy(comm, "<XXX>");
2176 if (pid > PID_MAX_DEFAULT) {
2177 strcpy(comm, "<...>");
2181 map = savedcmd->map_pid_to_cmdline[pid];
2182 if (map != NO_CMDLINE_MAP)
2183 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2185 strcpy(comm, "<...>");
2188 void trace_find_cmdline(int pid, char comm[])
2191 arch_spin_lock(&trace_cmdline_lock);
2193 __trace_find_cmdline(pid, comm);
2195 arch_spin_unlock(&trace_cmdline_lock);
2199 int trace_find_tgid(int pid)
2201 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2204 return tgid_map[pid];
2207 static int trace_save_tgid(struct task_struct *tsk)
2209 /* treat recording of idle task as a success */
2213 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2216 tgid_map[tsk->pid] = tsk->tgid;
2220 static bool tracing_record_taskinfo_skip(int flags)
2222 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2224 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2226 if (!__this_cpu_read(trace_taskinfo_save))
2232 * tracing_record_taskinfo - record the task info of a task
2234 * @task: task to record
2235 * @flags: TRACE_RECORD_CMDLINE for recording comm
2236 *         TRACE_RECORD_TGID for recording tgid
2238 void tracing_record_taskinfo(struct task_struct *task, int flags)
2242 if (tracing_record_taskinfo_skip(flags))
2246 * Record as much task information as possible. If some fail, continue
2247 * to try to record the others.
2249 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2250 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2252 /* If recording any information failed, retry again soon. */
2256 __this_cpu_write(trace_taskinfo_save, false);
2260 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2262 * @prev: previous task during sched_switch
2263 * @next: next task during sched_switch
2264 * @flags: TRACE_RECORD_CMDLINE for recording comm
2265 *         TRACE_RECORD_TGID for recording tgid
2267 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2268 struct task_struct *next, int flags)
2272 if (tracing_record_taskinfo_skip(flags))
2276 * Record as much task information as possible. If some fail, continue
2277 * to try to record the others.
2279 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2280 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2281 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2282 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2284 /* If recording any information failed, retry again soon. */
2288 __this_cpu_write(trace_taskinfo_save, false);
2291 /* Helpers to record a specific task information */
2292 void tracing_record_cmdline(struct task_struct *task)
2294 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2297 void tracing_record_tgid(struct task_struct *task)
2299 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2303 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2304 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2305 * simplifies those functions and keeps them in sync.
2307 enum print_line_t trace_handle_return(struct trace_seq *s)
2309 return trace_seq_has_overflowed(s) ?
2310 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2312 EXPORT_SYMBOL_GPL(trace_handle_return);
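
/*
 * Illustrative sketch (hypothetical event output callback): using
 * trace_handle_return() to convert the trace_seq overflow state into the
 * print_line_t value the output layer expects.
 */
static enum print_line_t example_event_output(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event)
{
	trace_seq_puts(&iter->seq, "example event\n");

	return trace_handle_return(&iter->seq);
}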
2315 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2318 struct task_struct *tsk = current;
2320 entry->preempt_count = pc & 0xff;
2321 entry->pid = (tsk) ? tsk->pid : 0;
2323 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2324 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2326 TRACE_FLAG_IRQS_NOSUPPORT |
2328 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2329 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2330 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2331 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2332 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2334 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2336 struct ring_buffer_event *
2337 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2340 unsigned long flags, int pc)
2342 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2345 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2346 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2347 static int trace_buffered_event_ref;
2350 * trace_buffered_event_enable - enable buffering events
2352 * When events are being filtered, it is quicker to use a temporary
2353 * buffer to write the event data into if there's a likely chance
2354 * that it will not be committed. The discard of the ring buffer
2355 * is not as fast as committing, and is much slower than copying
2358 * When an event is to be filtered, allocate per cpu buffers to
2359 * write the event data into, and if the event is filtered and discarded
2360 * it is simply dropped, otherwise, the entire data is to be committed
2363 void trace_buffered_event_enable(void)
2365 struct ring_buffer_event *event;
2369 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2371 if (trace_buffered_event_ref++)
2374 for_each_tracing_cpu(cpu) {
2375 page = alloc_pages_node(cpu_to_node(cpu),
2376 GFP_KERNEL | __GFP_NORETRY, 0);
2380 event = page_address(page);
2381 memset(event, 0, sizeof(*event));
2383 per_cpu(trace_buffered_event, cpu) = event;
2386 if (cpu == smp_processor_id() &&
2387 this_cpu_read(trace_buffered_event) !=
2388 per_cpu(trace_buffered_event, cpu))
2395 trace_buffered_event_disable();
2398 static void enable_trace_buffered_event(void *data)
2400 /* Probably not needed, but do it anyway */
2402 this_cpu_dec(trace_buffered_event_cnt);
2405 static void disable_trace_buffered_event(void *data)
2407 this_cpu_inc(trace_buffered_event_cnt);
2411 * trace_buffered_event_disable - disable buffering events
2413 * When a filter is removed, it is faster to not use the buffered
2414 * events, and to commit directly into the ring buffer. Free up
2415 * the temp buffers when there are no more users. This requires
2416 * special synchronization with current events.
2418 void trace_buffered_event_disable(void)
2422 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2424 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2427 if (--trace_buffered_event_ref)
2431 /* For each CPU, set the buffer as used. */
2432 smp_call_function_many(tracing_buffer_mask,
2433 disable_trace_buffered_event, NULL, 1);
2436 /* Wait for all current users to finish */
2439 for_each_tracing_cpu(cpu) {
2440 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2441 per_cpu(trace_buffered_event, cpu) = NULL;
2444 * Make sure trace_buffered_event is NULL before clearing
2445 * trace_buffered_event_cnt.
2450 /* Do the work on each cpu */
2451 smp_call_function_many(tracing_buffer_mask,
2452 enable_trace_buffered_event, NULL, 1);
2456 static struct ring_buffer *temp_buffer;
2458 struct ring_buffer_event *
2459 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2460 struct trace_event_file *trace_file,
2461 int type, unsigned long len,
2462 unsigned long flags, int pc)
2464 struct ring_buffer_event *entry;
2467 *current_rb = trace_file->tr->trace_buffer.buffer;
2469 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2470 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2471 (entry = this_cpu_read(trace_buffered_event))) {
2472 /* Try to use the per cpu buffer first */
2473 val = this_cpu_inc_return(trace_buffered_event_cnt);
2475 trace_event_setup(entry, type, flags, pc);
2476 entry->array[0] = len;
2479 this_cpu_dec(trace_buffered_event_cnt);
2482 entry = __trace_buffer_lock_reserve(*current_rb,
2483 type, len, flags, pc);
2485 * If tracing is off, but we have triggers enabled
2486 * we still need to look at the event data. Use the temp_buffer
2487 * to store the trace event for the trigger to use. It's recursion
2488 * safe and will not be recorded anywhere.
2490 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2491 *current_rb = temp_buffer;
2492 entry = __trace_buffer_lock_reserve(*current_rb,
2493 type, len, flags, pc);
2497 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2499 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2500 static DEFINE_MUTEX(tracepoint_printk_mutex);
2502 static void output_printk(struct trace_event_buffer *fbuffer)
2504 struct trace_event_call *event_call;
2505 struct trace_event *event;
2506 unsigned long flags;
2507 struct trace_iterator *iter = tracepoint_print_iter;
2509 /* We should never get here if iter is NULL */
2510 if (WARN_ON_ONCE(!iter))
2513 event_call = fbuffer->trace_file->event_call;
2514 if (!event_call || !event_call->event.funcs ||
2515 !event_call->event.funcs->trace)
2518 event = &fbuffer->trace_file->event_call->event;
2520 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2521 trace_seq_init(&iter->seq);
2522 iter->ent = fbuffer->entry;
2523 event_call->event.funcs->trace(iter, 0, event);
2524 trace_seq_putc(&iter->seq, 0);
2525 printk("%s", iter->seq.buffer);
2527 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2530 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2531 void __user *buffer, size_t *lenp,
2534 int save_tracepoint_printk;
2537 mutex_lock(&tracepoint_printk_mutex);
2538 save_tracepoint_printk = tracepoint_printk;
2540 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2543 * This will force exiting early, as tracepoint_printk
2544 * is always zero when tracepoint_print_iter is not allocated
2546 if (!tracepoint_print_iter)
2547 tracepoint_printk = 0;
2549 if (save_tracepoint_printk == tracepoint_printk)
2552 if (tracepoint_printk)
2553 static_key_enable(&tracepoint_printk_key.key);
2555 static_key_disable(&tracepoint_printk_key.key);
2558 mutex_unlock(&tracepoint_printk_mutex);
2563 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2565 if (static_key_false(&tracepoint_printk_key.key))
2566 output_printk(fbuffer);
2568 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2569 fbuffer->event, fbuffer->entry,
2570 fbuffer->flags, fbuffer->pc);
2572 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
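/*
 * Illustrative sketch (assumption: this mirrors the generated
 * trace_event_raw_event_*() helpers; the entry type is made up and
 * trace_file stands for the event's struct trace_event_file *): the
 * usual pairing is trace_event_buffer_reserve() to get an entry and
 * trace_event_buffer_commit() to hand it to the ring buffer (or to
 * printk when the tracepoint_printk key above is enabled).
 *
 *	struct trace_event_buffer fbuffer;
 *	struct example_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = 42;
 *	trace_event_buffer_commit(&fbuffer);
 */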
2577 * trace_buffer_unlock_commit_regs()
2578 * trace_event_buffer_commit()
2579 * trace_event_raw_event_xxx()
2581 # define STACK_SKIP 3
2583 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2584 struct ring_buffer *buffer,
2585 struct ring_buffer_event *event,
2586 unsigned long flags, int pc,
2587 struct pt_regs *regs)
2589 __buffer_unlock_commit(buffer, event);
2592 * If regs is not set, then skip the necessary functions.
2593 * Note, we can still get here via blktrace, wakeup tracer
2594 * and mmiotrace, but that's ok if they lose a function or
2595 * two. They are not that meaningful.
2597 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2598 ftrace_trace_userstack(buffer, flags, pc);
2602 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2605 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2606 struct ring_buffer_event *event)
2608 __buffer_unlock_commit(buffer, event);
2612 trace_process_export(struct trace_export *export,
2613 struct ring_buffer_event *event)
2615 struct trace_entry *entry;
2616 unsigned int size = 0;
2618 entry = ring_buffer_event_data(event);
2619 size = ring_buffer_event_length(event);
2620 export->write(export, entry, size);
2623 static DEFINE_MUTEX(ftrace_export_lock);
2625 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2627 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2629 static inline void ftrace_exports_enable(void)
2631 static_branch_enable(&ftrace_exports_enabled);
2634 static inline void ftrace_exports_disable(void)
2636 static_branch_disable(&ftrace_exports_enabled);
2639 static void ftrace_exports(struct ring_buffer_event *event)
2641 struct trace_export *export;
2643 preempt_disable_notrace();
2645 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2647 trace_process_export(export, event);
2648 export = rcu_dereference_raw_notrace(export->next);
2651 preempt_enable_notrace();
2655 add_trace_export(struct trace_export **list, struct trace_export *export)
2657 rcu_assign_pointer(export->next, *list);
2659 * We are inserting export into the list, but another
2660 * CPU might be walking that list. We need to make sure
2661 * the export->next pointer is valid before another CPU sees
2662 * the export pointer included in the list.
2664 rcu_assign_pointer(*list, export);
2668 rm_trace_export(struct trace_export **list, struct trace_export *export)
2670 struct trace_export **p;
2672 for (p = list; *p != NULL; p = &(*p)->next)
2679 rcu_assign_pointer(*p, (*p)->next);
2685 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2688 ftrace_exports_enable();
2690 add_trace_export(list, export);
2694 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2698 ret = rm_trace_export(list, export);
2700 ftrace_exports_disable();
2705 int register_ftrace_export(struct trace_export *export)
2707 if (WARN_ON_ONCE(!export->write))
2710 mutex_lock(&ftrace_export_lock);
2712 add_ftrace_export(&ftrace_exports_list, export);
2714 mutex_unlock(&ftrace_export_lock);
2718 EXPORT_SYMBOL_GPL(register_ftrace_export);
2720 int unregister_ftrace_export(struct trace_export *export)
2724 mutex_lock(&ftrace_export_lock);
2726 ret = rm_ftrace_export(&ftrace_exports_list, export);
2728 mutex_unlock(&ftrace_export_lock);
2732 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
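/*
 * Illustrative sketch (assumption: a hypothetical module-side user, not
 * code from this file): while registered, an ftrace export receives a
 * copy of every function trace entry through its ->write() callback.
 *
 *	static void example_export_write(struct trace_export *export,
 *					 const void *entry, unsigned int size)
 *	{
 *		(forward the raw entry, e.g. to a device or over the network)
 *	}
 *
 *	static struct trace_export example_export = {
 *		.write	= example_export_write,
 *	};
 *
 *	register_ftrace_export(&example_export);
 *	...
 *	unregister_ftrace_export(&example_export);
 */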
2735 trace_function(struct trace_array *tr,
2736 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2739 struct trace_event_call *call = &event_function;
2740 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2741 struct ring_buffer_event *event;
2742 struct ftrace_entry *entry;
2744 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2748 entry = ring_buffer_event_data(event);
2750 entry->parent_ip = parent_ip;
2752 if (!call_filter_check_discard(call, entry, buffer, event)) {
2753 if (static_branch_unlikely(&ftrace_exports_enabled))
2754 ftrace_exports(event);
2755 __buffer_unlock_commit(buffer, event);
2759 #ifdef CONFIG_STACKTRACE
2761 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2762 #define FTRACE_KSTACK_NESTING 4
2764 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2766 struct ftrace_stack {
2767 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2771 struct ftrace_stacks {
2772 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2775 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2776 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2778 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2779 unsigned long flags,
2780 int skip, int pc, struct pt_regs *regs)
2782 struct trace_event_call *call = &event_kernel_stack;
2783 struct ring_buffer_event *event;
2784 unsigned int size, nr_entries;
2785 struct ftrace_stack *fstack;
2786 struct stack_entry *entry;
2790 * Add one, for this function and the call to stack_trace_save().
2791 * If regs is set, then these functions will not be in the way.
2793 #ifndef CONFIG_UNWINDER_ORC
2799 * Since events can happen in NMIs there's no safe way to
2800 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2801 * or NMI comes in, it will just have to use the default
2802 * FTRACE_STACK_SIZE.
2804 preempt_disable_notrace();
2806 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2808 /* This should never happen. If it does, yell once and skip */
2809 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2813 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2814 * interrupt will either see the value pre increment or post
2815 * increment. If the interrupt happens pre increment it will have
2816 * restored the counter when it returns. We just need a barrier to
2817 * keep gcc from moving things around.
2821 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2822 size = ARRAY_SIZE(fstack->calls);
2825 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2828 nr_entries = stack_trace_save(fstack->calls, size, skip);
2831 size = nr_entries * sizeof(unsigned long);
2832 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2833 sizeof(*entry) + size, flags, pc);
2836 entry = ring_buffer_event_data(event);
2838 memcpy(&entry->caller, fstack->calls, size);
2839 entry->size = nr_entries;
2841 if (!call_filter_check_discard(call, entry, buffer, event))
2842 __buffer_unlock_commit(buffer, event);
2845 /* Again, don't let gcc optimize things here */
2847 __this_cpu_dec(ftrace_stack_reserve);
2848 preempt_enable_notrace();
2852 static inline void ftrace_trace_stack(struct trace_array *tr,
2853 struct ring_buffer *buffer,
2854 unsigned long flags,
2855 int skip, int pc, struct pt_regs *regs)
2857 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2860 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2863 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2866 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2868 if (rcu_is_watching()) {
2869 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2874 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2875 * but if the above rcu_is_watching() failed, then the NMI
2876 * triggered someplace critical, and rcu_irq_enter() should
2877 * not be called from NMI.
2879 if (unlikely(in_nmi()))
2882 rcu_irq_enter_irqson();
2883 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2884 rcu_irq_exit_irqson();
2888 * trace_dump_stack - record a stack back trace in the trace buffer
2889 * @skip: Number of functions to skip (helper handlers)
2891 void trace_dump_stack(int skip)
2893 unsigned long flags;
2895 if (tracing_disabled || tracing_selftest_running)
2898 local_save_flags(flags);
2900 #ifndef CONFIG_UNWINDER_ORC
2901 /* Skip 1 to skip this function. */
2904 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2905 flags, skip, preempt_count(), NULL);
2907 EXPORT_SYMBOL_GPL(trace_dump_stack);
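/*
 * Illustrative sketch (not part of the original file): any kernel code
 * that wants the current backtrace recorded in the trace buffer instead
 * of on the console can do, for example:
 *
 *	if (unexpected_condition)
 *		trace_dump_stack(0);
 *
 * A non-zero @skip drops that many helper frames from the top of the
 * recorded stack.  "unexpected_condition" is a placeholder.
 */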
2909 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
2910 static DEFINE_PER_CPU(int, user_stack_count);
2913 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2915 struct trace_event_call *call = &event_user_stack;
2916 struct ring_buffer_event *event;
2917 struct userstack_entry *entry;
2919 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2923 * NMIs cannot handle page faults, even with fixups.
2924 * Saving the user stack can (and often does) fault.
2926 if (unlikely(in_nmi()))
2930 * prevent recursion, since the user stack tracing may
2931 * trigger other kernel events.
2934 if (__this_cpu_read(user_stack_count))
2937 __this_cpu_inc(user_stack_count);
2939 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2940 sizeof(*entry), flags, pc);
2942 goto out_drop_count;
2943 entry = ring_buffer_event_data(event);
2945 entry->tgid = current->tgid;
2946 memset(&entry->caller, 0, sizeof(entry->caller));
2948 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
2949 if (!call_filter_check_discard(call, entry, buffer, event))
2950 __buffer_unlock_commit(buffer, event);
2953 __this_cpu_dec(user_stack_count);
2957 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
2958 static void ftrace_trace_userstack(struct ring_buffer *buffer,
2959 unsigned long flags, int pc)
2962 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
2964 #endif /* CONFIG_STACKTRACE */
2966 /* created for use with alloc_percpu */
2967 struct trace_buffer_struct {
2969 char buffer[4][TRACE_BUF_SIZE];
2972 static struct trace_buffer_struct *trace_percpu_buffer;
2975 * This allows for lockless recording. If we're nested too deeply, then
2976 * this returns NULL.
2978 static char *get_trace_buf(void)
2980 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2982 if (!buffer || buffer->nesting >= 4)
2987 /* Interrupts must see nesting incremented before we use the buffer */
2989 return &buffer->buffer[buffer->nesting][0];
2992 static void put_trace_buf(void)
2994 /* Don't let the decrement of nesting leak before this */
2996 this_cpu_dec(trace_percpu_buffer->nesting);
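/*
 * Illustrative sketch (not part of the original file): get_trace_buf()
 * and put_trace_buf() are always paired, and a NULL return (nested more
 * than four levels deep) simply drops the message, as trace_vbprintk()
 * and __trace_array_vprintk() below do:
 *
 *	char *tbuffer;
 *
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		return 0;	(too deeply nested: drop the message)
 *	... format at most TRACE_BUF_SIZE bytes into tbuffer ...
 *	put_trace_buf();
 */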
2999 static int alloc_percpu_trace_buffer(void)
3001 struct trace_buffer_struct *buffers;
3003 buffers = alloc_percpu(struct trace_buffer_struct);
3004 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3007 trace_percpu_buffer = buffers;
3011 static int buffers_allocated;
3013 void trace_printk_init_buffers(void)
3015 if (buffers_allocated)
3018 if (alloc_percpu_trace_buffer())
3021 /* trace_printk() is for debug use only. Don't use it in production. */
3024 pr_warn("**********************************************************\n");
3025 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3027 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3029 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3030 pr_warn("** unsafe for production use. **\n");
3032 pr_warn("** If you see this message and you are not debugging **\n");
3033 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3035 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3036 pr_warn("**********************************************************\n");
3038 /* Expand the buffers to the set size */
3039 tracing_update_buffers();
3041 buffers_allocated = 1;
3044 * trace_printk_init_buffers() can be called by modules.
3045 * If that happens, then we need to start cmdline recording
3046 * directly here. If the global_trace.buffer is already
3047 * allocated here, then this was called by module code.
3049 if (global_trace.trace_buffer.buffer)
3050 tracing_start_cmdline_record();
3052 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3054 void trace_printk_start_comm(void)
3056 /* Start tracing comms if trace printk is set */
3057 if (!buffers_allocated)
3059 tracing_start_cmdline_record();
3062 static void trace_printk_start_stop_comm(int enabled)
3064 if (!buffers_allocated)
3068 tracing_start_cmdline_record();
3070 tracing_stop_cmdline_record();
3074 * trace_vbprintk - write binary msg to tracing buffer
3077 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3079 struct trace_event_call *call = &event_bprint;
3080 struct ring_buffer_event *event;
3081 struct ring_buffer *buffer;
3082 struct trace_array *tr = &global_trace;
3083 struct bprint_entry *entry;
3084 unsigned long flags;
3086 int len = 0, size, pc;
3088 if (unlikely(tracing_selftest_running || tracing_disabled))
3091 /* Don't pollute graph traces with trace_vprintk internals */
3092 pause_graph_tracing();
3094 pc = preempt_count();
3095 preempt_disable_notrace();
3097 tbuffer = get_trace_buf();
3103 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3105 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3108 local_save_flags(flags);
3109 size = sizeof(*entry) + sizeof(u32) * len;
3110 buffer = tr->trace_buffer.buffer;
3111 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3115 entry = ring_buffer_event_data(event);
3119 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3120 if (!call_filter_check_discard(call, entry, buffer, event)) {
3121 __buffer_unlock_commit(buffer, event);
3122 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3129 preempt_enable_notrace();
3130 unpause_graph_tracing();
3134 EXPORT_SYMBOL_GPL(trace_vbprintk);
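/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach trace_vbprintk() through the trace_printk() macro, which stores
 * only the format pointer plus the binary arguments in the ring buffer;
 * the format is decoded later, when the trace file is read.  The
 * variable names below are made up:
 *
 *	trace_printk("processing item %d (flags 0x%x)\n", id, flags);
 */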
3138 __trace_array_vprintk(struct ring_buffer *buffer,
3139 unsigned long ip, const char *fmt, va_list args)
3141 struct trace_event_call *call = &event_print;
3142 struct ring_buffer_event *event;
3143 int len = 0, size, pc;
3144 struct print_entry *entry;
3145 unsigned long flags;
3148 if (tracing_disabled || tracing_selftest_running)
3151 /* Don't pollute graph traces with trace_vprintk internals */
3152 pause_graph_tracing();
3154 pc = preempt_count();
3155 preempt_disable_notrace();
3158 tbuffer = get_trace_buf();
3164 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3166 local_save_flags(flags);
3167 size = sizeof(*entry) + len + 1;
3168 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3172 entry = ring_buffer_event_data(event);
3175 memcpy(&entry->buf, tbuffer, len + 1);
3176 if (!call_filter_check_discard(call, entry, buffer, event)) {
3177 __buffer_unlock_commit(buffer, event);
3178 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3185 preempt_enable_notrace();
3186 unpause_graph_tracing();
3192 int trace_array_vprintk(struct trace_array *tr,
3193 unsigned long ip, const char *fmt, va_list args)
3195 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3199 int trace_array_printk(struct trace_array *tr,
3200 unsigned long ip, const char *fmt, ...)
3205 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3209 ret = trace_array_vprintk(tr, ip, fmt, ap);
3213 EXPORT_SYMBOL_GPL(trace_array_printk);
3216 int trace_array_printk_buf(struct ring_buffer *buffer,
3217 unsigned long ip, const char *fmt, ...)
3222 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3226 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3232 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3234 return trace_array_vprintk(&global_trace, ip, fmt, args);
3236 EXPORT_SYMBOL_GPL(trace_vprintk);
3238 static void trace_iterator_increment(struct trace_iterator *iter)
3240 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3244 ring_buffer_read(buf_iter, NULL);
3247 static struct trace_entry *
3248 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3249 unsigned long *lost_events)
3251 struct ring_buffer_event *event;
3252 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3255 event = ring_buffer_iter_peek(buf_iter, ts);
3257 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3261 iter->ent_size = ring_buffer_event_length(event);
3262 return ring_buffer_event_data(event);
3268 static struct trace_entry *
3269 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3270 unsigned long *missing_events, u64 *ent_ts)
3272 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3273 struct trace_entry *ent, *next = NULL;
3274 unsigned long lost_events = 0, next_lost = 0;
3275 int cpu_file = iter->cpu_file;
3276 u64 next_ts = 0, ts;
3282 * If we are in a per_cpu trace file, don't bother iterating over
3283 * all CPUs; peek at that CPU directly.
3285 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3286 if (ring_buffer_empty_cpu(buffer, cpu_file))
3288 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3290 *ent_cpu = cpu_file;
3295 for_each_tracing_cpu(cpu) {
3297 if (ring_buffer_empty_cpu(buffer, cpu))
3300 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3303 * Pick the entry with the smallest timestamp:
3305 if (ent && (!next || ts < next_ts)) {
3309 next_lost = lost_events;
3310 next_size = iter->ent_size;
3314 iter->ent_size = next_size;
3317 *ent_cpu = next_cpu;
3323 *missing_events = next_lost;
3328 /* Find the next real entry, without updating the iterator itself */
3329 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3330 int *ent_cpu, u64 *ent_ts)
3332 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3335 /* Find the next real entry, and increment the iterator to the next entry */
3336 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3338 iter->ent = __find_next_entry(iter, &iter->cpu,
3339 &iter->lost_events, &iter->ts);
3342 trace_iterator_increment(iter);
3344 return iter->ent ? iter : NULL;
3347 static void trace_consume(struct trace_iterator *iter)
3349 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3350 &iter->lost_events);
3353 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3355 struct trace_iterator *iter = m->private;
3359 WARN_ON_ONCE(iter->leftover);
3363 /* can't go backwards */
3368 ent = trace_find_next_entry_inc(iter);
3372 while (ent && iter->idx < i)
3373 ent = trace_find_next_entry_inc(iter);
3380 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3382 struct ring_buffer_event *event;
3383 struct ring_buffer_iter *buf_iter;
3384 unsigned long entries = 0;
3387 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3389 buf_iter = trace_buffer_iter(iter, cpu);
3393 ring_buffer_iter_reset(buf_iter);
3396 * With the max latency tracers it is possible that a reset
3397 * never took place on a cpu. This is evident when the timestamp
3398 * is before the start of the buffer.
3400 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3401 if (ts >= iter->trace_buffer->time_start)
3404 ring_buffer_read(buf_iter, NULL);
3407 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3411 * The current tracer is copied to avoid using a global lock all around.
3414 static void *s_start(struct seq_file *m, loff_t *pos)
3416 struct trace_iterator *iter = m->private;
3417 struct trace_array *tr = iter->tr;
3418 int cpu_file = iter->cpu_file;
3424 * copy the tracer to avoid using a global lock all around.
3425 * iter->trace is a copy of current_trace, the pointer to the
3426 * name may be used instead of a strcmp(), as iter->trace->name
3427 * will point to the same string as current_trace->name.
3429 mutex_lock(&trace_types_lock);
3430 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3431 *iter->trace = *tr->current_trace;
3432 mutex_unlock(&trace_types_lock);
3434 #ifdef CONFIG_TRACER_MAX_TRACE
3435 if (iter->snapshot && iter->trace->use_max_tr)
3436 return ERR_PTR(-EBUSY);
3439 if (!iter->snapshot)
3440 atomic_inc(&trace_record_taskinfo_disabled);
3442 if (*pos != iter->pos) {
3447 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3448 for_each_tracing_cpu(cpu)
3449 tracing_iter_reset(iter, cpu);
3451 tracing_iter_reset(iter, cpu_file);
3454 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3459 * If we overflowed the seq_file before, then we want
3460 * to just reuse the trace_seq buffer again.
3466 p = s_next(m, p, &l);
3470 trace_event_read_lock();
3471 trace_access_lock(cpu_file);
3475 static void s_stop(struct seq_file *m, void *p)
3477 struct trace_iterator *iter = m->private;
3479 #ifdef CONFIG_TRACER_MAX_TRACE
3480 if (iter->snapshot && iter->trace->use_max_tr)
3484 if (!iter->snapshot)
3485 atomic_dec(&trace_record_taskinfo_disabled);
3487 trace_access_unlock(iter->cpu_file);
3488 trace_event_read_unlock();
3492 get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3493 unsigned long *entries, int cpu)
3495 unsigned long count;
3497 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3499 * If this buffer has skipped entries, then we hold all
3500 * entries for the trace and we need to ignore the
3501 * ones before the time stamp.
3503 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3504 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3505 /* total is the same as the entries */
3509 ring_buffer_overrun_cpu(buf->buffer, cpu);
3514 get_total_entries(struct trace_buffer *buf,
3515 unsigned long *total, unsigned long *entries)
3523 for_each_tracing_cpu(cpu) {
3524 get_total_entries_cpu(buf, &t, &e, cpu);
3530 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3532 unsigned long total, entries;
3537 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3542 unsigned long trace_total_entries(struct trace_array *tr)
3544 unsigned long total, entries;
3549 get_total_entries(&tr->trace_buffer, &total, &entries);
3554 static void print_lat_help_header(struct seq_file *m)
3556 seq_puts(m, "# _------=> CPU# \n"
3557 "# / _-----=> irqs-off \n"
3558 "# | / _----=> need-resched \n"
3559 "# || / _---=> hardirq/softirq \n"
3560 "# ||| / _--=> preempt-depth \n"
3562 "# cmd pid ||||| time | caller \n"
3563 "# \\ / ||||| \\ | / \n");
3566 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3568 unsigned long total;
3569 unsigned long entries;
3571 get_total_entries(buf, &total, &entries);
3572 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3573 entries, total, num_online_cpus());
3577 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3580 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3582 print_event_info(buf, m);
3584 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3585 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3588 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3591 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3592 const char *space = " ";
3593 int prec = tgid ? 10 : 2;
3595 print_event_info(buf, m);
3597 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3598 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3599 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3600 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3601 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3602 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3603 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3607 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3609 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3610 struct trace_buffer *buf = iter->trace_buffer;
3611 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3612 struct tracer *type = iter->trace;
3613 unsigned long entries;
3614 unsigned long total;
3615 const char *name = "preemption";
3619 get_total_entries(buf, &total, &entries);
3621 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3623 seq_puts(m, "# -----------------------------------"
3624 "---------------------------------\n");
3625 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3626 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3627 nsecs_to_usecs(data->saved_latency),
3631 #if defined(CONFIG_PREEMPT_NONE)
3633 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3635 #elif defined(CONFIG_PREEMPT)
3640 /* These are reserved for later use */
3643 seq_printf(m, " #P:%d)\n", num_online_cpus());
3647 seq_puts(m, "# -----------------\n");
3648 seq_printf(m, "# | task: %.16s-%d "
3649 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3650 data->comm, data->pid,
3651 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3652 data->policy, data->rt_priority);
3653 seq_puts(m, "# -----------------\n");
3655 if (data->critical_start) {
3656 seq_puts(m, "# => started at: ");
3657 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3658 trace_print_seq(m, &iter->seq);
3659 seq_puts(m, "\n# => ended at: ");
3660 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3661 trace_print_seq(m, &iter->seq);
3662 seq_puts(m, "\n#\n");
3668 static void test_cpu_buff_start(struct trace_iterator *iter)
3670 struct trace_seq *s = &iter->seq;
3671 struct trace_array *tr = iter->tr;
3673 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3676 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3679 if (cpumask_available(iter->started) &&
3680 cpumask_test_cpu(iter->cpu, iter->started))
3683 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3686 if (cpumask_available(iter->started))
3687 cpumask_set_cpu(iter->cpu, iter->started);
3689 /* Don't print started cpu buffer for the first entry of the trace */
3691 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3695 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3697 struct trace_array *tr = iter->tr;
3698 struct trace_seq *s = &iter->seq;
3699 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3700 struct trace_entry *entry;
3701 struct trace_event *event;
3705 test_cpu_buff_start(iter);
3707 event = ftrace_find_event(entry->type);
3709 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3710 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3711 trace_print_lat_context(iter);
3713 trace_print_context(iter);
3716 if (trace_seq_has_overflowed(s))
3717 return TRACE_TYPE_PARTIAL_LINE;
3720 return event->funcs->trace(iter, sym_flags, event);
3722 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3724 return trace_handle_return(s);
3727 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3729 struct trace_array *tr = iter->tr;
3730 struct trace_seq *s = &iter->seq;
3731 struct trace_entry *entry;
3732 struct trace_event *event;
3736 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3737 trace_seq_printf(s, "%d %d %llu ",
3738 entry->pid, iter->cpu, iter->ts);
3740 if (trace_seq_has_overflowed(s))
3741 return TRACE_TYPE_PARTIAL_LINE;
3743 event = ftrace_find_event(entry->type);
3745 return event->funcs->raw(iter, 0, event);
3747 trace_seq_printf(s, "%d ?\n", entry->type);
3749 return trace_handle_return(s);
3752 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3754 struct trace_array *tr = iter->tr;
3755 struct trace_seq *s = &iter->seq;
3756 unsigned char newline = '\n';
3757 struct trace_entry *entry;
3758 struct trace_event *event;
3762 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3763 SEQ_PUT_HEX_FIELD(s, entry->pid);
3764 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3765 SEQ_PUT_HEX_FIELD(s, iter->ts);
3766 if (trace_seq_has_overflowed(s))
3767 return TRACE_TYPE_PARTIAL_LINE;
3770 event = ftrace_find_event(entry->type);
3772 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3773 if (ret != TRACE_TYPE_HANDLED)
3777 SEQ_PUT_FIELD(s, newline);
3779 return trace_handle_return(s);
3782 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3784 struct trace_array *tr = iter->tr;
3785 struct trace_seq *s = &iter->seq;
3786 struct trace_entry *entry;
3787 struct trace_event *event;
3791 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3792 SEQ_PUT_FIELD(s, entry->pid);
3793 SEQ_PUT_FIELD(s, iter->cpu);
3794 SEQ_PUT_FIELD(s, iter->ts);
3795 if (trace_seq_has_overflowed(s))
3796 return TRACE_TYPE_PARTIAL_LINE;
3799 event = ftrace_find_event(entry->type);
3800 return event ? event->funcs->binary(iter, 0, event) :
3804 int trace_empty(struct trace_iterator *iter)
3806 struct ring_buffer_iter *buf_iter;
3809 /* If we are looking at one CPU buffer, only check that one */
3810 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3811 cpu = iter->cpu_file;
3812 buf_iter = trace_buffer_iter(iter, cpu);
3814 if (!ring_buffer_iter_empty(buf_iter))
3817 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3823 for_each_tracing_cpu(cpu) {
3824 buf_iter = trace_buffer_iter(iter, cpu);
3826 if (!ring_buffer_iter_empty(buf_iter))
3829 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3837 /* Called with trace_event_read_lock() held. */
3838 enum print_line_t print_trace_line(struct trace_iterator *iter)
3840 struct trace_array *tr = iter->tr;
3841 unsigned long trace_flags = tr->trace_flags;
3842 enum print_line_t ret;
3844 if (iter->lost_events) {
3845 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3846 iter->cpu, iter->lost_events);
3847 if (trace_seq_has_overflowed(&iter->seq))
3848 return TRACE_TYPE_PARTIAL_LINE;
3851 if (iter->trace && iter->trace->print_line) {
3852 ret = iter->trace->print_line(iter);
3853 if (ret != TRACE_TYPE_UNHANDLED)
3857 if (iter->ent->type == TRACE_BPUTS &&
3858 trace_flags & TRACE_ITER_PRINTK &&
3859 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3860 return trace_print_bputs_msg_only(iter);
3862 if (iter->ent->type == TRACE_BPRINT &&
3863 trace_flags & TRACE_ITER_PRINTK &&
3864 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3865 return trace_print_bprintk_msg_only(iter);
3867 if (iter->ent->type == TRACE_PRINT &&
3868 trace_flags & TRACE_ITER_PRINTK &&
3869 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3870 return trace_print_printk_msg_only(iter);
3872 if (trace_flags & TRACE_ITER_BIN)
3873 return print_bin_fmt(iter);
3875 if (trace_flags & TRACE_ITER_HEX)
3876 return print_hex_fmt(iter);
3878 if (trace_flags & TRACE_ITER_RAW)
3879 return print_raw_fmt(iter);
3881 return print_trace_fmt(iter);
3884 void trace_latency_header(struct seq_file *m)
3886 struct trace_iterator *iter = m->private;
3887 struct trace_array *tr = iter->tr;
3889 /* print nothing if the buffers are empty */
3890 if (trace_empty(iter))
3893 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3894 print_trace_header(m, iter);
3896 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3897 print_lat_help_header(m);
3900 void trace_default_header(struct seq_file *m)
3902 struct trace_iterator *iter = m->private;
3903 struct trace_array *tr = iter->tr;
3904 unsigned long trace_flags = tr->trace_flags;
3906 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3909 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3910 /* print nothing if the buffers are empty */
3911 if (trace_empty(iter))
3913 print_trace_header(m, iter);
3914 if (!(trace_flags & TRACE_ITER_VERBOSE))
3915 print_lat_help_header(m);
3917 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3918 if (trace_flags & TRACE_ITER_IRQ_INFO)
3919 print_func_help_header_irq(iter->trace_buffer,
3922 print_func_help_header(iter->trace_buffer, m,
3928 static void test_ftrace_alive(struct seq_file *m)
3930 if (!ftrace_is_dead())
3932 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3933 "# MAY BE MISSING FUNCTION EVENTS\n");
3936 #ifdef CONFIG_TRACER_MAX_TRACE
3937 static void show_snapshot_main_help(struct seq_file *m)
3939 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3940 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3941 "# Takes a snapshot of the main buffer.\n"
3942 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3943 "# (Doesn't have to be '2' works with any number that\n"
3944 "# is not a '0' or '1')\n");
3947 static void show_snapshot_percpu_help(struct seq_file *m)
3949 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3950 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3951 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3952 "# Takes a snapshot of the main buffer for this cpu.\n");
3954 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3955 "# Must use main snapshot file to allocate.\n");
3957 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3958 "# (Doesn't have to be '2' works with any number that\n"
3959 "# is not a '0' or '1')\n");
3962 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3964 if (iter->tr->allocated_snapshot)
3965 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3967 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3969 seq_puts(m, "# Snapshot commands:\n");
3970 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3971 show_snapshot_main_help(m);
3973 show_snapshot_percpu_help(m);
3976 /* Should never be called */
3977 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3980 static int s_show(struct seq_file *m, void *v)
3982 struct trace_iterator *iter = v;
3985 if (iter->ent == NULL) {
3987 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3989 test_ftrace_alive(m);
3991 if (iter->snapshot && trace_empty(iter))
3992 print_snapshot_help(m, iter);
3993 else if (iter->trace && iter->trace->print_header)
3994 iter->trace->print_header(m);
3996 trace_default_header(m);
3998 } else if (iter->leftover) {
4000 * If we filled the seq_file buffer earlier, we
4001 * want to just show it now.
4003 ret = trace_print_seq(m, &iter->seq);
4005 /* ret should this time be zero, but you never know */
4006 iter->leftover = ret;
4009 print_trace_line(iter);
4010 ret = trace_print_seq(m, &iter->seq);
4012 * If we overflow the seq_file buffer, then it will
4013 * ask us for this data again at start up.
4015 * ret is 0 if seq_file write succeeded.
4018 iter->leftover = ret;
4025 * Should be used after trace_array_get(), trace_types_lock
4026 * ensures that i_cdev was already initialized.
4028 static inline int tracing_get_cpu(struct inode *inode)
4030 if (inode->i_cdev) /* See trace_create_cpu_file() */
4031 return (long)inode->i_cdev - 1;
4032 return RING_BUFFER_ALL_CPUS;
4035 static const struct seq_operations tracer_seq_ops = {
4042 static struct trace_iterator *
4043 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4045 struct trace_array *tr = inode->i_private;
4046 struct trace_iterator *iter;
4049 if (tracing_disabled)
4050 return ERR_PTR(-ENODEV);
4052 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4054 return ERR_PTR(-ENOMEM);
4056 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4058 if (!iter->buffer_iter)
4062 * We make a copy of the current tracer to avoid concurrent
4063 * changes on it while we are reading.
4065 mutex_lock(&trace_types_lock);
4066 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4070 *iter->trace = *tr->current_trace;
4072 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4077 #ifdef CONFIG_TRACER_MAX_TRACE
4078 /* Currently only the top directory has a snapshot */
4079 if (tr->current_trace->print_max || snapshot)
4080 iter->trace_buffer = &tr->max_buffer;
4083 iter->trace_buffer = &tr->trace_buffer;
4084 iter->snapshot = snapshot;
4086 iter->cpu_file = tracing_get_cpu(inode);
4087 mutex_init(&iter->mutex);
4089 /* Notify the tracer early; before we stop tracing. */
4090 if (iter->trace && iter->trace->open)
4091 iter->trace->open(iter);
4093 /* Annotate start of buffers if we had overruns */
4094 if (ring_buffer_overruns(iter->trace_buffer->buffer))
4095 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4097 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4098 if (trace_clocks[tr->clock_id].in_ns)
4099 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4101 /* stop the trace while dumping if we are not opening "snapshot" */
4102 if (!iter->snapshot)
4103 tracing_stop_tr(tr);
4105 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4106 for_each_tracing_cpu(cpu) {
4107 iter->buffer_iter[cpu] =
4108 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4111 ring_buffer_read_prepare_sync();
4112 for_each_tracing_cpu(cpu) {
4113 ring_buffer_read_start(iter->buffer_iter[cpu]);
4114 tracing_iter_reset(iter, cpu);
4117 cpu = iter->cpu_file;
4118 iter->buffer_iter[cpu] =
4119 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4121 ring_buffer_read_prepare_sync();
4122 ring_buffer_read_start(iter->buffer_iter[cpu]);
4123 tracing_iter_reset(iter, cpu);
4126 mutex_unlock(&trace_types_lock);
4131 mutex_unlock(&trace_types_lock);
4133 kfree(iter->buffer_iter);
4135 seq_release_private(inode, file);
4136 return ERR_PTR(-ENOMEM);
4139 int tracing_open_generic(struct inode *inode, struct file *filp)
4141 if (tracing_disabled)
4144 filp->private_data = inode->i_private;
4148 bool tracing_is_disabled(void)
4150 return (tracing_disabled) ? true: false;
4154 * Open and update trace_array ref count.
4155 * Must have the current trace_array passed to it.
4157 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4159 struct trace_array *tr = inode->i_private;
4161 if (tracing_disabled)
4164 if (trace_array_get(tr) < 0)
4167 filp->private_data = inode->i_private;
4172 static int tracing_release(struct inode *inode, struct file *file)
4174 struct trace_array *tr = inode->i_private;
4175 struct seq_file *m = file->private_data;
4176 struct trace_iterator *iter;
4179 if (!(file->f_mode & FMODE_READ)) {
4180 trace_array_put(tr);
4184 /* Writes do not use seq_file */
4186 mutex_lock(&trace_types_lock);
4188 for_each_tracing_cpu(cpu) {
4189 if (iter->buffer_iter[cpu])
4190 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4193 if (iter->trace && iter->trace->close)
4194 iter->trace->close(iter);
4196 if (!iter->snapshot)
4197 /* reenable tracing if it was previously enabled */
4198 tracing_start_tr(tr);
4200 __trace_array_put(tr);
4202 mutex_unlock(&trace_types_lock);
4204 mutex_destroy(&iter->mutex);
4205 free_cpumask_var(iter->started);
4207 kfree(iter->buffer_iter);
4208 seq_release_private(inode, file);
4213 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4215 struct trace_array *tr = inode->i_private;
4217 trace_array_put(tr);
4221 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4223 struct trace_array *tr = inode->i_private;
4225 trace_array_put(tr);
4227 return single_release(inode, file);
4230 static int tracing_open(struct inode *inode, struct file *file)
4232 struct trace_array *tr = inode->i_private;
4233 struct trace_iterator *iter;
4236 if (trace_array_get(tr) < 0)
4239 /* If this file was open for write, then erase contents */
4240 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4241 int cpu = tracing_get_cpu(inode);
4242 struct trace_buffer *trace_buf = &tr->trace_buffer;
4244 #ifdef CONFIG_TRACER_MAX_TRACE
4245 if (tr->current_trace->print_max)
4246 trace_buf = &tr->max_buffer;
4249 if (cpu == RING_BUFFER_ALL_CPUS)
4250 tracing_reset_online_cpus(trace_buf);
4252 tracing_reset(trace_buf, cpu);
4255 if (file->f_mode & FMODE_READ) {
4256 iter = __tracing_open(inode, file, false);
4258 ret = PTR_ERR(iter);
4259 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4260 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4264 trace_array_put(tr);
4270 * Some tracers are not suitable for instance buffers.
4271 * A tracer is always available for the global array (toplevel)
4272 * or if it explicitly states that it is.
4275 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4277 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4280 /* Find the next tracer that this trace array may use */
4281 static struct tracer *
4282 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4284 while (t && !trace_ok_for_array(t, tr))
4291 t_next(struct seq_file *m, void *v, loff_t *pos)
4293 struct trace_array *tr = m->private;
4294 struct tracer *t = v;
4299 t = get_tracer_for_array(tr, t->next);
4304 static void *t_start(struct seq_file *m, loff_t *pos)
4306 struct trace_array *tr = m->private;
4310 mutex_lock(&trace_types_lock);
4312 t = get_tracer_for_array(tr, trace_types);
4313 for (; t && l < *pos; t = t_next(m, t, &l))
4319 static void t_stop(struct seq_file *m, void *p)
4321 mutex_unlock(&trace_types_lock);
4324 static int t_show(struct seq_file *m, void *v)
4326 struct tracer *t = v;
4331 seq_puts(m, t->name);
4340 static const struct seq_operations show_traces_seq_ops = {
4347 static int show_traces_open(struct inode *inode, struct file *file)
4349 struct trace_array *tr = inode->i_private;
4353 if (tracing_disabled)
4356 ret = seq_open(file, &show_traces_seq_ops);
4360 m = file->private_data;
4367 tracing_write_stub(struct file *filp, const char __user *ubuf,
4368 size_t count, loff_t *ppos)
4373 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4377 if (file->f_mode & FMODE_READ)
4378 ret = seq_lseek(file, offset, whence);
4380 file->f_pos = ret = 0;
4385 static const struct file_operations tracing_fops = {
4386 .open = tracing_open,
4388 .write = tracing_write_stub,
4389 .llseek = tracing_lseek,
4390 .release = tracing_release,
4393 static const struct file_operations show_traces_fops = {
4394 .open = show_traces_open,
4396 .release = seq_release,
4397 .llseek = seq_lseek,
4401 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4402 size_t count, loff_t *ppos)
4404 struct trace_array *tr = file_inode(filp)->i_private;
4408 len = snprintf(NULL, 0, "%*pb\n",
4409 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4410 mask_str = kmalloc(len, GFP_KERNEL);
4414 len = snprintf(mask_str, len, "%*pb\n",
4415 cpumask_pr_args(tr->tracing_cpumask));
4420 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4429 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4430 size_t count, loff_t *ppos)
4432 struct trace_array *tr = file_inode(filp)->i_private;
4433 cpumask_var_t tracing_cpumask_new;
4436 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4439 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4443 local_irq_disable();
4444 arch_spin_lock(&tr->max_lock);
4445 for_each_tracing_cpu(cpu) {
4447 * Increase/decrease the disabled counter if we are
4448 * about to flip a bit in the cpumask:
4450 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4451 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4452 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4453 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4455 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4456 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4457 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4458 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4461 arch_spin_unlock(&tr->max_lock);
4464 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4465 free_cpumask_var(tracing_cpumask_new);
4470 free_cpumask_var(tracing_cpumask_new);
4475 static const struct file_operations tracing_cpumask_fops = {
4476 .open = tracing_open_generic_tr,
4477 .read = tracing_cpumask_read,
4478 .write = tracing_cpumask_write,
4479 .release = tracing_release_generic_tr,
4480 .llseek = generic_file_llseek,
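/*
 * Illustrative usage (not part of the original file), in the same style
 * as the mini-HOWTO below: limit tracing to CPUs 0 and 1 by writing a
 * hex mask to the tracefs file backed by these operations:
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 */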
4483 static int tracing_trace_options_show(struct seq_file *m, void *v)
4485 struct tracer_opt *trace_opts;
4486 struct trace_array *tr = m->private;
4490 mutex_lock(&trace_types_lock);
4491 tracer_flags = tr->current_trace->flags->val;
4492 trace_opts = tr->current_trace->flags->opts;
4494 for (i = 0; trace_options[i]; i++) {
4495 if (tr->trace_flags & (1 << i))
4496 seq_printf(m, "%s\n", trace_options[i]);
4498 seq_printf(m, "no%s\n", trace_options[i]);
4501 for (i = 0; trace_opts[i].name; i++) {
4502 if (tracer_flags & trace_opts[i].bit)
4503 seq_printf(m, "%s\n", trace_opts[i].name);
4505 seq_printf(m, "no%s\n", trace_opts[i].name);
4507 mutex_unlock(&trace_types_lock);
4512 static int __set_tracer_option(struct trace_array *tr,
4513 struct tracer_flags *tracer_flags,
4514 struct tracer_opt *opts, int neg)
4516 struct tracer *trace = tracer_flags->trace;
4519 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4524 tracer_flags->val &= ~opts->bit;
4526 tracer_flags->val |= opts->bit;
4530 /* Try to assign a tracer specific option */
4531 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4533 struct tracer *trace = tr->current_trace;
4534 struct tracer_flags *tracer_flags = trace->flags;
4535 struct tracer_opt *opts = NULL;
4538 for (i = 0; tracer_flags->opts[i].name; i++) {
4539 opts = &tracer_flags->opts[i];
4541 if (strcmp(cmp, opts->name) == 0)
4542 return __set_tracer_option(tr, trace->flags, opts, neg);
4548 /* Some tracers require overwrite to stay enabled */
4549 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4551 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4557 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4559 /* do nothing if flag is already set */
4560 if (!!(tr->trace_flags & mask) == !!enabled)
4563 /* Give the tracer a chance to approve the change */
4564 if (tr->current_trace->flag_changed)
4565 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4569 tr->trace_flags |= mask;
4571 tr->trace_flags &= ~mask;
4573 if (mask == TRACE_ITER_RECORD_CMD)
4574 trace_event_enable_cmd_record(enabled);
4576 if (mask == TRACE_ITER_RECORD_TGID) {
4578 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4582 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4586 trace_event_enable_tgid_record(enabled);
4589 if (mask == TRACE_ITER_EVENT_FORK)
4590 trace_event_follow_fork(tr, enabled);
4592 if (mask == TRACE_ITER_FUNC_FORK)
4593 ftrace_pid_follow_fork(tr, enabled);
4595 if (mask == TRACE_ITER_OVERWRITE) {
4596 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4597 #ifdef CONFIG_TRACER_MAX_TRACE
4598 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4602 if (mask == TRACE_ITER_PRINTK) {
4603 trace_printk_start_stop_comm(enabled);
4604 trace_printk_control(enabled);
4610 static int trace_set_options(struct trace_array *tr, char *option)
4615 size_t orig_len = strlen(option);
4618 cmp = strstrip(option);
4620 len = str_has_prefix(cmp, "no");
4626 mutex_lock(&trace_types_lock);
4628 ret = match_string(trace_options, -1, cmp);
4629 /* If no option could be set, test the specific tracer options */
4631 ret = set_tracer_option(tr, cmp, neg);
4633 ret = set_tracer_flag(tr, 1 << ret, !neg);
4635 mutex_unlock(&trace_types_lock);
4638 * If the first trailing whitespace is replaced with '\0' by strstrip,
4639 * turn it back into a space.
4641 if (orig_len > strlen(option))
4642 option[strlen(option)] = ' ';
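/*
 * Illustrative usage (not part of the original file): the same parser
 * serves both the trace_options file and the trace_options= boot
 * parameter (applied right below), with a "no" prefix clearing a flag:
 *
 *	# echo sym-offset > /sys/kernel/tracing/trace_options
 *	# echo noprint-parent > /sys/kernel/tracing/trace_options
 */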
4647 static void __init apply_trace_boot_options(void)
4649 char *buf = trace_boot_options_buf;
4653 option = strsep(&buf, ",");
4659 trace_set_options(&global_trace, option);
4661 /* Put back the comma to allow this to be called again */
4668 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4669 size_t cnt, loff_t *ppos)
4671 struct seq_file *m = filp->private_data;
4672 struct trace_array *tr = m->private;
4676 if (cnt >= sizeof(buf))
4679 if (copy_from_user(buf, ubuf, cnt))
4684 ret = trace_set_options(tr, buf);
4693 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4695 struct trace_array *tr = inode->i_private;
4698 if (tracing_disabled)
4701 if (trace_array_get(tr) < 0)
4704 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4706 trace_array_put(tr);
4711 static const struct file_operations tracing_iter_fops = {
4712 .open = tracing_trace_options_open,
4714 .llseek = seq_lseek,
4715 .release = tracing_single_release_tr,
4716 .write = tracing_trace_options_write,
4719 static const char readme_msg[] =
4720 "tracing mini-HOWTO:\n\n"
4721 "# echo 0 > tracing_on : quick way to disable tracing\n"
4722 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4723 " Important files:\n"
4724 " trace\t\t\t- The static contents of the buffer\n"
4725 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4726 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4727 " current_tracer\t- function and latency tracers\n"
4728 " available_tracers\t- list of configured tracers for current_tracer\n"
4729 " error_log\t- error log for failed commands (that support it)\n"
4730 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4731 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4732 " trace_clock\t\t-change the clock used to order events\n"
4733 " local: Per cpu clock but may not be synced across CPUs\n"
4734 " global: Synced across CPUs but slows tracing down.\n"
4735 " counter: Not a clock, but just an increment\n"
4736 " uptime: Jiffy counter from time of boot\n"
4737 " perf: Same clock that perf events use\n"
4738 #ifdef CONFIG_X86_64
4739 " x86-tsc: TSC cycle counter\n"
4741 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4742 " delta: Delta difference against a buffer-wide timestamp\n"
4743 " absolute: Absolute (standalone) timestamp\n"
4744 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4745 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4746 " tracing_cpumask\t- Limit which CPUs to trace\n"
4747 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4748 "\t\t\t Remove sub-buffer with rmdir\n"
4749 " trace_options\t\t- Set format or modify how tracing happens\n"
4750 "\t\t\t Disable an option by prefixing 'no' to the\n"
4751 "\t\t\t option name\n"
4752 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4753 #ifdef CONFIG_DYNAMIC_FTRACE
4754 "\n available_filter_functions - list of functions that can be filtered on\n"
4755 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4756 "\t\t\t functions\n"
4757 "\t accepts: func_full_name or glob-matching-pattern\n"
4758 "\t modules: Can select a group via module\n"
4759 "\t Format: :mod:<module-name>\n"
4760 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4761 "\t triggers: a command to perform when function is hit\n"
4762 "\t Format: <function>:<trigger>[:count]\n"
4763 "\t trigger: traceon, traceoff\n"
4764 "\t\t enable_event:<system>:<event>\n"
4765 "\t\t disable_event:<system>:<event>\n"
4766 #ifdef CONFIG_STACKTRACE
4769 #ifdef CONFIG_TRACER_SNAPSHOT
4774 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4775 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4776 "\t The first one will disable tracing every time do_fault is hit\n"
4777 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4778 "\t The first time do trap is hit and it disables tracing, the\n"
4779 "\t counter will decrement to 2. If tracing is already disabled,\n"
4780 "\t the counter will not decrement. It only decrements when the\n"
4781 "\t trigger did work\n"
4782 "\t To remove trigger without count:\n"
4783 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4784 "\t To remove trigger with a count:\n"
4785 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4786 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4787 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4788 "\t modules: Can select a group via module command :mod:\n"
4789 "\t Does not accept triggers\n"
4790 #endif /* CONFIG_DYNAMIC_FTRACE */
4791 #ifdef CONFIG_FUNCTION_TRACER
4792 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4795 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4796 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4797 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4798 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4800 #ifdef CONFIG_TRACER_SNAPSHOT
4801 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4802 "\t\t\t snapshot buffer. Read the contents for more\n"
4803 "\t\t\t information\n"
4805 #ifdef CONFIG_STACK_TRACER
4806 " stack_trace\t\t- Shows the max stack trace when active\n"
4807 " stack_max_size\t- Shows current max stack size that was traced\n"
4808 "\t\t\t Write into this file to reset the max size (trigger a\n"
4809 "\t\t\t new trace)\n"
4810 #ifdef CONFIG_DYNAMIC_FTRACE
4811 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4814 #endif /* CONFIG_STACK_TRACER */
4815 #ifdef CONFIG_DYNAMIC_EVENTS
4816 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4817 "\t\t\t Write into this file to define/undefine new trace events.\n"
4819 #ifdef CONFIG_KPROBE_EVENTS
4820 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4821 "\t\t\t Write into this file to define/undefine new trace events.\n"
4823 #ifdef CONFIG_UPROBE_EVENTS
4824 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4825 "\t\t\t Write into this file to define/undefine new trace events.\n"
4827 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4828 "\t accepts: event-definitions (one definition per line)\n"
4829 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4830 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4831 #ifdef CONFIG_HIST_TRIGGERS
4832 "\t s:[synthetic/]<event> <field> [<field>]\n"
4834 "\t -:[<group>/]<event>\n"
4835 #ifdef CONFIG_KPROBE_EVENTS
4836 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4837 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4839 #ifdef CONFIG_UPROBE_EVENTS
4840 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4842 "\t args: <name>=fetcharg[:type]\n"
4843 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4844 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4845 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
4847 "\t $stack<index>, $stack, $retval, $comm,\n"
4849 "\t +|-[u]<offset>(<fetcharg>)\n"
4850 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4851 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
4852 "\t <type>\\[<array-size>\\]\n"
4853 #ifdef CONFIG_HIST_TRIGGERS
4854 "\t field: <stype> <name>;\n"
4855 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4856 "\t [unsigned] char/int/long\n"
4859 " events/\t\t- Directory containing all trace event subsystems:\n"
4860 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4861 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4862 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4864 " filter\t\t- If set, only events passing filter are traced\n"
4865 " events/<system>/<event>/\t- Directory containing control files for\n"
4867 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4868 " filter\t\t- If set, only events passing filter are traced\n"
4869 " trigger\t\t- If set, a command to perform when event is hit\n"
4870 "\t Format: <trigger>[:count][if <filter>]\n"
4871 "\t trigger: traceon, traceoff\n"
4872 "\t enable_event:<system>:<event>\n"
4873 "\t disable_event:<system>:<event>\n"
4874 #ifdef CONFIG_HIST_TRIGGERS
4875 "\t enable_hist:<system>:<event>\n"
4876 "\t disable_hist:<system>:<event>\n"
4878 #ifdef CONFIG_STACKTRACE
4881 #ifdef CONFIG_TRACER_SNAPSHOT
4884 #ifdef CONFIG_HIST_TRIGGERS
4885 "\t\t hist (see below)\n"
4887 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4888 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4889 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4890 "\t events/block/block_unplug/trigger\n"
4891 "\t The first disables tracing every time block_unplug is hit.\n"
4892 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4893 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4894 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4895 "\t Like function triggers, the counter is only decremented if it\n"
4896 "\t enabled or disabled tracing.\n"
4897 "\t To remove a trigger without a count:\n"
4898 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4899 "\t To remove a trigger with a count:\n"
4900 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4901 "\t Filters can be ignored when removing a trigger.\n"
4902 #ifdef CONFIG_HIST_TRIGGERS
4903 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4904 "\t Format: hist:keys=<field1[,field2,...]>\n"
4905 "\t [:values=<field1[,field2,...]>]\n"
4906 "\t [:sort=<field1[,field2,...]>]\n"
4907 "\t [:size=#entries]\n"
4908 "\t [:pause][:continue][:clear]\n"
4909 "\t [:name=histname1]\n"
4910 "\t [:<handler>.<action>]\n"
4911 "\t [if <filter>]\n\n"
4912 "\t When a matching event is hit, an entry is added to a hash\n"
4913 "\t table using the key(s) and value(s) named, and the value of a\n"
4914 "\t sum called 'hitcount' is incremented. Keys and values\n"
4915 "\t correspond to fields in the event's format description. Keys\n"
4916 "\t can be any field, or the special string 'stacktrace'.\n"
4917 "\t Compound keys consisting of up to two fields can be specified\n"
4918 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4919 "\t fields. Sort keys consisting of up to two fields can be\n"
4920 "\t specified using the 'sort' keyword. The sort direction can\n"
4921 "\t be modified by appending '.descending' or '.ascending' to a\n"
4922 "\t sort field. The 'size' parameter can be used to specify more\n"
4923 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4924 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4925 "\t its histogram data will be shared with other triggers of the\n"
4926 "\t same name, and trigger hits will update this common data.\n\n"
4927 "\t Reading the 'hist' file for the event will dump the hash\n"
4928 "\t table in its entirety to stdout. If there are multiple hist\n"
4929 "\t triggers attached to an event, there will be a table for each\n"
4930 "\t trigger in the output. The table displayed for a named\n"
4931 "\t trigger will be the same as any other instance having the\n"
4932 "\t same name. The default format used to display a given field\n"
4933 "\t can be modified by appending any of the following modifiers\n"
4934 "\t to the field name, as applicable:\n\n"
4935 "\t .hex display a number as a hex value\n"
4936 "\t .sym display an address as a symbol\n"
4937 "\t .sym-offset display an address as a symbol and offset\n"
4938 "\t .execname display a common_pid as a program name\n"
4939 "\t .syscall display a syscall id as a syscall name\n"
4940 "\t .log2 display log2 value rather than raw number\n"
4941 "\t .usecs display a common_timestamp in microseconds\n\n"
4942 "\t The 'pause' parameter can be used to pause an existing hist\n"
4943 "\t trigger or to start a hist trigger but not log any events\n"
4944 "\t until told to do so. 'continue' can be used to start or\n"
4945 "\t restart a paused hist trigger.\n\n"
4946 "\t The 'clear' parameter will clear the contents of a running\n"
4947 "\t hist trigger and leave its current paused/active state\n"
4949 "\t The enable_hist and disable_hist triggers can be used to\n"
4950 "\t have one event conditionally start and stop another event's\n"
4951 "\t already-attached hist trigger. The syntax is analogous to\n"
4952 "\t the enable_event and disable_event triggers.\n\n"
4953 "\t Hist trigger handlers and actions are executed whenever a\n"
4954 "\t a histogram entry is added or updated. They take the form:\n\n"
4955 "\t <handler>.<action>\n\n"
4956 "\t The available handlers are:\n\n"
4957 "\t onmatch(matching.event) - invoke on addition or update\n"
4958 "\t onmax(var) - invoke if var exceeds current max\n"
4959 "\t onchange(var) - invoke action if var changes\n\n"
4960 "\t The available actions are:\n\n"
4961 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
4962 "\t save(field,...) - save current event fields\n"
4963 #ifdef CONFIG_TRACER_SNAPSHOT
4964 "\t snapshot() - snapshot the trace buffer\n"
4970 tracing_readme_read(struct file *filp, char __user *ubuf,
4971 size_t cnt, loff_t *ppos)
4973 return simple_read_from_buffer(ubuf, cnt, ppos,
4974 readme_msg, strlen(readme_msg));
4977 static const struct file_operations tracing_readme_fops = {
4978 .open = tracing_open_generic,
4979 .read = tracing_readme_read,
4980 .llseek = generic_file_llseek,
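/*
 * seq_file iterator for the saved_tgids file: walk the pid->tgid map and
 * emit one "<pid> <tgid>" pair for each recorded entry.
 */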
4983 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4987 if (*pos || m->count)
4992 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4993 if (trace_find_tgid(*ptr))
5000 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5010 v = saved_tgids_next(m, v, &l);
5018 static void saved_tgids_stop(struct seq_file *m, void *v)
5022 static int saved_tgids_show(struct seq_file *m, void *v)
5024 int pid = (int *)v - tgid_map;
5026 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5030 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5031 .start = saved_tgids_start,
5032 .stop = saved_tgids_stop,
5033 .next = saved_tgids_next,
5034 .show = saved_tgids_show,
5037 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5039 if (tracing_disabled)
5042 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5046 static const struct file_operations tracing_saved_tgids_fops = {
5047 .open = tracing_saved_tgids_open,
5049 .llseek = seq_lseek,
5050 .release = seq_release,
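/*
 * seq_file iterator for the saved_cmdlines file: walk map_cmdline_to_pid
 * and print "<pid> <comm>" for each cached command line.
 */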
5053 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5055 unsigned int *ptr = v;
5057 if (*pos || m->count)
5062 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5064 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5073 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5079 arch_spin_lock(&trace_cmdline_lock);
5081 v = &savedcmd->map_cmdline_to_pid[0];
5083 v = saved_cmdlines_next(m, v, &l);
5091 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5093 arch_spin_unlock(&trace_cmdline_lock);
5097 static int saved_cmdlines_show(struct seq_file *m, void *v)
5099 char buf[TASK_COMM_LEN];
5100 unsigned int *pid = v;
5102 __trace_find_cmdline(*pid, buf);
5103 seq_printf(m, "%d %s\n", *pid, buf);
5107 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5108 .start = saved_cmdlines_start,
5109 .next = saved_cmdlines_next,
5110 .stop = saved_cmdlines_stop,
5111 .show = saved_cmdlines_show,
5114 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5116 if (tracing_disabled)
5119 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5122 static const struct file_operations tracing_saved_cmdlines_fops = {
5123 .open = tracing_saved_cmdlines_open,
5125 .llseek = seq_lseek,
5126 .release = seq_release,
5130 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5131 size_t cnt, loff_t *ppos)
5136 arch_spin_lock(&trace_cmdline_lock);
5137 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5138 arch_spin_unlock(&trace_cmdline_lock);
5140 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5143 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5145 kfree(s->saved_cmdlines);
5146 kfree(s->map_cmdline_to_pid);
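/*
 * Replace the saved-cmdlines cache with a newly allocated buffer of @val
 * entries; the old buffer is swapped out under trace_cmdline_lock and
 * then freed.
 */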
5150 static int tracing_resize_saved_cmdlines(unsigned int val)
5152 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5154 s = kmalloc(sizeof(*s), GFP_KERNEL);
5158 if (allocate_cmdlines_buffer(val, s) < 0) {
5163 arch_spin_lock(&trace_cmdline_lock);
5164 savedcmd_temp = savedcmd;
5166 arch_spin_unlock(&trace_cmdline_lock);
5167 free_saved_cmdlines_buffer(savedcmd_temp);
5173 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5174 size_t cnt, loff_t *ppos)
5179 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5183 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5184 if (!val || val > PID_MAX_DEFAULT)
5187 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5196 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5197 .open = tracing_open_generic,
5198 .read = tracing_saved_cmdlines_size_read,
5199 .write = tracing_saved_cmdlines_size_write,
5202 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
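/*
 * The eval_map file exposes the string -> value translations recorded for
 * trace events. The seq_file walks the chained trace_eval_map_item arrays,
 * skipping the head/tail bookkeeping entries that link them together.
 */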
5203 static union trace_eval_map_item *
5204 update_eval_map(union trace_eval_map_item *ptr)
5206 if (!ptr->map.eval_string) {
5207 if (ptr->tail.next) {
5208 ptr = ptr->tail.next;
5209 /* Set ptr to the next real item (skip head) */
5217 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5219 union trace_eval_map_item *ptr = v;
5222 * Paranoid! If ptr points to end, we don't want to increment past it.
5223 * This really should never happen.
5225 ptr = update_eval_map(ptr);
5226 if (WARN_ON_ONCE(!ptr))
5233 ptr = update_eval_map(ptr);
5238 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5240 union trace_eval_map_item *v;
5243 mutex_lock(&trace_eval_mutex);
5245 v = trace_eval_maps;
5249 while (v && l < *pos) {
5250 v = eval_map_next(m, v, &l);
5256 static void eval_map_stop(struct seq_file *m, void *v)
5258 mutex_unlock(&trace_eval_mutex);
5261 static int eval_map_show(struct seq_file *m, void *v)
5263 union trace_eval_map_item *ptr = v;
5265 seq_printf(m, "%s %ld (%s)\n",
5266 ptr->map.eval_string, ptr->map.eval_value,
5272 static const struct seq_operations tracing_eval_map_seq_ops = {
5273 .start = eval_map_start,
5274 .next = eval_map_next,
5275 .stop = eval_map_stop,
5276 .show = eval_map_show,
5279 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5281 if (tracing_disabled)
5284 return seq_open(filp, &tracing_eval_map_seq_ops);
5287 static const struct file_operations tracing_eval_map_fops = {
5288 .open = tracing_eval_map_open,
5290 .llseek = seq_lseek,
5291 .release = seq_release,
5294 static inline union trace_eval_map_item *
5295 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5297 /* Return tail of array given the head */
5298 return ptr + ptr->head.length + 1;
5302 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5305 struct trace_eval_map **stop;
5306 struct trace_eval_map **map;
5307 union trace_eval_map_item *map_array;
5308 union trace_eval_map_item *ptr;
5313 * The trace_eval_maps contains the map plus a head and tail item,
5314 * where the head holds the module and length of array, and the
5315 * tail holds a pointer to the next list.
5317 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5319 pr_warn("Unable to allocate trace eval mapping\n");
5323 mutex_lock(&trace_eval_mutex);
5325 if (!trace_eval_maps)
5326 trace_eval_maps = map_array;
5328 ptr = trace_eval_maps;
5330 ptr = trace_eval_jmp_to_tail(ptr);
5331 if (!ptr->tail.next)
5333 ptr = ptr->tail.next;
5336 ptr->tail.next = map_array;
5338 map_array->head.mod = mod;
5339 map_array->head.length = len;
5342 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5343 map_array->map = **map;
5346 memset(map_array, 0, sizeof(*map_array));
5348 mutex_unlock(&trace_eval_mutex);
5351 static void trace_create_eval_file(struct dentry *d_tracer)
5353 trace_create_file("eval_map", 0444, d_tracer,
5354 NULL, &tracing_eval_map_fops);
5357 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5358 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5359 static inline void trace_insert_eval_map_file(struct module *mod,
5360 struct trace_eval_map **start, int len) { }
5361 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5363 static void trace_insert_eval_map(struct module *mod,
5364 struct trace_eval_map **start, int len)
5366 struct trace_eval_map **map;
5373 trace_event_eval_update(map, len);
5375 trace_insert_eval_map_file(mod, start, len);
5379 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5380 size_t cnt, loff_t *ppos)
5382 struct trace_array *tr = filp->private_data;
5383 char buf[MAX_TRACER_SIZE+2];
5386 mutex_lock(&trace_types_lock);
5387 r = sprintf(buf, "%s\n", tr->current_trace->name);
5388 mutex_unlock(&trace_types_lock);
5390 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5393 int tracer_init(struct tracer *t, struct trace_array *tr)
5395 tracing_reset_online_cpus(&tr->trace_buffer);
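/* Set the recorded entry count for every CPU of @buf to @val. */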
5399 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5403 for_each_tracing_cpu(cpu)
5404 per_cpu_ptr(buf->data, cpu)->entries = val;
5407 #ifdef CONFIG_TRACER_MAX_TRACE
5408 /* resize @tr's buffer to the size of @size_tr's entries */
5409 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5410 struct trace_buffer *size_buf, int cpu_id)
5414 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5415 for_each_tracing_cpu(cpu) {
5416 ret = ring_buffer_resize(trace_buf->buffer,
5417 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5420 per_cpu_ptr(trace_buf->data, cpu)->entries =
5421 per_cpu_ptr(size_buf->data, cpu)->entries;
5424 ret = ring_buffer_resize(trace_buf->buffer,
5425 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5427 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5428 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5433 #endif /* CONFIG_TRACER_MAX_TRACE */
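/*
 * Resize the ring buffer of @tr (one CPU or all CPUs). If the current
 * tracer uses the snapshot (max) buffer, that buffer is resized as well
 * so the two stay swappable.
 */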
5435 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5436 unsigned long size, int cpu)
5441 * If kernel or user changes the size of the ring buffer
5442 * we use the size that was given, and we can forget about
5443 * expanding it later.
5445 ring_buffer_expanded = true;
5447 /* May be called before buffers are initialized */
5448 if (!tr->trace_buffer.buffer)
5451 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5455 #ifdef CONFIG_TRACER_MAX_TRACE
5456 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5457 !tr->current_trace->use_max_tr)
5460 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5462 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5463 &tr->trace_buffer, cpu);
5466 * AARGH! We are left with different
5467 * size max buffer!!!!
5468 * The max buffer is our "snapshot" buffer.
5469 * When a tracer needs a snapshot (one of the
5470 * latency tracers), it swaps the max buffer
5471 * with the saved snap shot. We succeeded to
5472 * update the size of the main buffer, but failed to
5473 * update the size of the max buffer. But when we tried
5474 * to reset the main buffer to the original size, we
5475 * failed there too. This is very unlikely to
5476 * happen, but if it does, warn and kill all
5480 tracing_disabled = 1;
5485 if (cpu == RING_BUFFER_ALL_CPUS)
5486 set_buffer_entries(&tr->max_buffer, size);
5488 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5491 #endif /* CONFIG_TRACER_MAX_TRACE */
5493 if (cpu == RING_BUFFER_ALL_CPUS)
5494 set_buffer_entries(&tr->trace_buffer, size);
5496 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5501 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5502 unsigned long size, int cpu_id)
5506 mutex_lock(&trace_types_lock);
5508 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5509 /* make sure this cpu is enabled in the mask */
5510 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5516 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5521 mutex_unlock(&trace_types_lock);
5528 * tracing_update_buffers - used by tracing facility to expand ring buffers
5530 * To save memory when tracing is never used on a system that has it
5531 * configured in, the ring buffers are set to a minimum size. But once
5532 * a user starts to use the tracing facility, they need to grow
5533 * to their default size.
5535 * This function is to be called when a tracer is about to be used.
5537 int tracing_update_buffers(void)
5541 mutex_lock(&trace_types_lock);
5542 if (!ring_buffer_expanded)
5543 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5544 RING_BUFFER_ALL_CPUS);
5545 mutex_unlock(&trace_types_lock);
5550 struct trace_option_dentry;
5553 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5556 * Used to clear out the tracer before deletion of an instance.
5557 * Must have trace_types_lock held.
5559 static void tracing_set_nop(struct trace_array *tr)
5561 if (tr->current_trace == &nop_trace)
5564 tr->current_trace->enabled--;
5566 if (tr->current_trace->reset)
5567 tr->current_trace->reset(tr);
5569 tr->current_trace = &nop_trace;
5572 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5574 /* Only enable if the directory has been created already. */
5578 create_trace_option_files(tr, t);
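/* Switch @tr to the tracer named @buf, expanding the ring buffer on first use. */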
5581 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5584 #ifdef CONFIG_TRACER_MAX_TRACE
5589 mutex_lock(&trace_types_lock);
5591 if (!ring_buffer_expanded) {
5592 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5593 RING_BUFFER_ALL_CPUS);
5599 for (t = trace_types; t; t = t->next) {
5600 if (strcmp(t->name, buf) == 0)
5607 if (t == tr->current_trace)
5610 #ifdef CONFIG_TRACER_SNAPSHOT
5611 if (t->use_max_tr) {
5612 arch_spin_lock(&tr->max_lock);
5613 if (tr->cond_snapshot)
5615 arch_spin_unlock(&tr->max_lock);
5620 /* Some tracers won't work on kernel command line */
5621 if (system_state < SYSTEM_RUNNING && t->noboot) {
5622 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5627 /* Some tracers are only allowed for the top level buffer */
5628 if (!trace_ok_for_array(t, tr)) {
5633 /* If trace pipe files are being read, we can't change the tracer */
5634 if (tr->current_trace->ref) {
5639 trace_branch_disable();
5641 tr->current_trace->enabled--;
5643 if (tr->current_trace->reset)
5644 tr->current_trace->reset(tr);
5646 /* Current trace needs to be nop_trace before synchronize_rcu */
5647 tr->current_trace = &nop_trace;
5649 #ifdef CONFIG_TRACER_MAX_TRACE
5650 had_max_tr = tr->allocated_snapshot;
5652 if (had_max_tr && !t->use_max_tr) {
5654 * We need to make sure that the update_max_tr sees that
5655 * current_trace changed to nop_trace to keep it from
5656 * swapping the buffers after we resize it.
5657 * The update_max_tr is called with interrupts disabled,
5658 * so a synchronize_rcu() is sufficient.
5665 #ifdef CONFIG_TRACER_MAX_TRACE
5666 if (t->use_max_tr && !had_max_tr) {
5667 ret = tracing_alloc_snapshot_instance(tr);
5674 ret = tracer_init(t, tr);
5679 tr->current_trace = t;
5680 tr->current_trace->enabled++;
5681 trace_branch_enable(tr);
5683 mutex_unlock(&trace_types_lock);
5689 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5690 size_t cnt, loff_t *ppos)
5692 struct trace_array *tr = filp->private_data;
5693 char buf[MAX_TRACER_SIZE+1];
5700 if (cnt > MAX_TRACER_SIZE)
5701 cnt = MAX_TRACER_SIZE;
5703 if (copy_from_user(buf, ubuf, cnt))
5708 /* strip ending whitespace. */
5709 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5712 err = tracing_set_tracer(tr, buf);
5722 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5723 size_t cnt, loff_t *ppos)
5728 r = snprintf(buf, sizeof(buf), "%ld\n",
5729 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5730 if (r > sizeof(buf))
5732 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5736 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5737 size_t cnt, loff_t *ppos)
5742 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5752 tracing_thresh_read(struct file *filp, char __user *ubuf,
5753 size_t cnt, loff_t *ppos)
5755 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5759 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5760 size_t cnt, loff_t *ppos)
5762 struct trace_array *tr = filp->private_data;
5765 mutex_lock(&trace_types_lock);
5766 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5770 if (tr->current_trace->update_thresh) {
5771 ret = tr->current_trace->update_thresh(tr);
5778 mutex_unlock(&trace_types_lock);
5783 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5786 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5787 size_t cnt, loff_t *ppos)
5789 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5793 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5794 size_t cnt, loff_t *ppos)
5796 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
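/* Open trace_pipe: allocate a private trace_iterator for this consuming reader. */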
5801 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5803 struct trace_array *tr = inode->i_private;
5804 struct trace_iterator *iter;
5807 if (tracing_disabled)
5810 if (trace_array_get(tr) < 0)
5813 mutex_lock(&trace_types_lock);
5815 /* create a buffer to store the information to pass to userspace */
5816 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5819 __trace_array_put(tr);
5823 trace_seq_init(&iter->seq);
5824 iter->trace = tr->current_trace;
5826 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5831 /* trace pipe does not show start of buffer */
5832 cpumask_setall(iter->started);
5834 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5835 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5837 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5838 if (trace_clocks[tr->clock_id].in_ns)
5839 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5842 iter->trace_buffer = &tr->trace_buffer;
5843 iter->cpu_file = tracing_get_cpu(inode);
5844 mutex_init(&iter->mutex);
5845 filp->private_data = iter;
5847 if (iter->trace->pipe_open)
5848 iter->trace->pipe_open(iter);
5850 nonseekable_open(inode, filp);
5852 tr->current_trace->ref++;
5854 mutex_unlock(&trace_types_lock);
5859 __trace_array_put(tr);
5860 mutex_unlock(&trace_types_lock);
5864 static int tracing_release_pipe(struct inode *inode, struct file *file)
5866 struct trace_iterator *iter = file->private_data;
5867 struct trace_array *tr = inode->i_private;
5869 mutex_lock(&trace_types_lock);
5871 tr->current_trace->ref--;
5873 if (iter->trace->pipe_close)
5874 iter->trace->pipe_close(iter);
5876 mutex_unlock(&trace_types_lock);
5878 free_cpumask_var(iter->started);
5879 mutex_destroy(&iter->mutex);
5882 trace_array_put(tr);
5888 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5890 struct trace_array *tr = iter->tr;
5892 /* Iterators are static, they should be filled or empty */
5893 if (trace_buffer_iter(iter, iter->cpu_file))
5894 return EPOLLIN | EPOLLRDNORM;
5896 if (tr->trace_flags & TRACE_ITER_BLOCK)
5898 * Always select as readable when in blocking mode
5900 return EPOLLIN | EPOLLRDNORM;
5902 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5907 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5909 struct trace_iterator *iter = filp->private_data;
5911 return trace_poll(iter, filp, poll_table);
5914 /* Must be called with iter->mutex held. */
5915 static int tracing_wait_pipe(struct file *filp)
5917 struct trace_iterator *iter = filp->private_data;
5920 while (trace_empty(iter)) {
5922 if ((filp->f_flags & O_NONBLOCK)) {
5927 * We block until we have read something and tracing is disabled.
5928 * We still block if tracing is disabled, but we have never
5929 * read anything. This allows a user to cat this file, and
5930 * then enable tracing. But after we have read something,
5931 * we give an EOF when tracing is again disabled.
5933 * iter->pos will be 0 if we haven't read anything.
5935 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5938 mutex_unlock(&iter->mutex);
5940 ret = wait_on_pipe(iter, 0);
5942 mutex_lock(&iter->mutex);
5955 tracing_read_pipe(struct file *filp, char __user *ubuf,
5956 size_t cnt, loff_t *ppos)
5958 struct trace_iterator *iter = filp->private_data;
5962 * Avoid more than one consumer on a single file descriptor
5963 * This is just a matter of traces coherency, the ring buffer itself
5966 mutex_lock(&iter->mutex);
5968 /* return any leftover data */
5969 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5973 trace_seq_init(&iter->seq);
5975 if (iter->trace->read) {
5976 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5982 sret = tracing_wait_pipe(filp);
5986 /* stop when tracing is finished */
5987 if (trace_empty(iter)) {
5992 if (cnt >= PAGE_SIZE)
5993 cnt = PAGE_SIZE - 1;
5995 /* reset all but tr, trace, and overruns */
5996 memset(&iter->seq, 0,
5997 sizeof(struct trace_iterator) -
5998 offsetof(struct trace_iterator, seq));
5999 cpumask_clear(iter->started);
6002 trace_event_read_lock();
6003 trace_access_lock(iter->cpu_file);
6004 while (trace_find_next_entry_inc(iter) != NULL) {
6005 enum print_line_t ret;
6006 int save_len = iter->seq.seq.len;
6008 ret = print_trace_line(iter);
6009 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6010 /* don't print partial lines */
6011 iter->seq.seq.len = save_len;
6014 if (ret != TRACE_TYPE_NO_CONSUME)
6015 trace_consume(iter);
6017 if (trace_seq_used(&iter->seq) >= cnt)
6021 * Setting the full flag means we reached the trace_seq buffer
6022 * size and we should leave by partial output condition above.
6023 * One of the trace_seq_* functions is not used properly.
6025 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6028 trace_access_unlock(iter->cpu_file);
6029 trace_event_read_unlock();
6031 /* Now copy what we have to the user */
6032 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6033 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6034 trace_seq_init(&iter->seq);
6037 * If there was nothing to send to user, in spite of consuming trace
6038 * entries, go back to wait for more entries.
6044 mutex_unlock(&iter->mutex);
6049 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6052 __free_page(spd->pages[idx]);
6055 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6056 .confirm = generic_pipe_buf_confirm,
6057 .release = generic_pipe_buf_release,
6058 .steal = generic_pipe_buf_steal,
6059 .get = generic_pipe_buf_get,
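/*
 * Fill up to one page of formatted trace output into iter->seq, consuming
 * entries until the page is full or the ring buffer runs out.
 */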
6063 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6069 /* Seq buffer is page-sized, exactly what we need. */
6071 save_len = iter->seq.seq.len;
6072 ret = print_trace_line(iter);
6074 if (trace_seq_has_overflowed(&iter->seq)) {
6075 iter->seq.seq.len = save_len;
6080 * This should not be hit, because it should only
6081 * be set if the iter->seq overflowed. But check it
6082 * anyway to be safe.
6084 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6085 iter->seq.seq.len = save_len;
6089 count = trace_seq_used(&iter->seq) - save_len;
6092 iter->seq.seq.len = save_len;
6096 if (ret != TRACE_TYPE_NO_CONSUME)
6097 trace_consume(iter);
6099 if (!trace_find_next_entry_inc(iter)) {
6109 static ssize_t tracing_splice_read_pipe(struct file *filp,
6111 struct pipe_inode_info *pipe,
6115 struct page *pages_def[PIPE_DEF_BUFFERS];
6116 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6117 struct trace_iterator *iter = filp->private_data;
6118 struct splice_pipe_desc spd = {
6120 .partial = partial_def,
6121 .nr_pages = 0, /* This gets updated below. */
6122 .nr_pages_max = PIPE_DEF_BUFFERS,
6123 .ops = &tracing_pipe_buf_ops,
6124 .spd_release = tracing_spd_release_pipe,
6130 if (splice_grow_spd(pipe, &spd))
6133 mutex_lock(&iter->mutex);
6135 if (iter->trace->splice_read) {
6136 ret = iter->trace->splice_read(iter, filp,
6137 ppos, pipe, len, flags);
6142 ret = tracing_wait_pipe(filp);
6146 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6151 trace_event_read_lock();
6152 trace_access_lock(iter->cpu_file);
6154 /* Fill as many pages as possible. */
6155 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6156 spd.pages[i] = alloc_page(GFP_KERNEL);
6160 rem = tracing_fill_pipe_page(rem, iter);
6162 /* Copy the data into the page, so we can start over. */
6163 ret = trace_seq_to_buffer(&iter->seq,
6164 page_address(spd.pages[i]),
6165 trace_seq_used(&iter->seq));
6167 __free_page(spd.pages[i]);
6170 spd.partial[i].offset = 0;
6171 spd.partial[i].len = trace_seq_used(&iter->seq);
6173 trace_seq_init(&iter->seq);
6176 trace_access_unlock(iter->cpu_file);
6177 trace_event_read_unlock();
6178 mutex_unlock(&iter->mutex);
6183 ret = splice_to_pipe(pipe, &spd);
6187 splice_shrink_spd(&spd);
6191 mutex_unlock(&iter->mutex);
6196 tracing_entries_read(struct file *filp, char __user *ubuf,
6197 size_t cnt, loff_t *ppos)
6199 struct inode *inode = file_inode(filp);
6200 struct trace_array *tr = inode->i_private;
6201 int cpu = tracing_get_cpu(inode);
6206 mutex_lock(&trace_types_lock);
6208 if (cpu == RING_BUFFER_ALL_CPUS) {
6209 int cpu, buf_size_same;
6214 /* check if all cpu sizes are same */
6215 for_each_tracing_cpu(cpu) {
6216 /* fill in the size from first enabled cpu */
6218 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6219 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6225 if (buf_size_same) {
6226 if (!ring_buffer_expanded)
6227 r = sprintf(buf, "%lu (expanded: %lu)\n",
6229 trace_buf_size >> 10);
6231 r = sprintf(buf, "%lu\n", size >> 10);
6233 r = sprintf(buf, "X\n");
6235 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6237 mutex_unlock(&trace_types_lock);
6239 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6244 tracing_entries_write(struct file *filp, const char __user *ubuf,
6245 size_t cnt, loff_t *ppos)
6247 struct inode *inode = file_inode(filp);
6248 struct trace_array *tr = inode->i_private;
6252 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6256 /* must have at least 1 entry */
6260 /* value is in KB */
6262 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6272 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6273 size_t cnt, loff_t *ppos)
6275 struct trace_array *tr = filp->private_data;
6278 unsigned long size = 0, expanded_size = 0;
6280 mutex_lock(&trace_types_lock);
6281 for_each_tracing_cpu(cpu) {
6282 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6283 if (!ring_buffer_expanded)
6284 expanded_size += trace_buf_size >> 10;
6286 if (ring_buffer_expanded)
6287 r = sprintf(buf, "%lu\n", size);
6289 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6290 mutex_unlock(&trace_types_lock);
6292 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6296 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6297 size_t cnt, loff_t *ppos)
6300 * There is no need to read what the user has written; this function
6301 * is just to make sure that there is no error when "echo" is used.
6310 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6312 struct trace_array *tr = inode->i_private;
6314 /* disable tracing ? */
6315 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6316 tracer_tracing_off(tr);
6317 /* resize the ring buffer to 0 */
6318 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6320 trace_array_put(tr);
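/*
 * Writes to trace_marker land here: copy the user string into a TRACE_PRINT
 * event and fire any triggers attached to the trace_marker event file.
 */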
6326 tracing_mark_write(struct file *filp, const char __user *ubuf,
6327 size_t cnt, loff_t *fpos)
6329 struct trace_array *tr = filp->private_data;
6330 struct ring_buffer_event *event;
6331 enum event_trigger_type tt = ETT_NONE;
6332 struct ring_buffer *buffer;
6333 struct print_entry *entry;
6334 unsigned long irq_flags;
6339 /* Used in tracing_mark_raw_write() as well */
6340 #define FAULTED_STR "<faulted>"
6341 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6343 if (tracing_disabled)
6346 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6349 if (cnt > TRACE_BUF_SIZE)
6350 cnt = TRACE_BUF_SIZE;
6352 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6354 local_save_flags(irq_flags);
6355 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6357 /* If less than "<faulted>", then make sure we can still add that */
6358 if (cnt < FAULTED_SIZE)
6359 size += FAULTED_SIZE - cnt;
6361 buffer = tr->trace_buffer.buffer;
6362 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6363 irq_flags, preempt_count());
6364 if (unlikely(!event))
6365 /* Ring buffer disabled, return as if not open for write */
6368 entry = ring_buffer_event_data(event);
6369 entry->ip = _THIS_IP_;
6371 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6373 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6380 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6381 /* do not add \n before testing triggers, but add \0 */
6382 entry->buf[cnt] = '\0';
6383 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6386 if (entry->buf[cnt - 1] != '\n') {
6387 entry->buf[cnt] = '\n';
6388 entry->buf[cnt + 1] = '\0';
6390 entry->buf[cnt] = '\0';
6392 __buffer_unlock_commit(buffer, event);
6395 event_triggers_post_call(tr->trace_marker_file, tt);
6403 /* Limit it for now to 3K (including tag) */
6404 #define RAW_DATA_MAX_SIZE (1024*3)
6407 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6408 size_t cnt, loff_t *fpos)
6410 struct trace_array *tr = filp->private_data;
6411 struct ring_buffer_event *event;
6412 struct ring_buffer *buffer;
6413 struct raw_data_entry *entry;
6414 unsigned long irq_flags;
6419 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6421 if (tracing_disabled)
6424 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6427 /* The marker must at least have a tag id */
6428 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6431 if (cnt > TRACE_BUF_SIZE)
6432 cnt = TRACE_BUF_SIZE;
6434 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6436 local_save_flags(irq_flags);
6437 size = sizeof(*entry) + cnt;
6438 if (cnt < FAULT_SIZE_ID)
6439 size += FAULT_SIZE_ID - cnt;
6441 buffer = tr->trace_buffer.buffer;
6442 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6443 irq_flags, preempt_count());
6445 /* Ring buffer disabled, return as if not open for write */
6448 entry = ring_buffer_event_data(event);
6450 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6453 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6458 __buffer_unlock_commit(buffer, event);
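/* Show the available trace clocks; the currently selected one is bracketed. */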
6466 static int tracing_clock_show(struct seq_file *m, void *v)
6468 struct trace_array *tr = m->private;
6471 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6473 "%s%s%s%s", i ? " " : "",
6474 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6475 i == tr->clock_id ? "]" : "");
6481 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6485 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6486 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6489 if (i == ARRAY_SIZE(trace_clocks))
6492 mutex_lock(&trace_types_lock);
6496 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6499 * New clock may not be consistent with the previous clock.
6500 * Reset the buffer so that it doesn't have incomparable timestamps.
6502 tracing_reset_online_cpus(&tr->trace_buffer);
6504 #ifdef CONFIG_TRACER_MAX_TRACE
6505 if (tr->max_buffer.buffer)
6506 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6507 tracing_reset_online_cpus(&tr->max_buffer);
6510 mutex_unlock(&trace_types_lock);
6515 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6516 size_t cnt, loff_t *fpos)
6518 struct seq_file *m = filp->private_data;
6519 struct trace_array *tr = m->private;
6521 const char *clockstr;
6524 if (cnt >= sizeof(buf))
6527 if (copy_from_user(buf, ubuf, cnt))
6532 clockstr = strstrip(buf);
6534 ret = tracing_set_clock(tr, clockstr);
6543 static int tracing_clock_open(struct inode *inode, struct file *file)
6545 struct trace_array *tr = inode->i_private;
6548 if (tracing_disabled)
6551 if (trace_array_get(tr))
6554 ret = single_open(file, tracing_clock_show, inode->i_private);
6556 trace_array_put(tr);
6561 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6563 struct trace_array *tr = m->private;
6565 mutex_lock(&trace_types_lock);
6567 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6568 seq_puts(m, "delta [absolute]\n");
6570 seq_puts(m, "[delta] absolute\n");
6572 mutex_unlock(&trace_types_lock);
6577 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6579 struct trace_array *tr = inode->i_private;
6582 if (tracing_disabled)
6585 if (trace_array_get(tr))
6588 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6590 trace_array_put(tr);
6595 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6599 mutex_lock(&trace_types_lock);
6601 if (abs && tr->time_stamp_abs_ref++)
6605 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6610 if (--tr->time_stamp_abs_ref)
6614 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6616 #ifdef CONFIG_TRACER_MAX_TRACE
6617 if (tr->max_buffer.buffer)
6618 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6621 mutex_unlock(&trace_types_lock);
6626 struct ftrace_buffer_info {
6627 struct trace_iterator iter;
6629 unsigned int spare_cpu;
6633 #ifdef CONFIG_TRACER_SNAPSHOT
6634 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6636 struct trace_array *tr = inode->i_private;
6637 struct trace_iterator *iter;
6641 if (trace_array_get(tr) < 0)
6644 if (file->f_mode & FMODE_READ) {
6645 iter = __tracing_open(inode, file, true);
6647 ret = PTR_ERR(iter);
6649 /* Writes still need the seq_file to hold the private data */
6651 m = kzalloc(sizeof(*m), GFP_KERNEL);
6654 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6662 iter->trace_buffer = &tr->max_buffer;
6663 iter->cpu_file = tracing_get_cpu(inode);
6665 file->private_data = m;
6669 trace_array_put(tr);
6675 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6678 struct seq_file *m = filp->private_data;
6679 struct trace_iterator *iter = m->private;
6680 struct trace_array *tr = iter->tr;
6684 ret = tracing_update_buffers();
6688 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6692 mutex_lock(&trace_types_lock);
6694 if (tr->current_trace->use_max_tr) {
6699 arch_spin_lock(&tr->max_lock);
6700 if (tr->cond_snapshot)
6702 arch_spin_unlock(&tr->max_lock);
6708 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6712 if (tr->allocated_snapshot)
6716 /* Only allow per-cpu swap if the ring buffer supports it */
6717 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6718 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6723 if (!tr->allocated_snapshot) {
6724 ret = tracing_alloc_snapshot_instance(tr);
6728 local_irq_disable();
6729 /* Now, we're going to swap */
6730 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6731 update_max_tr(tr, current, smp_processor_id(), NULL);
6733 update_max_tr_single(tr, current, iter->cpu_file);
6737 if (tr->allocated_snapshot) {
6738 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6739 tracing_reset_online_cpus(&tr->max_buffer);
6741 tracing_reset(&tr->max_buffer, iter->cpu_file);
6751 mutex_unlock(&trace_types_lock);
6755 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6757 struct seq_file *m = file->private_data;
6760 ret = tracing_release(inode, file);
6762 if (file->f_mode & FMODE_READ)
6765 /* If write only, the seq_file is just a stub */
6773 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6774 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6775 size_t count, loff_t *ppos);
6776 static int tracing_buffers_release(struct inode *inode, struct file *file);
6777 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6778 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6780 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6782 struct ftrace_buffer_info *info;
6785 ret = tracing_buffers_open(inode, filp);
6789 info = filp->private_data;
6791 if (info->iter.trace->use_max_tr) {
6792 tracing_buffers_release(inode, filp);
6796 info->iter.snapshot = true;
6797 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6802 #endif /* CONFIG_TRACER_SNAPSHOT */
6805 static const struct file_operations tracing_thresh_fops = {
6806 .open = tracing_open_generic,
6807 .read = tracing_thresh_read,
6808 .write = tracing_thresh_write,
6809 .llseek = generic_file_llseek,
6812 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6813 static const struct file_operations tracing_max_lat_fops = {
6814 .open = tracing_open_generic,
6815 .read = tracing_max_lat_read,
6816 .write = tracing_max_lat_write,
6817 .llseek = generic_file_llseek,
6821 static const struct file_operations set_tracer_fops = {
6822 .open = tracing_open_generic,
6823 .read = tracing_set_trace_read,
6824 .write = tracing_set_trace_write,
6825 .llseek = generic_file_llseek,
6828 static const struct file_operations tracing_pipe_fops = {
6829 .open = tracing_open_pipe,
6830 .poll = tracing_poll_pipe,
6831 .read = tracing_read_pipe,
6832 .splice_read = tracing_splice_read_pipe,
6833 .release = tracing_release_pipe,
6834 .llseek = no_llseek,
6837 static const struct file_operations tracing_entries_fops = {
6838 .open = tracing_open_generic_tr,
6839 .read = tracing_entries_read,
6840 .write = tracing_entries_write,
6841 .llseek = generic_file_llseek,
6842 .release = tracing_release_generic_tr,
6845 static const struct file_operations tracing_total_entries_fops = {
6846 .open = tracing_open_generic_tr,
6847 .read = tracing_total_entries_read,
6848 .llseek = generic_file_llseek,
6849 .release = tracing_release_generic_tr,
6852 static const struct file_operations tracing_free_buffer_fops = {
6853 .open = tracing_open_generic_tr,
6854 .write = tracing_free_buffer_write,
6855 .release = tracing_free_buffer_release,
6858 static const struct file_operations tracing_mark_fops = {
6859 .open = tracing_open_generic_tr,
6860 .write = tracing_mark_write,
6861 .llseek = generic_file_llseek,
6862 .release = tracing_release_generic_tr,
6865 static const struct file_operations tracing_mark_raw_fops = {
6866 .open = tracing_open_generic_tr,
6867 .write = tracing_mark_raw_write,
6868 .llseek = generic_file_llseek,
6869 .release = tracing_release_generic_tr,
6872 static const struct file_operations trace_clock_fops = {
6873 .open = tracing_clock_open,
6875 .llseek = seq_lseek,
6876 .release = tracing_single_release_tr,
6877 .write = tracing_clock_write,
6880 static const struct file_operations trace_time_stamp_mode_fops = {
6881 .open = tracing_time_stamp_mode_open,
6883 .llseek = seq_lseek,
6884 .release = tracing_single_release_tr,
6887 #ifdef CONFIG_TRACER_SNAPSHOT
6888 static const struct file_operations snapshot_fops = {
6889 .open = tracing_snapshot_open,
6891 .write = tracing_snapshot_write,
6892 .llseek = tracing_lseek,
6893 .release = tracing_snapshot_release,
6896 static const struct file_operations snapshot_raw_fops = {
6897 .open = snapshot_raw_open,
6898 .read = tracing_buffers_read,
6899 .release = tracing_buffers_release,
6900 .splice_read = tracing_buffers_splice_read,
6901 .llseek = no_llseek,
6904 #endif /* CONFIG_TRACER_SNAPSHOT */
6906 #define TRACING_LOG_ERRS_MAX 8
6907 #define TRACING_LOG_LOC_MAX 128
6909 #define CMD_PREFIX " Command: "
6912 const char **errs; /* ptr to loc-specific array of err strings */
6913 u8 type; /* index into errs -> specific err string */
6914 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
6918 struct tracing_log_err {
6919 struct list_head list;
6920 struct err_info info;
6921 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6922 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6925 static DEFINE_MUTEX(tracing_err_log_lock);
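/*
 * Get a slot for a new error: allocate fresh entries until
 * TRACING_LOG_ERRS_MAX exist, then recycle the oldest entry in tr->err_log.
 */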
6927 struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
6929 struct tracing_log_err *err;
6931 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
6932 err = kzalloc(sizeof(*err), GFP_KERNEL);
6934 err = ERR_PTR(-ENOMEM);
6935 tr->n_err_log_entries++;
6940 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
6941 list_del(&err->list);
6947 * err_pos - find the position of a string within a command for error careting
6948 * @cmd: The tracing command that caused the error
6949 * @str: The string to position the caret at within @cmd
6951 * Finds the position of the first occurrence of @str within @cmd. The
6952 * return value can be passed to tracing_log_err() for caret placement
6955 * Returns the index within @cmd of the first occurrence of @str or 0
6956 * if @str was not found.
6958 unsigned int err_pos(char *cmd, const char *str)
6962 if (WARN_ON(!strlen(cmd)))
6965 found = strstr(cmd, str);
6973 * tracing_log_err - write an error to the tracing error log
6974 * @tr: The associated trace array for the error (NULL for top level array)
6975 * @loc: A string describing where the error occurred
6976 * @cmd: The tracing command that caused the error
6977 * @errs: The array of loc-specific static error strings
6978 * @type: The index into errs[], which produces the specific static err string
6979 * @pos: The position the caret should be placed in the cmd
6981 * Writes an error into tracing/error_log of the form:
6983 * <loc>: error: <text>
6987 * tracing/error_log is a small log file containing the last
6988 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
6989 * unless there has been a tracing error, and the error log can be
6990 * cleared and have its memory freed by writing the empty string in
6991 * truncation mode to it i.e. echo > tracing/error_log.
6993 * NOTE: the @errs array along with the @type param are used to
6994 * produce a static error string - this string is not copied and saved
6995 * when the error is logged - only a pointer to it is saved. See
6996 * existing callers for examples of how static strings are typically
6997 * defined for use with tracing_log_err().
6999 void tracing_log_err(struct trace_array *tr,
7000 const char *loc, const char *cmd,
7001 const char **errs, u8 type, u8 pos)
7003 struct tracing_log_err *err;
7008 mutex_lock(&tracing_err_log_lock);
7009 err = get_tracing_log_err(tr);
7010 if (PTR_ERR(err) == -ENOMEM) {
7011 mutex_unlock(&tracing_err_log_lock);
7015 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7016 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7018 err->info.errs = errs;
7019 err->info.type = type;
7020 err->info.pos = pos;
7021 err->info.ts = local_clock();
7023 list_add_tail(&err->list, &tr->err_log);
7024 mutex_unlock(&tracing_err_log_lock);
7027 static void clear_tracing_err_log(struct trace_array *tr)
7029 struct tracing_log_err *err, *next;
7031 mutex_lock(&tracing_err_log_lock);
7032 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7033 list_del(&err->list);
7037 tr->n_err_log_entries = 0;
7038 mutex_unlock(&tracing_err_log_lock);
7041 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7043 struct trace_array *tr = m->private;
7045 mutex_lock(&tracing_err_log_lock);
7047 return seq_list_start(&tr->err_log, *pos);
7050 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7052 struct trace_array *tr = m->private;
7054 return seq_list_next(v, &tr->err_log, pos);
7057 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7059 mutex_unlock(&tracing_err_log_lock);
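/* Print the spacing that positions the caret under the offending part of the command. */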
7062 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7066 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7068 for (i = 0; i < pos; i++)
7073 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7075 struct tracing_log_err *err = v;
7078 const char *err_text = err->info.errs[err->info.type];
7079 u64 sec = err->info.ts;
7082 nsec = do_div(sec, NSEC_PER_SEC);
7083 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7084 err->loc, err_text);
7085 seq_printf(m, "%s", err->cmd);
7086 tracing_err_log_show_pos(m, err->info.pos);
7092 static const struct seq_operations tracing_err_log_seq_ops = {
7093 .start = tracing_err_log_seq_start,
7094 .next = tracing_err_log_seq_next,
7095 .stop = tracing_err_log_seq_stop,
7096 .show = tracing_err_log_seq_show
7099 static int tracing_err_log_open(struct inode *inode, struct file *file)
7101 struct trace_array *tr = inode->i_private;
7104 if (trace_array_get(tr) < 0)
7107 /* If this file was opened for write, then erase contents */
7108 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7109 clear_tracing_err_log(tr);
7111 if (file->f_mode & FMODE_READ) {
7112 ret = seq_open(file, &tracing_err_log_seq_ops);
7114 struct seq_file *m = file->private_data;
7117 trace_array_put(tr);
7123 static ssize_t tracing_err_log_write(struct file *file,
7124 const char __user *buffer,
7125 size_t count, loff_t *ppos)
7130 static const struct file_operations tracing_err_log_fops = {
7131 .open = tracing_err_log_open,
7132 .write = tracing_err_log_write,
7134 .llseek = seq_lseek,
7135 .release = tracing_release_generic_tr,
7138 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7140 struct trace_array *tr = inode->i_private;
7141 struct ftrace_buffer_info *info;
7144 if (tracing_disabled)
7147 if (trace_array_get(tr) < 0)
7150 info = kzalloc(sizeof(*info), GFP_KERNEL);
7152 trace_array_put(tr);
7156 mutex_lock(&trace_types_lock);
7159 info->iter.cpu_file = tracing_get_cpu(inode);
7160 info->iter.trace = tr->current_trace;
7161 info->iter.trace_buffer = &tr->trace_buffer;
7163 /* Force reading ring buffer for first read */
7164 info->read = (unsigned int)-1;
7166 filp->private_data = info;
7168 tr->current_trace->ref++;
7170 mutex_unlock(&trace_types_lock);
7172 ret = nonseekable_open(inode, filp);
7174 trace_array_put(tr);
7180 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7182 struct ftrace_buffer_info *info = filp->private_data;
7183 struct trace_iterator *iter = &info->iter;
7185 return trace_poll(iter, filp, poll_table);
7189 tracing_buffers_read(struct file *filp, char __user *ubuf,
7190 size_t count, loff_t *ppos)
7192 struct ftrace_buffer_info *info = filp->private_data;
7193 struct trace_iterator *iter = &info->iter;
7200 #ifdef CONFIG_TRACER_MAX_TRACE
7201 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7206 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7208 if (IS_ERR(info->spare)) {
7209 ret = PTR_ERR(info->spare);
7212 info->spare_cpu = iter->cpu_file;
7218 /* Do we have previous read data to read? */
7219 if (info->read < PAGE_SIZE)
7223 trace_access_lock(iter->cpu_file);
7224 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
7228 trace_access_unlock(iter->cpu_file);
7231 if (trace_empty(iter)) {
7232 if ((filp->f_flags & O_NONBLOCK))
7235 ret = wait_on_pipe(iter, 0);
7246 size = PAGE_SIZE - info->read;
7250 ret = copy_to_user(ubuf, info->spare + info->read, size);
7262 static int tracing_buffers_release(struct inode *inode, struct file *file)
7264 struct ftrace_buffer_info *info = file->private_data;
7265 struct trace_iterator *iter = &info->iter;
7267 mutex_lock(&trace_types_lock);
7269 iter->tr->current_trace->ref--;
7271 __trace_array_put(iter->tr);
7274 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7275 info->spare_cpu, info->spare);
7278 mutex_unlock(&trace_types_lock);
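/* struct buffer_ref: a counted reference to a ring-buffer page handed out via splice(). */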
7284 struct ring_buffer *buffer;
7287 refcount_t refcount;
7290 static void buffer_ref_release(struct buffer_ref *ref)
7292 if (!refcount_dec_and_test(&ref->refcount))
7294 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7298 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7299 struct pipe_buffer *buf)
7301 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7303 buffer_ref_release(ref);
7307 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7308 struct pipe_buffer *buf)
7310 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7312 if (refcount_read(&ref->refcount) > INT_MAX/2)
7315 refcount_inc(&ref->refcount);
7319 /* Pipe buffer operations for a buffer. */
7320 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7321 .confirm = generic_pipe_buf_confirm,
7322 .release = buffer_pipe_buf_release,
7323 .steal = generic_pipe_buf_nosteal,
7324 .get = buffer_pipe_buf_get,
7328 * Callback from splice_to_pipe(), if we need to release some pages
7329 * at the end of the spd in case we errored out in filling the pipe.
7331 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7333 struct buffer_ref *ref =
7334 (struct buffer_ref *)spd->partial[i].private;
7336 buffer_ref_release(ref);
7337 spd->partial[i].private = 0;
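/*
 * Splice ring-buffer pages into a pipe without copying: each page is wrapped
 * in a buffer_ref whose refcount keeps it alive until the pipe buffer is
 * released.
 */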
7341 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7342 struct pipe_inode_info *pipe, size_t len,
7345 struct ftrace_buffer_info *info = file->private_data;
7346 struct trace_iterator *iter = &info->iter;
7347 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7348 struct page *pages_def[PIPE_DEF_BUFFERS];
7349 struct splice_pipe_desc spd = {
7351 .partial = partial_def,
7352 .nr_pages_max = PIPE_DEF_BUFFERS,
7353 .ops = &buffer_pipe_buf_ops,
7354 .spd_release = buffer_spd_release,
7356 struct buffer_ref *ref;
7360 #ifdef CONFIG_TRACER_MAX_TRACE
7361 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7365 if (*ppos & (PAGE_SIZE - 1))
7368 if (len & (PAGE_SIZE - 1)) {
7369 if (len < PAGE_SIZE)
7374 if (splice_grow_spd(pipe, &spd))
7378 trace_access_lock(iter->cpu_file);
7379 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7381 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7385 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7391 refcount_set(&ref->refcount, 1);
7392 ref->buffer = iter->trace_buffer->buffer;
7393 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7394 if (IS_ERR(ref->page)) {
7395 ret = PTR_ERR(ref->page);
7400 ref->cpu = iter->cpu_file;
7402 r = ring_buffer_read_page(ref->buffer, &ref->page,
7403 len, iter->cpu_file, 1);
7405 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7411 page = virt_to_page(ref->page);
7413 spd.pages[i] = page;
7414 spd.partial[i].len = PAGE_SIZE;
7415 spd.partial[i].offset = 0;
7416 spd.partial[i].private = (unsigned long)ref;
7420 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7423 trace_access_unlock(iter->cpu_file);
7426 /* did we read anything? */
7427 if (!spd.nr_pages) {
7432 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7435 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7442 ret = splice_to_pipe(pipe, &spd);
7444 splice_shrink_spd(&spd);
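/*
 * The splice path above is how readers pull raw, page-sized ring
 * buffer data with minimal copying: both *ppos and len must be page
 * aligned, and each page that was read is handed to the pipe via a
 * buffer_ref. User-space tools (trace-cmd, for example) typically
 * splice from per_cpu/cpuN/trace_pipe_raw into a file or socket
 * rather than read()ing it.
 */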
7449 static const struct file_operations tracing_buffers_fops = {
7450 .open = tracing_buffers_open,
7451 .read = tracing_buffers_read,
7452 .poll = tracing_buffers_poll,
7453 .release = tracing_buffers_release,
7454 .splice_read = tracing_buffers_splice_read,
7455 .llseek = no_llseek,
7459 tracing_stats_read(struct file *filp, char __user *ubuf,
7460 size_t count, loff_t *ppos)
7462 struct inode *inode = file_inode(filp);
7463 struct trace_array *tr = inode->i_private;
7464 struct trace_buffer *trace_buf = &tr->trace_buffer;
7465 int cpu = tracing_get_cpu(inode);
7466 struct trace_seq *s;
7468 unsigned long long t;
7469 unsigned long usec_rem;
7471 s = kmalloc(sizeof(*s), GFP_KERNEL);
7477 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7478 trace_seq_printf(s, "entries: %ld\n", cnt);
7480 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7481 trace_seq_printf(s, "overrun: %ld\n", cnt);
7483 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7484 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7486 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7487 trace_seq_printf(s, "bytes: %ld\n", cnt);
7489 if (trace_clocks[tr->clock_id].in_ns) {
7490 /* local or global for trace_clock */
7491 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7492 usec_rem = do_div(t, USEC_PER_SEC);
7493 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7496 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7497 usec_rem = do_div(t, USEC_PER_SEC);
7498 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7500 /* counter or tsc mode for trace_clock */
7501 trace_seq_printf(s, "oldest event ts: %llu\n",
7502 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7504 trace_seq_printf(s, "now ts: %llu\n",
7505 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7508 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7509 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7511 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7512 trace_seq_printf(s, "read events: %ld\n", cnt);
7514 count = simple_read_from_buffer(ubuf, count, ppos,
7515 s->buffer, trace_seq_used(s));
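/*
 * Reading per_cpu/cpuN/stats therefore produces one "name: value"
 * line per counter printed above, along the lines of (values made up
 * purely for illustration):
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 7284
 *   oldest event ts:  1575.124488
 *   now ts:  1577.438568
 *   dropped events: 0
 *   read events: 42
 */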
7522 static const struct file_operations tracing_stats_fops = {
7523 .open = tracing_open_generic_tr,
7524 .read = tracing_stats_read,
7525 .llseek = generic_file_llseek,
7526 .release = tracing_release_generic_tr,
7529 #ifdef CONFIG_DYNAMIC_FTRACE
7532 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7533 size_t cnt, loff_t *ppos)
7535 unsigned long *p = filp->private_data;
7536 char buf[64]; /* Not too big for a shallow stack */
7539 r = scnprintf(buf, 63, "%ld", *p);
7542 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7545 static const struct file_operations tracing_dyn_info_fops = {
7546 .open = tracing_open_generic,
7547 .read = tracing_read_dyn_info,
7548 .llseek = generic_file_llseek,
7550 #endif /* CONFIG_DYNAMIC_FTRACE */
7552 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7554 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7555 struct trace_array *tr, struct ftrace_probe_ops *ops,
7558 tracing_snapshot_instance(tr);
7562 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7563 struct trace_array *tr, struct ftrace_probe_ops *ops,
7566 struct ftrace_func_mapper *mapper = data;
7570 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7580 tracing_snapshot_instance(tr);
7584 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7585 struct ftrace_probe_ops *ops, void *data)
7587 struct ftrace_func_mapper *mapper = data;
7590 seq_printf(m, "%ps:", (void *)ip);
7592 seq_puts(m, "snapshot");
7595 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7598 seq_printf(m, ":count=%ld\n", *count);
7600 seq_puts(m, ":unlimited\n");
7606 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7607 unsigned long ip, void *init_data, void **data)
7609 struct ftrace_func_mapper *mapper = *data;
7612 mapper = allocate_ftrace_func_mapper();
7618 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7622 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7623 unsigned long ip, void *data)
7625 struct ftrace_func_mapper *mapper = data;
7630 free_ftrace_func_mapper(mapper, NULL);
7634 ftrace_func_mapper_remove_ip(mapper, ip);
7637 static struct ftrace_probe_ops snapshot_probe_ops = {
7638 .func = ftrace_snapshot,
7639 .print = ftrace_snapshot_print,
7642 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7643 .func = ftrace_count_snapshot,
7644 .print = ftrace_snapshot_print,
7645 .init = ftrace_snapshot_init,
7646 .free = ftrace_snapshot_free,
7650 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7651 char *glob, char *cmd, char *param, int enable)
7653 struct ftrace_probe_ops *ops;
7654 void *count = (void *)-1;
7661 /* hash funcs only work with set_ftrace_filter */
7665 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7668 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7673 number = strsep(&param, ":");
7675 if (!strlen(number))
7679 * We use the callback data field (which is a pointer) as our counter.
7682 ret = kstrtoul(number, 0, (unsigned long *)&count);
7687 ret = tracing_alloc_snapshot_instance(tr);
7691 ret = register_ftrace_function_probe(glob, tr, ops, count);
7694 return ret < 0 ? ret : 0;
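/*
 * This implements the "snapshot" command of set_ftrace_filter. A
 * typical use (the function name is chosen purely as an example) is:
 *
 *   echo 'kfree:snapshot:5' > set_ftrace_filter
 *
 * which triggers a snapshot on (at most) the first five calls to
 * kfree, while
 *
 *   echo '!kfree:snapshot:5' > set_ftrace_filter
 *
 * (a glob starting with '!') unregisters the probe again, as handled
 * by the unregister_ftrace_function_probe_func() branch above.
 */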
7697 static struct ftrace_func_command ftrace_snapshot_cmd = {
7699 .func = ftrace_trace_snapshot_callback,
7702 static __init int register_snapshot_cmd(void)
7704 return register_ftrace_command(&ftrace_snapshot_cmd);
7707 static inline __init int register_snapshot_cmd(void) { return 0; }
7708 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7710 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7712 if (WARN_ON(!tr->dir))
7713 return ERR_PTR(-ENODEV);
7715 /* Top directory uses NULL as the parent */
7716 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7719 /* All sub buffers have a descriptor */
7723 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7725 struct dentry *d_tracer;
7728 return tr->percpu_dir;
7730 d_tracer = tracing_get_dentry(tr);
7731 if (IS_ERR(d_tracer))
7734 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7736 WARN_ONCE(!tr->percpu_dir,
7737 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7739 return tr->percpu_dir;
7742 static struct dentry *
7743 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7744 void *data, long cpu, const struct file_operations *fops)
7746 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7748 if (ret) /* See tracing_get_cpu() */
7749 d_inode(ret)->i_cdev = (void *)(cpu + 1);
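/*
 * The +1 above keeps zero reserved for "no CPU bound to this inode",
 * which is presumably why tracing_get_cpu() can fall back to
 * RING_BUFFER_ALL_CPUS for top-level files whose i_cdev was never set.
 */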
7754 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7756 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7757 struct dentry *d_cpu;
7758 char cpu_dir[30]; /* 30 characters should be more than enough */
7763 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7764 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7766 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7770 /* per cpu trace_pipe */
7771 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7772 tr, cpu, &tracing_pipe_fops);
7775 trace_create_cpu_file("trace", 0644, d_cpu,
7776 tr, cpu, &tracing_fops);
7778 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7779 tr, cpu, &tracing_buffers_fops);
7781 trace_create_cpu_file("stats", 0444, d_cpu,
7782 tr, cpu, &tracing_stats_fops);
7784 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7785 tr, cpu, &tracing_entries_fops);
7787 #ifdef CONFIG_TRACER_SNAPSHOT
7788 trace_create_cpu_file("snapshot", 0644, d_cpu,
7789 tr, cpu, &snapshot_fops);
7791 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7792 tr, cpu, &snapshot_raw_fops);
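/*
 * The net effect is a per-CPU mirror of the main control files, e.g.
 * /sys/kernel/tracing/per_cpu/cpu0/{trace,trace_pipe,trace_pipe_raw,
 * stats,buffer_size_kb,...}, each operating on a single CPU's buffer.
 */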
7796 #ifdef CONFIG_FTRACE_SELFTEST
7797 /* Let selftest have access to static functions in this file */
7798 #include "trace_selftest.c"
7802 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7805 struct trace_option_dentry *topt = filp->private_data;
7808 if (topt->flags->val & topt->opt->bit)
7813 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7817 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7820 struct trace_option_dentry *topt = filp->private_data;
7824 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7828 if (val != 0 && val != 1)
7831 if (!!(topt->flags->val & topt->opt->bit) != val) {
7832 mutex_lock(&trace_types_lock);
7833 ret = __set_tracer_option(topt->tr, topt->flags,
7835 mutex_unlock(&trace_types_lock);
7846 static const struct file_operations trace_options_fops = {
7847 .open = tracing_open_generic,
7848 .read = trace_options_read,
7849 .write = trace_options_write,
7850 .llseek = generic_file_llseek,
7854 * In order to pass in both the trace_array descriptor as well as the index
7855 * to the flag that the trace option file represents, the trace_array
7856 * has a character array of trace_flags_index[], which holds the index
7857 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7858 * The address of this character array is passed to the flag option file
7859 * read/write callbacks.
7861 * In order to extract both the index and the trace_array descriptor,
7862 * get_tr_index() uses the following algorithm.
7866 * As the pointer holds the address of one index[] entry, and since
7869 * index[i] == i, dereferencing it gives the index (idx = *ptr);
7870 * subtracting that index from the pointer gets us back to the start:
7872 * ptr - idx == &index[0]
7874 * Then a simple container_of() from that pointer gets us to the
7875 * trace_array descriptor.
7877 static void get_tr_index(void *data, struct trace_array **ptr,
7878 unsigned int *pindex)
7880 *pindex = *(unsigned char *)data;
7882 *ptr = container_of(data - *pindex, struct trace_array,
7887 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7890 void *tr_index = filp->private_data;
7891 struct trace_array *tr;
7895 get_tr_index(tr_index, &tr, &index);
7897 if (tr->trace_flags & (1 << index))
7902 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7906 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7909 void *tr_index = filp->private_data;
7910 struct trace_array *tr;
7915 get_tr_index(tr_index, &tr, &index);
7917 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7921 if (val != 0 && val != 1)
7924 mutex_lock(&trace_types_lock);
7925 ret = set_tracer_flag(tr, 1 << index, val);
7926 mutex_unlock(&trace_types_lock);
7936 static const struct file_operations trace_options_core_fops = {
7937 .open = tracing_open_generic,
7938 .read = trace_options_core_read,
7939 .write = trace_options_core_write,
7940 .llseek = generic_file_llseek,
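/*
 * Each file under tracefs "options/" is bound to one bit of
 * tr->trace_flags through these fops; reading returns "0\n" or "1\n"
 * and writing flips the bit via set_tracer_flag(). For instance
 * (option name used only as an illustration):
 *
 *   echo 1 > /sys/kernel/tracing/options/sym-offset
 */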
7943 struct dentry *trace_create_file(const char *name,
7945 struct dentry *parent,
7947 const struct file_operations *fops)
7951 ret = tracefs_create_file(name, mode, parent, data, fops);
7953 pr_warn("Could not create tracefs '%s' entry\n", name);
7959 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7961 struct dentry *d_tracer;
7966 d_tracer = tracing_get_dentry(tr);
7967 if (IS_ERR(d_tracer))
7970 tr->options = tracefs_create_dir("options", d_tracer);
7972 pr_warn("Could not create tracefs directory 'options'\n");
7980 create_trace_option_file(struct trace_array *tr,
7981 struct trace_option_dentry *topt,
7982 struct tracer_flags *flags,
7983 struct tracer_opt *opt)
7985 struct dentry *t_options;
7987 t_options = trace_options_init_dentry(tr);
7991 topt->flags = flags;
7995 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7996 &trace_options_fops);
8001 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8003 struct trace_option_dentry *topts;
8004 struct trace_options *tr_topts;
8005 struct tracer_flags *flags;
8006 struct tracer_opt *opts;
8013 flags = tracer->flags;
8015 if (!flags || !flags->opts)
8019 * If this is an instance, only create flags for tracers
8020 * the instance may have.
8022 if (!trace_ok_for_array(tracer, tr))
8025 for (i = 0; i < tr->nr_topts; i++) {
8026 /* Make sure there are no duplicate flags. */
8027 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8033 for (cnt = 0; opts[cnt].name; cnt++)
8036 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8040 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8047 tr->topts = tr_topts;
8048 tr->topts[tr->nr_topts].tracer = tracer;
8049 tr->topts[tr->nr_topts].topts = topts;
8052 for (cnt = 0; opts[cnt].name; cnt++) {
8053 create_trace_option_file(tr, &topts[cnt], flags,
8055 WARN_ONCE(topts[cnt].entry == NULL,
8056 "Failed to create trace option: %s",
8061 static struct dentry *
8062 create_trace_option_core_file(struct trace_array *tr,
8063 const char *option, long index)
8065 struct dentry *t_options;
8067 t_options = trace_options_init_dentry(tr);
8071 return trace_create_file(option, 0644, t_options,
8072 (void *)&tr->trace_flags_index[index],
8073 &trace_options_core_fops);
8076 static void create_trace_options_dir(struct trace_array *tr)
8078 struct dentry *t_options;
8079 bool top_level = tr == &global_trace;
8082 t_options = trace_options_init_dentry(tr);
8086 for (i = 0; trace_options[i]; i++) {
8088 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8089 create_trace_option_core_file(tr, trace_options[i], i);
8094 rb_simple_read(struct file *filp, char __user *ubuf,
8095 size_t cnt, loff_t *ppos)
8097 struct trace_array *tr = filp->private_data;
8101 r = tracer_tracing_is_on(tr);
8102 r = sprintf(buf, "%d\n", r);
8104 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8108 rb_simple_write(struct file *filp, const char __user *ubuf,
8109 size_t cnt, loff_t *ppos)
8111 struct trace_array *tr = filp->private_data;
8112 struct ring_buffer *buffer = tr->trace_buffer.buffer;
8116 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8121 mutex_lock(&trace_types_lock);
8122 if (!!val == tracer_tracing_is_on(tr)) {
8123 val = 0; /* do nothing */
8125 tracer_tracing_on(tr);
8126 if (tr->current_trace->start)
8127 tr->current_trace->start(tr);
8129 tracer_tracing_off(tr);
8130 if (tr->current_trace->stop)
8131 tr->current_trace->stop(tr);
8133 mutex_unlock(&trace_types_lock);
8141 static const struct file_operations rb_simple_fops = {
8142 .open = tracing_open_generic_tr,
8143 .read = rb_simple_read,
8144 .write = rb_simple_write,
8145 .release = tracing_release_generic_tr,
8146 .llseek = default_llseek,
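/*
 * "tracing_on" is the lightweight pause switch: writing 0 stops
 * recording into the ring buffer (and calls the tracer's ->stop()
 * hook) without tearing down the current tracer, and writing 1
 * resumes it, e.g.:
 *
 *   echo 0 > /sys/kernel/tracing/tracing_on
 *   ...
 *   echo 1 > /sys/kernel/tracing/tracing_on
 */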
8150 buffer_percent_read(struct file *filp, char __user *ubuf,
8151 size_t cnt, loff_t *ppos)
8153 struct trace_array *tr = filp->private_data;
8157 r = tr->buffer_percent;
8158 r = sprintf(buf, "%d\n", r);
8160 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8164 buffer_percent_write(struct file *filp, const char __user *ubuf,
8165 size_t cnt, loff_t *ppos)
8167 struct trace_array *tr = filp->private_data;
8171 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8181 tr->buffer_percent = val;
8188 static const struct file_operations buffer_percent_fops = {
8189 .open = tracing_open_generic_tr,
8190 .read = buffer_percent_read,
8191 .write = buffer_percent_write,
8192 .release = tracing_release_generic_tr,
8193 .llseek = default_llseek,
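/*
 * buffer_percent is the wakeup watermark used by wait_on_pipe() in
 * the splice path above: readers of trace_pipe_raw are not woken
 * until roughly this percentage of the buffer is filled (0 meaning
 * wake on any data, 100 meaning wait for a full buffer). The default
 * of 50 is set in init_tracer_tracefs() below.
 */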
8196 struct dentry *trace_instance_dir;
8199 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8202 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
8204 enum ring_buffer_flags rb_flags;
8206 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8210 buf->buffer = ring_buffer_alloc(size, rb_flags);
8214 buf->data = alloc_percpu(struct trace_array_cpu);
8216 ring_buffer_free(buf->buffer);
8221 /* Allocate the first page for all buffers */
8222 set_buffer_entries(&tr->trace_buffer,
8223 ring_buffer_size(tr->trace_buffer.buffer, 0));
8228 static int allocate_trace_buffers(struct trace_array *tr, int size)
8232 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
8236 #ifdef CONFIG_TRACER_MAX_TRACE
8237 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8238 allocate_snapshot ? size : 1);
8240 ring_buffer_free(tr->trace_buffer.buffer);
8241 tr->trace_buffer.buffer = NULL;
8242 free_percpu(tr->trace_buffer.data);
8243 tr->trace_buffer.data = NULL;
8246 tr->allocated_snapshot = allocate_snapshot;
8249 * Only the top level trace array gets its snapshot allocated
8250 * from the kernel command line.
8252 allocate_snapshot = false;
8257 static void free_trace_buffer(struct trace_buffer *buf)
8260 ring_buffer_free(buf->buffer);
8262 free_percpu(buf->data);
8267 static void free_trace_buffers(struct trace_array *tr)
8272 free_trace_buffer(&tr->trace_buffer);
8274 #ifdef CONFIG_TRACER_MAX_TRACE
8275 free_trace_buffer(&tr->max_buffer);
8279 static void init_trace_flags_index(struct trace_array *tr)
8283 /* Used by the trace options files */
8284 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8285 tr->trace_flags_index[i] = i;
8288 static void __update_tracer_options(struct trace_array *tr)
8292 for (t = trace_types; t; t = t->next)
8293 add_tracer_options(tr, t);
8296 static void update_tracer_options(struct trace_array *tr)
8298 mutex_lock(&trace_types_lock);
8299 __update_tracer_options(tr);
8300 mutex_unlock(&trace_types_lock);
8303 struct trace_array *trace_array_create(const char *name)
8305 struct trace_array *tr;
8308 mutex_lock(&event_mutex);
8309 mutex_lock(&trace_types_lock);
8312 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8313 if (tr->name && strcmp(tr->name, name) == 0)
8318 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8322 tr->name = kstrdup(name, GFP_KERNEL);
8326 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8329 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8331 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8333 raw_spin_lock_init(&tr->start_lock);
8335 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8337 tr->current_trace = &nop_trace;
8339 INIT_LIST_HEAD(&tr->systems);
8340 INIT_LIST_HEAD(&tr->events);
8341 INIT_LIST_HEAD(&tr->hist_vars);
8342 INIT_LIST_HEAD(&tr->err_log);
8344 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8347 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8351 ret = event_trace_add_tracer(tr->dir, tr);
8353 tracefs_remove_recursive(tr->dir);
8357 ftrace_init_trace_array(tr);
8359 init_tracer_tracefs(tr, tr->dir);
8360 init_trace_flags_index(tr);
8361 __update_tracer_options(tr);
8363 list_add(&tr->list, &ftrace_trace_arrays);
8365 mutex_unlock(&trace_types_lock);
8366 mutex_unlock(&event_mutex);
8371 free_trace_buffers(tr);
8372 free_cpumask_var(tr->tracing_cpumask);
8377 mutex_unlock(&trace_types_lock);
8378 mutex_unlock(&event_mutex);
8380 return ERR_PTR(ret);
8382 EXPORT_SYMBOL_GPL(trace_array_create);
8384 static int instance_mkdir(const char *name)
8386 return PTR_ERR_OR_ZERO(trace_array_create(name));
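/*
 * instance_mkdir()/instance_rmdir() back the tracefs "instances"
 * directory, so a new, independent trace array can be created and
 * destroyed from user space with nothing more than:
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   rmdir /sys/kernel/tracing/instances/foo
 */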
8389 static int __remove_instance(struct trace_array *tr)
8393 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
8396 list_del(&tr->list);
8398 /* Disable all the flags that were enabled coming in */
8399 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8400 if ((1 << i) & ZEROED_TRACE_FLAGS)
8401 set_tracer_flag(tr, 1 << i, 0);
8404 tracing_set_nop(tr);
8405 clear_ftrace_function_probes(tr);
8406 event_trace_del_tracer(tr);
8407 ftrace_clear_pids(tr);
8408 ftrace_destroy_function_files(tr);
8409 tracefs_remove_recursive(tr->dir);
8410 free_trace_buffers(tr);
8412 for (i = 0; i < tr->nr_topts; i++) {
8413 kfree(tr->topts[i].topts);
8417 free_cpumask_var(tr->tracing_cpumask);
8425 int trace_array_destroy(struct trace_array *tr)
8432 mutex_lock(&event_mutex);
8433 mutex_lock(&trace_types_lock);
8435 ret = __remove_instance(tr);
8437 mutex_unlock(&trace_types_lock);
8438 mutex_unlock(&event_mutex);
8442 EXPORT_SYMBOL_GPL(trace_array_destroy);
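/*
 * trace_array_create() and trace_array_destroy() are exported so that
 * kernel code can manage its own instances programmatically; a minimal
 * (hypothetical) user would look like:
 *
 *   struct trace_array *tr = trace_array_create("my_instance");
 *
 *   if (!IS_ERR(tr))
 *           trace_array_destroy(tr);
 */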
8444 static int instance_rmdir(const char *name)
8446 struct trace_array *tr;
8449 mutex_lock(&event_mutex);
8450 mutex_lock(&trace_types_lock);
8453 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8454 if (tr->name && strcmp(tr->name, name) == 0) {
8455 ret = __remove_instance(tr);
8460 mutex_unlock(&trace_types_lock);
8461 mutex_unlock(&event_mutex);
8466 static __init void create_trace_instances(struct dentry *d_tracer)
8468 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8471 if (WARN_ON(!trace_instance_dir))
8476 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8478 struct trace_event_file *file;
8481 trace_create_file("available_tracers", 0444, d_tracer,
8482 tr, &show_traces_fops);
8484 trace_create_file("current_tracer", 0644, d_tracer,
8485 tr, &set_tracer_fops);
8487 trace_create_file("tracing_cpumask", 0644, d_tracer,
8488 tr, &tracing_cpumask_fops);
8490 trace_create_file("trace_options", 0644, d_tracer,
8491 tr, &tracing_iter_fops);
8493 trace_create_file("trace", 0644, d_tracer,
8496 trace_create_file("trace_pipe", 0444, d_tracer,
8497 tr, &tracing_pipe_fops);
8499 trace_create_file("buffer_size_kb", 0644, d_tracer,
8500 tr, &tracing_entries_fops);
8502 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8503 tr, &tracing_total_entries_fops);
8505 trace_create_file("free_buffer", 0200, d_tracer,
8506 tr, &tracing_free_buffer_fops);
8508 trace_create_file("trace_marker", 0220, d_tracer,
8509 tr, &tracing_mark_fops);
8511 file = __find_event_file(tr, "ftrace", "print");
8512 if (file && file->dir)
8513 trace_create_file("trigger", 0644, file->dir, file,
8514 &event_trigger_fops);
8515 tr->trace_marker_file = file;
8517 trace_create_file("trace_marker_raw", 0220, d_tracer,
8518 tr, &tracing_mark_raw_fops);
8520 trace_create_file("trace_clock", 0644, d_tracer, tr,
8523 trace_create_file("tracing_on", 0644, d_tracer,
8524 tr, &rb_simple_fops);
8526 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8527 &trace_time_stamp_mode_fops);
8529 tr->buffer_percent = 50;
8531 trace_create_file("buffer_percent", 0444, d_tracer,
8532 tr, &buffer_percent_fops);
8534 create_trace_options_dir(tr);
8536 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8537 trace_create_file("tracing_max_latency", 0644, d_tracer,
8538 &tr->max_latency, &tracing_max_lat_fops);
8541 if (ftrace_create_function_files(tr, d_tracer))
8542 WARN(1, "Could not allocate function filter files");
8544 #ifdef CONFIG_TRACER_SNAPSHOT
8545 trace_create_file("snapshot", 0644, d_tracer,
8546 tr, &snapshot_fops);
8549 trace_create_file("error_log", 0644, d_tracer,
8550 tr, &tracing_err_log_fops);
8552 for_each_tracing_cpu(cpu)
8553 tracing_init_tracefs_percpu(tr, cpu);
8555 ftrace_init_tracefs(tr, d_tracer);
8558 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
8560 struct vfsmount *mnt;
8561 struct file_system_type *type;
8564 * To maintain backward compatibility for tools that mount
8565 * debugfs to get to the tracing facility, tracefs is automatically
8566 * mounted to the debugfs/tracing directory.
8568 type = get_fs_type("tracefs");
8571 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8572 put_filesystem(type);
8581 * tracing_init_dentry - initialize top level trace array
8583 * This is called when creating files or directories in the tracing
8584 * directory. It is called via fs_initcall() by any of the boot up code
8585 * and expects to return the dentry of the top level tracing directory.
8587 struct dentry *tracing_init_dentry(void)
8589 struct trace_array *tr = &global_trace;
8591 /* The top level trace array uses NULL as parent */
8595 if (WARN_ON(!tracefs_initialized()) ||
8596 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8597 WARN_ON(!debugfs_initialized())))
8598 return ERR_PTR(-ENODEV);
8601 * As there may still be users that expect the tracing
8602 * files to exist in debugfs/tracing, we must automount
8603 * the tracefs file system there, so older tools still
8604 * work with the newer kernel.
8606 tr->dir = debugfs_create_automount("tracing", NULL,
8607 trace_automount, NULL);
8609 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8610 return ERR_PTR(-ENOMEM);
8616 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8617 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8619 static void __init trace_eval_init(void)
8623 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8624 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8627 #ifdef CONFIG_MODULES
8628 static void trace_module_add_evals(struct module *mod)
8630 if (!mod->num_trace_evals)
8634 * Modules with bad taint do not have events created; do
8635 * not bother with their eval maps either.
8637 if (trace_module_has_bad_taint(mod))
8640 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8643 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8644 static void trace_module_remove_evals(struct module *mod)
8646 union trace_eval_map_item *map;
8647 union trace_eval_map_item **last = &trace_eval_maps;
8649 if (!mod->num_trace_evals)
8652 mutex_lock(&trace_eval_mutex);
8654 map = trace_eval_maps;
8657 if (map->head.mod == mod)
8659 map = trace_eval_jmp_to_tail(map);
8660 last = &map->tail.next;
8661 map = map->tail.next;
8666 *last = trace_eval_jmp_to_tail(map)->tail.next;
8669 mutex_unlock(&trace_eval_mutex);
8672 static inline void trace_module_remove_evals(struct module *mod) { }
8673 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8675 static int trace_module_notify(struct notifier_block *self,
8676 unsigned long val, void *data)
8678 struct module *mod = data;
8681 case MODULE_STATE_COMING:
8682 trace_module_add_evals(mod);
8684 case MODULE_STATE_GOING:
8685 trace_module_remove_evals(mod);
8692 static struct notifier_block trace_module_nb = {
8693 .notifier_call = trace_module_notify,
8696 #endif /* CONFIG_MODULES */
8698 static __init int tracer_init_tracefs(void)
8700 struct dentry *d_tracer;
8702 trace_access_lock_init();
8704 d_tracer = tracing_init_dentry();
8705 if (IS_ERR(d_tracer))
8710 init_tracer_tracefs(&global_trace, d_tracer);
8711 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8713 trace_create_file("tracing_thresh", 0644, d_tracer,
8714 &global_trace, &tracing_thresh_fops);
8716 trace_create_file("README", 0444, d_tracer,
8717 NULL, &tracing_readme_fops);
8719 trace_create_file("saved_cmdlines", 0444, d_tracer,
8720 NULL, &tracing_saved_cmdlines_fops);
8722 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8723 NULL, &tracing_saved_cmdlines_size_fops);
8725 trace_create_file("saved_tgids", 0444, d_tracer,
8726 NULL, &tracing_saved_tgids_fops);
8730 trace_create_eval_file(d_tracer);
8732 #ifdef CONFIG_MODULES
8733 register_module_notifier(&trace_module_nb);
8736 #ifdef CONFIG_DYNAMIC_FTRACE
8737 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8738 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8741 create_trace_instances(d_tracer);
8743 update_tracer_options(&global_trace);
8748 static int trace_panic_handler(struct notifier_block *this,
8749 unsigned long event, void *unused)
8751 if (ftrace_dump_on_oops)
8752 ftrace_dump(ftrace_dump_on_oops);
8756 static struct notifier_block trace_panic_notifier = {
8757 .notifier_call = trace_panic_handler,
8759 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8762 static int trace_die_handler(struct notifier_block *self,
8768 if (ftrace_dump_on_oops)
8769 ftrace_dump(ftrace_dump_on_oops);
8777 static struct notifier_block trace_die_notifier = {
8778 .notifier_call = trace_die_handler,
8783 * printk is capped at a max of 1024 characters; we really don't need
8784 * it that big. Nothing should be printing 1000 characters anyway.
8786 #define TRACE_MAX_PRINT 1000
8789 * Define here KERN_TRACE so that we have one place to modify
8790 * it if we decide to change what log level the ftrace dump should be at.
8793 #define KERN_TRACE KERN_EMERG
8796 trace_printk_seq(struct trace_seq *s)
8798 /* Probably should print a warning here. */
8799 if (s->seq.len >= TRACE_MAX_PRINT)
8800 s->seq.len = TRACE_MAX_PRINT;
8803 * More paranoia: although the buffer size is set to PAGE_SIZE and
8804 * TRACE_MAX_PRINT is only 1000, this is just an extra layer of
8805 * protection.
8807 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8808 s->seq.len = s->seq.size - 1;
8810 /* should be NUL terminated, but we are paranoid. */
8811 s->buffer[s->seq.len] = 0;
8813 printk(KERN_TRACE "%s", s->buffer);
8818 void trace_init_global_iter(struct trace_iterator *iter)
8820 iter->tr = &global_trace;
8821 iter->trace = iter->tr->current_trace;
8822 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8823 iter->trace_buffer = &global_trace.trace_buffer;
8825 if (iter->trace && iter->trace->open)
8826 iter->trace->open(iter);
8828 /* Annotate start of buffers if we had overruns */
8829 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8830 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8832 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8833 if (trace_clocks[iter->tr->clock_id].in_ns)
8834 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8837 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8839 /* use static because iter can be a bit big for the stack */
8840 static struct trace_iterator iter;
8841 static atomic_t dump_running;
8842 struct trace_array *tr = &global_trace;
8843 unsigned int old_userobj;
8844 unsigned long flags;
8847 /* Only allow one dump user at a time. */
8848 if (atomic_inc_return(&dump_running) != 1) {
8849 atomic_dec(&dump_running);
8854 * Always turn off tracing when we dump.
8855 * We don't need to show trace output of what happens
8856 * between multiple crashes.
8858 * If the user does a sysrq-z, then they can re-enable
8859 * tracing with echo 1 > tracing_on.
8863 local_irq_save(flags);
8864 printk_nmi_direct_enter();
8866 /* Simulate the iterator */
8867 trace_init_global_iter(&iter);
8869 for_each_tracing_cpu(cpu) {
8870 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8873 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8875 /* don't look at user memory in panic mode */
8876 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8878 switch (oops_dump_mode) {
8880 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8883 iter.cpu_file = raw_smp_processor_id();
8888 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8889 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8892 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8894 /* Did function tracer already get disabled? */
8895 if (ftrace_is_dead()) {
8896 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8897 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8901 * We need to stop all tracing on all CPUs to read
8902 * the next buffer. This is a bit expensive, but is
8903 * not done often. We fill all that we can read,
8904 * and then release the locks again.
8907 while (!trace_empty(&iter)) {
8910 printk(KERN_TRACE "---------------------------------\n");
8914 trace_iterator_reset(&iter);
8915 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8917 if (trace_find_next_entry_inc(&iter) != NULL) {
8920 ret = print_trace_line(&iter);
8921 if (ret != TRACE_TYPE_NO_CONSUME)
8922 trace_consume(&iter);
8924 touch_nmi_watchdog();
8926 trace_printk_seq(&iter.seq);
8930 printk(KERN_TRACE " (ftrace buffer empty)\n");
8932 printk(KERN_TRACE "---------------------------------\n");
8935 tr->trace_flags |= old_userobj;
8937 for_each_tracing_cpu(cpu) {
8938 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8940 atomic_dec(&dump_running);
8941 printk_nmi_direct_exit();
8942 local_irq_restore(flags);
8944 EXPORT_SYMBOL_GPL(ftrace_dump);
8946 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8953 argv = argv_split(GFP_KERNEL, buf, &argc);
8958 ret = createfn(argc, argv);
8965 #define WRITE_BUFSIZE 4096
8967 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8968 size_t count, loff_t *ppos,
8969 int (*createfn)(int, char **))
8971 char *kbuf, *buf, *tmp;
8976 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8980 while (done < count) {
8981 size = count - done;
8983 if (size >= WRITE_BUFSIZE)
8984 size = WRITE_BUFSIZE - 1;
8986 if (copy_from_user(kbuf, buffer + done, size)) {
8993 tmp = strchr(buf, '\n');
8996 size = tmp - buf + 1;
8999 if (done + size < count) {
9002 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9003 pr_warn("Line length is too long: Should be less than %d\n",
9011 /* Remove comments */
9012 tmp = strchr(buf, '#');
9017 ret = trace_run_command(buf, createfn);
9022 } while (done < count);
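/*
 * trace_parse_run_command() is the common write helper for command
 * style tracefs files (the dynamic event interfaces, for instance):
 * it splits the user buffer into '\n'-terminated lines, strips '#'
 * comments as seen above, and feeds each line as an argv array to the
 * caller's createfn().
 */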
9032 __init static int tracer_alloc_buffers(void)
9038 * Make sure we don't accidentally add more trace options
9039 * than we have bits for.
9041 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9043 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9046 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9047 goto out_free_buffer_mask;
9049 /* Only allocate trace_printk buffers if a trace_printk exists */
9050 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
9051 /* Must be called before global_trace.buffer is allocated */
9052 trace_printk_init_buffers();
9054 /* To save memory, keep the ring buffer size to its minimum */
9055 if (ring_buffer_expanded)
9056 ring_buf_size = trace_buf_size;
9060 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9061 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9063 raw_spin_lock_init(&global_trace.start_lock);
9066 * The prepare callback allocates some memory for the ring buffer. We
9067 * don't free the buffer if the CPU goes down. If we were to free
9068 * the buffer, then the user would lose any trace that was in the
9069 * buffer. The memory will be removed once the "instance" is removed.
9071 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9072 "trace/RB:preapre", trace_rb_cpu_prepare,
9075 goto out_free_cpumask;
9076 /* Used for event triggers */
9078 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9080 goto out_rm_hp_state;
9082 if (trace_create_savedcmd() < 0)
9083 goto out_free_temp_buffer;
9085 /* TODO: make the number of buffers hot pluggable with CPUS */
9086 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9087 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
9089 goto out_free_savedcmd;
9092 if (global_trace.buffer_disabled)
9095 if (trace_boot_clock) {
9096 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9098 pr_warn("Trace clock %s not defined, going back to default\n",
9103 * register_tracer() might reference current_trace, so it
9104 * needs to be set before we register anything. This is
9105 * just a bootstrap of current_trace anyway.
9107 global_trace.current_trace = &nop_trace;
9109 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9111 ftrace_init_global_array_ops(&global_trace);
9113 init_trace_flags_index(&global_trace);
9115 register_tracer(&nop_trace);
9117 /* Function tracing may start here (via kernel command line) */
9118 init_function_trace();
9120 /* All seems OK, enable tracing */
9121 tracing_disabled = 0;
9123 atomic_notifier_chain_register(&panic_notifier_list,
9124 &trace_panic_notifier);
9126 register_die_notifier(&trace_die_notifier);
9128 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9130 INIT_LIST_HEAD(&global_trace.systems);
9131 INIT_LIST_HEAD(&global_trace.events);
9132 INIT_LIST_HEAD(&global_trace.hist_vars);
9133 INIT_LIST_HEAD(&global_trace.err_log);
9134 list_add(&global_trace.list, &ftrace_trace_arrays);
9136 apply_trace_boot_options();
9138 register_snapshot_cmd();
9143 free_saved_cmdlines_buffer(savedcmd);
9144 out_free_temp_buffer:
9145 ring_buffer_free(temp_buffer);
9147 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9149 free_cpumask_var(global_trace.tracing_cpumask);
9150 out_free_buffer_mask:
9151 free_cpumask_var(tracing_buffer_mask);
9156 void __init early_trace_init(void)
9158 if (tracepoint_printk) {
9159 tracepoint_print_iter =
9160 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9161 if (WARN_ON(!tracepoint_print_iter))
9162 tracepoint_printk = 0;
9164 static_key_enable(&tracepoint_printk_key.key);
9166 tracer_alloc_buffers();
9169 void __init trace_init(void)
9174 __init static int clear_boot_tracer(void)
9177 * The default bootup tracer's name lives in an init section.
9178 * This function is called at late_initcall time. If the boot
9179 * tracer was never found and registered, clear the pointer to
9180 * prevent a later registration from accessing memory that is
9181 * about to be freed.
9183 if (!default_bootup_tracer)
9186 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9187 default_bootup_tracer);
9188 default_bootup_tracer = NULL;
9193 fs_initcall(tracer_init_tracefs);
9194 late_initcall_sync(clear_boot_tracer);
9196 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9197 __init static int tracing_set_default_clock(void)
9199 /* sched_clock_stable() is determined in late_initcall */
9200 if (!trace_boot_clock && !sched_clock_stable()) {
9202 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9203 "If you want to keep using the local clock, then add:\n"
9204 " \"trace_clock=local\"\n"
9205 "on the kernel command line\n");
9206 tracing_set_clock(&global_trace, "global");
9211 late_initcall_sync(tracing_set_default_clock);