2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/trace.h>
44 #include <linux/sched/rt.h>
47 #include "trace_output.h"
50 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
53 bool ring_buffer_expanded;
56 * We need to change this state when a selftest is running.
57 * A selftest will look into the ring buffer to count the
58 * entries inserted during the selftest, although concurrent
59 * insertions into the ring buffer, such as trace_printk, could occur
60 * at the same time, giving false positive or negative results.
62 static bool __read_mostly tracing_selftest_running;
65 * If a tracer is running, we do not want to run SELFTEST.
67 bool __read_mostly tracing_selftest_disabled;
69 /* Pipe tracepoints to printk */
70 struct trace_iterator *tracepoint_print_iter;
71 int tracepoint_printk;
72 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
74 /* For tracers that don't implement custom flags */
75 static struct tracer_opt dummy_tracer_opt[] = {
80 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
86 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event
90 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
93 * Kill all tracing for good (never come back).
94 * It is initialized to 1 but will turn to zero if the initialization
95 * of the tracer is successful. But that is the only place that sets
98 static int tracing_disabled = 1;
100 cpumask_var_t __read_mostly tracing_buffer_mask;
103 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
105 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
106 * is set, then ftrace_dump is called. This will output the contents
107 * of the ftrace buffers to the console. This is very useful for
108 * capturing traces that lead to crashes and outputting them to a
111 * It is off by default, but you can enable it either by specifying
112 * "ftrace_dump_on_oops" on the kernel command line, or by setting
113 * /proc/sys/kernel/ftrace_dump_on_oops
114 * Set it to 1 to dump the buffers of all CPUs
115 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
118 enum ftrace_dump_mode ftrace_dump_on_oops;
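/*
 * Illustrative usage only (not part of the original source), based on the
 * description above and the parser in set_ftrace_dump_on_oops() below:
 *
 *   ftrace_dump_on_oops                on the kernel command line,
 *                                      dump the buffers of all CPUs
 *   ftrace_dump_on_oops=orig_cpu       dump only the CPU that triggered
 *                                      the oops
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *                                      enable it at run time
 */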
120 /* When set, tracing will stop when a WARN*() is hit */
121 int __disable_trace_on_warning;
123 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
124 /* Map of enums to their values, for "eval_map" file */
125 struct trace_eval_map_head {
127 unsigned long length;
130 union trace_eval_map_item;
132 struct trace_eval_map_tail {
134 * "end" is first and points to NULL as it must be different
135 * than "mod" or "eval_string"
137 union trace_eval_map_item *next;
138 const char *end; /* points to NULL */
141 static DEFINE_MUTEX(trace_eval_mutex);
144 * The trace_eval_maps are saved in an array with two extra elements,
145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
148 * pointer to the next array of saved eval_map items.
150 union trace_eval_map_item {
151 struct trace_eval_map map;
152 struct trace_eval_map_head head;
153 struct trace_eval_map_tail tail;
156 static union trace_eval_map_item *trace_eval_maps;
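/*
 * Illustrative layout sketch (not part of the original source), for an
 * array holding N saved maps, following the description above:
 *
 *   trace_eval_maps[0]        head  (head.length = N, head.mod = module)
 *   trace_eval_maps[1..N]     map   (the saved trace_eval_map entries)
 *   trace_eval_maps[N + 1]    tail  (tail.next -> next saved array)
 */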
157 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
159 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
161 #define MAX_TRACER_SIZE 100
162 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
163 static char *default_bootup_tracer;
165 static bool allocate_snapshot;
167 static int __init set_cmdline_ftrace(char *str)
169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
170 default_bootup_tracer = bootup_tracer_buf;
171 /* We are using ftrace early, expand it */
172 ring_buffer_expanded = true;
175 __setup("ftrace=", set_cmdline_ftrace);
177 static int __init set_ftrace_dump_on_oops(char *str)
179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
191 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
193 static int __init stop_trace_on_warning(char *str)
195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
199 __setup("traceoff_on_warning", stop_trace_on_warning);
201 static int __init boot_alloc_snapshot(char *str)
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
208 __setup("alloc_snapshot", boot_alloc_snapshot);
211 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
213 static int __init set_trace_boot_options(char *str)
215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
218 __setup("trace_options=", set_trace_boot_options);
220 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221 static char *trace_boot_clock __initdata;
223 static int __init set_trace_boot_clock(char *str)
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
229 __setup("trace_clock=", set_trace_boot_clock);
231 static int __init set_tracepoint_printk(char *str)
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
237 __setup("tp_printk", set_tracepoint_printk);
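/*
 * Illustrative boot usage (a sketch, not from the original source),
 * combining the parameters handled above. The option and clock names are
 * examples; the available values depend on the kernel configuration:
 *
 *   ftrace=function trace_options=sym-addr trace_clock=global \
 *       traceoff_on_warning alloc_snapshot tp_printk
 */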
239 unsigned long long ns2usecs(u64 nsec)
246 /* trace_flags holds trace_options default values */
247 #define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
254 /* trace_options that are only supported by global_trace */
255 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
258 /* trace_flags that are default zero for instances */
259 #define ZEROED_TRACE_FLAGS \
260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
266 static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
270 LIST_HEAD(ftrace_trace_arrays);
272 int trace_array_get(struct trace_array *this_tr)
274 struct trace_array *tr;
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
285 mutex_unlock(&trace_types_lock);
290 static void __trace_array_put(struct trace_array *this_tr)
292 WARN_ON(!this_tr->ref);
296 void trace_array_put(struct trace_array *this_tr)
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
303 int call_filter_check_discard(struct trace_event_call *call, void *rec,
304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
309 __trace_event_discard_commit(buffer, event);
316 void trace_free_pid_list(struct trace_pid_list *pid_list)
318 vfree(pid_list->pids);
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
327 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
330 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
336 if (search_pid >= filtered_pids->pid_max)
339 return test_bit(search_pid, filtered_pids->pids);
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
352 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
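/*
 * Illustrative caller sketch (assumed, not from the original source):
 * an event probe deciding whether to trace a task, with the pid list
 * fetched under the scheduler RCU read side.
 *
 *   pid_list = rcu_dereference_sched(tr->filtered_pids);
 *   if (trace_ignore_this_task(pid_list, task))
 *           return;
 */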
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
376 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
383 /* For forks, we only add if the forking task is listed */
385 if (!trace_find_filtered_pid(pid_list, self->pid))
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
393 /* "self" is set for forks, and NULL for exits */
395 set_bit(task->pid, pid_list->pids);
397 clear_bit(task->pid, pid_list->pids);
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
412 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
414 unsigned long pid = (unsigned long)v;
418 /* pid already is +1 of the actual previous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
433 * This is used by seq_file "start" operation to start the iteration
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
439 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
460 * Can be directly used by seq_file operations to display the current
463 int trace_pid_show(struct seq_file *m, void *v)
465 unsigned long pid = (unsigned long)v - 1;
467 seq_printf(m, "%lu\n", pid);
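/*
 * Illustrative wiring sketch (assumed, not from the original source):
 * trace_pid_start(), trace_pid_next() and trace_pid_show() are meant to
 * back the seq_file operations of a pid listing file, roughly:
 *
 *   static const struct seq_operations example_pid_sops = {
 *           .start = example_start,    // wraps trace_pid_start()
 *           .next  = example_next,     // wraps trace_pid_next()
 *           .stop  = example_stop,
 *           .show  = trace_pid_show,
 *   };
 */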
471 /* 128 should be much more than enough */
472 #define PID_BUF_SIZE 127
474 int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
491 * Always recreate a new array. The write is an all or nothing
492 * operation. Always create a new array when adding new pids by
493 * the user. If the operation fails, then the current list is
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
500 pid_list->pid_max = READ_ONCE(pid_max);
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
513 /* copy the current bits to the new max */
514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
516 set_bit(pid, pid_list->pids);
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
533 parser.buffer[parser.idx] = 0;
536 if (kstrtoul(parser.buffer, 0, &val))
538 if (val >= pid_list->pid_max)
543 set_bit(pid, pid_list->pids);
546 trace_parser_clear(&parser);
549 trace_parser_put(&parser);
552 trace_free_pid_list(pid_list);
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
563 *new_pid_list = pid_list;
568 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
572 /* Early boot up does not have a buffer yet */
574 return trace_clock_local();
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
582 u64 ftrace_now(int cpu)
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
588 * tracing_is_enabled - Show if global_trace has been disabled
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
596 int tracing_is_enabled(void)
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
604 return !global_trace.buffer_disabled;
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
612 * This number is purposely set to a low number of 16384.
613 * If the dump on oops happens, it is much appreciated not to
614 * have to wait for all that output. Anyway, this is configurable
615 * at both boot time and run time.
617 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
619 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
621 /* trace_types holds a linked list of available tracers. */
622 static struct tracer *trace_types __read_mostly;
625 * trace_types_lock is used to protect the trace_types list.
627 DEFINE_MUTEX(trace_types_lock);
630 * serialize the access of the ring buffer
632 * The ring buffer serializes readers, but that is only low-level protection.
633 * The validity of the events (as returned by ring_buffer_peek(), etc.)
634 * is not protected by the ring buffer.
636 * The content of events may become garbage if we allow another process to
637 * consume these events concurrently:
638 * A) the page of the consumed events may become a normal page
639 * (not a reader page) in the ring buffer, and this page will be rewritten
640 * by the event producer.
641 * B) The page of the consumed events may become a page for splice_read,
642 * and this page will be returned to the system.
644 * These primitives allow multi-process access to different cpu ring buffers
647 * These primitives don't distinguish read-only and read-consume access.
648 * Multiple read-only accesses are also serialized.
652 static DECLARE_RWSEM(all_cpu_access_lock);
653 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
655 static inline void trace_access_lock(int cpu)
657 if (cpu == RING_BUFFER_ALL_CPUS) {
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
661 /* gain it for accessing a cpu ring buffer. */
663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
664 down_read(&all_cpu_access_lock);
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
671 static inline void trace_access_unlock(int cpu)
673 if (cpu == RING_BUFFER_ALL_CPUS) {
674 up_write(&all_cpu_access_lock);
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
681 static inline void trace_access_lock_init(void)
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
691 static DEFINE_MUTEX(access_lock);
693 static inline void trace_access_lock(int cpu)
696 mutex_lock(&access_lock);
699 static inline void trace_access_unlock(int cpu)
702 mutex_unlock(&access_lock);
705 static inline void trace_access_lock_init(void)
711 #ifdef CONFIG_STACKTRACE
712 static void __ftrace_trace_stack(struct ring_buffer *buffer,
714 int skip, int pc, struct pt_regs *regs);
715 static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
718 int skip, int pc, struct pt_regs *regs);
721 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
723 int skip, int pc, struct pt_regs *regs)
726 static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
729 int skip, int pc, struct pt_regs *regs)
735 static __always_inline void
736 trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
739 struct trace_entry *ent = ring_buffer_event_data(event);
741 tracing_generic_entry_update(ent, flags, pc);
745 static __always_inline struct ring_buffer_event *
746 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
749 unsigned long flags, int pc)
751 struct ring_buffer_event *event;
753 event = ring_buffer_lock_reserve(buffer, len);
755 trace_event_setup(event, type, flags, pc);
760 void tracer_tracing_on(struct trace_array *tr)
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races where it gets disabled while we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
778 * tracing_on - enable tracing buffers
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
783 void tracing_on(void)
785 tracer_tracing_on(&global_trace);
787 EXPORT_SYMBOL_GPL(tracing_on);
790 static __always_inline void
791 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
793 __this_cpu_write(trace_taskinfo_save, true);
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
802 ring_buffer_unlock_commit(buffer, event);
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
811 int __trace_puts(unsigned long ip, const char *str, int size)
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
823 pc = preempt_count();
825 if (unlikely(tracing_selftest_running || tracing_disabled))
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
837 entry = ring_buffer_event_data(event);
840 memcpy(&entry->buf, str, size);
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
847 entry->buf[size] = '\0';
849 __buffer_unlock_commit(buffer, event);
850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
854 EXPORT_SYMBOL_GPL(__trace_puts);
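/*
 * Illustrative use (assumed, not from the original source): callers
 * normally go through the trace_puts() macro, which picks __trace_puts()
 * or __trace_bputs() depending on whether the string is a built-in
 * constant, e.g.
 *
 *   trace_puts("reached the slow path\n");
 */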
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
861 int __trace_bputs(unsigned long ip, const char *str)
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
873 pc = preempt_count();
875 if (unlikely(tracing_selftest_running || tracing_disabled))
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
885 entry = ring_buffer_event_data(event);
889 __buffer_unlock_commit(buffer, event);
890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
894 EXPORT_SYMBOL_GPL(__trace_bputs);
896 #ifdef CONFIG_TRACER_SNAPSHOT
897 static void tracing_snapshot_instance(struct trace_array *tr)
899 struct tracer *tracer = tr->current_trace;
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
908 if (!tr->allocated_snapshot) {
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id());
924 local_irq_restore(flags);
928 * trace_snapshot - take a snapshot of the current buffer.
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
941 void tracing_snapshot(void)
943 struct trace_array *tr = &global_trace;
945 tracing_snapshot_instance(tr);
947 EXPORT_SYMBOL_GPL(tracing_snapshot);
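/*
 * Illustrative use sketch (assumed, not from the original source),
 * following the kernel-doc above: snapshot on a condition from kernel
 * code, with the snapshot buffer allocated beforehand.
 *
 *   if (unlikely(hit_rare_condition))
 *           tracing_snapshot();
 *
 * Or from user space:
 *
 *   echo 1 > /sys/kernel/debug/tracing/snapshot
 */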
949 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950 struct trace_buffer *size_buf, int cpu_id);
951 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
953 static int alloc_snapshot(struct trace_array *tr)
957 if (!tr->allocated_snapshot) {
959 /* allocate spare buffer */
960 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
965 tr->allocated_snapshot = true;
971 static void free_snapshot(struct trace_array *tr)
974 * We don't free the ring buffer; instead, we resize it because
975 * the max_tr ring buffer has some state (e.g. ring->clock) and
976 * we want to preserve it.
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979 set_buffer_entries(&tr->max_buffer, 1);
980 tracing_reset_online_cpus(&tr->max_buffer);
981 tr->allocated_snapshot = false;
985 * tracing_alloc_snapshot - allocate snapshot buffer.
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
994 int tracing_alloc_snapshot(void)
996 struct trace_array *tr = &global_trace;
999 ret = alloc_snapshot(tr);
1004 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1007 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
1009 * This is similar to trace_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1017 void tracing_snapshot_alloc(void)
1021 ret = tracing_alloc_snapshot();
1027 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1029 void tracing_snapshot(void)
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1033 EXPORT_SYMBOL_GPL(tracing_snapshot);
1034 int tracing_alloc_snapshot(void)
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1039 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1040 void tracing_snapshot_alloc(void)
1045 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1046 #endif /* CONFIG_TRACER_SNAPSHOT */
1048 void tracer_tracing_off(struct trace_array *tr)
1050 if (tr->trace_buffer.buffer)
1051 ring_buffer_record_off(tr->trace_buffer.buffer);
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races where it gets disabled while we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1060 tr->buffer_disabled = 1;
1061 /* Make the flag seen by readers */
1066 * tracing_off - turn off tracing buffers
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1073 void tracing_off(void)
1075 tracer_tracing_off(&global_trace);
1077 EXPORT_SYMBOL_GPL(tracing_off);
1079 void disable_trace_on_warning(void)
1081 if (__disable_trace_on_warning)
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1089 * Shows real state of the ring buffer if it is enabled or not.
1091 int tracer_tracing_is_on(struct trace_array *tr)
1093 if (tr->trace_buffer.buffer)
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095 return !tr->buffer_disabled;
1099 * tracing_is_on - show state of ring buffers enabled
1101 int tracing_is_on(void)
1103 return tracer_tracing_is_on(&global_trace);
1105 EXPORT_SYMBOL_GPL(tracing_is_on);
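/*
 * Illustrative use sketch (assumed, not from the original source):
 * bracket a region of interest, then inspect the buffer afterwards.
 * do_something_interesting() stands in for the caller's own code.
 *
 *   tracing_on();
 *   do_something_interesting();
 *   tracing_off();
 *   WARN_ON(tracing_is_on());
 */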
1107 static int __init set_buf_size(char *str)
1109 unsigned long buf_size;
1113 buf_size = memparse(str, &str);
1114 /* nr_entries can not be zero */
1117 trace_buf_size = buf_size;
1120 __setup("trace_buf_size=", set_buf_size);
1122 static int __init set_tracing_thresh(char *str)
1124 unsigned long threshold;
1129 ret = kstrtoul(str, 0, &threshold);
1132 tracing_thresh = threshold * 1000;
1135 __setup("tracing_thresh=", set_tracing_thresh);
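/*
 * Illustrative boot usage (a sketch, not from the original source):
 *
 *   trace_buf_size=1M      per-cpu buffer size; memparse() accepts
 *                          K/M/G suffixes
 *   tracing_thresh=100     threshold in microseconds (stored in
 *                          nanoseconds by the handler above)
 */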
1137 unsigned long nsecs_to_usecs(unsigned long nsecs)
1139 return nsecs / 1000;
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1146 * of strings in the order that the evals (enum) were defined.
1151 /* These must match the bit positions in trace_iterator_flags */
1152 static const char *trace_options[] = {
1160 int in_ns; /* is this clock in nanoseconds? */
1161 } trace_clocks[] = {
1162 { trace_clock_local, "local", 1 },
1163 { trace_clock_global, "global", 1 },
1164 { trace_clock_counter, "counter", 0 },
1165 { trace_clock_jiffies, "uptime", 0 },
1166 { trace_clock, "perf", 1 },
1167 { ktime_get_mono_fast_ns, "mono", 1 },
1168 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1169 { ktime_get_boot_fast_ns, "boot", 1 },
1174 * trace_parser_get_init - gets the buffer for trace parser
1176 int trace_parser_get_init(struct trace_parser *parser, int size)
1178 memset(parser, 0, sizeof(*parser));
1180 parser->buffer = kmalloc(size, GFP_KERNEL);
1181 if (!parser->buffer)
1184 parser->size = size;
1189 * trace_parser_put - frees the buffer for trace parser
1191 void trace_parser_put(struct trace_parser *parser)
1193 kfree(parser->buffer);
1194 parser->buffer = NULL;
1198 * trace_get_user - reads the user input string separated by space
1199 * (matched by isspace(ch))
1201 * For each string found the 'struct trace_parser' is updated,
1202 * and the function returns.
1204 * Returns number of bytes read.
1206 * See kernel/trace/trace.h for 'struct trace_parser' details.
1208 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1209 size_t cnt, loff_t *ppos)
1216 trace_parser_clear(parser);
1218 ret = get_user(ch, ubuf++);
1226 * The parser is not finished with the last write,
1227 * continue reading the user input without skipping spaces.
1229 if (!parser->cont) {
1230 /* skip white space */
1231 while (cnt && isspace(ch)) {
1232 ret = get_user(ch, ubuf++);
1239 /* only spaces were written */
1249 /* read the non-space input */
1250 while (cnt && !isspace(ch)) {
1251 if (parser->idx < parser->size - 1)
1252 parser->buffer[parser->idx++] = ch;
1257 ret = get_user(ch, ubuf++);
1264 /* We either got finished input or we have to wait for another call. */
1266 parser->buffer[parser->idx] = 0;
1267 parser->cont = false;
1268 } else if (parser->idx < parser->size - 1) {
1269 parser->cont = true;
1270 parser->buffer[parser->idx++] = ch;
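/*
 * Illustrative caller sketch (assumed, not from the original source):
 * a write() handler tokenizing user input with the parser helpers.
 * handle_token() is a hypothetical consumer.
 *
 *   struct trace_parser parser;
 *   ssize_t read;
 *
 *   if (trace_parser_get_init(&parser, 64))
 *           return -ENOMEM;
 *   read = trace_get_user(&parser, ubuf, cnt, ppos);
 *   if (read >= 0 && trace_parser_loaded(&parser))
 *           handle_token(parser.buffer);
 *   trace_parser_put(&parser);
 */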
1283 /* TODO add a seq_buf_to_buffer() */
1284 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1288 if (trace_seq_used(s) <= s->seq.readpos)
1291 len = trace_seq_used(s) - s->seq.readpos;
1294 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1296 s->seq.readpos += cnt;
1300 unsigned long __read_mostly tracing_thresh;
1302 #ifdef CONFIG_TRACER_MAX_TRACE
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1309 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1311 struct trace_buffer *trace_buf = &tr->trace_buffer;
1312 struct trace_buffer *max_buf = &tr->max_buffer;
1313 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1314 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1317 max_buf->time_start = data->preempt_timestamp;
1319 max_data->saved_latency = tr->max_latency;
1320 max_data->critical_start = data->critical_start;
1321 max_data->critical_end = data->critical_end;
1323 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1324 max_data->pid = tsk->pid;
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1330 max_data->uid = current_uid();
1332 max_data->uid = task_uid(tsk);
1334 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1335 max_data->policy = tsk->policy;
1336 max_data->rt_priority = tsk->rt_priority;
1338 /* record this task's comm */
1339 tracing_record_cmdline(tsk);
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1352 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1354 struct ring_buffer *buf;
1359 WARN_ON_ONCE(!irqs_disabled());
1361 if (!tr->allocated_snapshot) {
1362 /* Only the nop tracer should hit this when disabling */
1363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1367 arch_spin_lock(&tr->max_lock);
1369 buf = tr->trace_buffer.buffer;
1370 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1371 tr->max_buffer.buffer = buf;
1373 __update_max_tr(tr, tsk, cpu);
1374 arch_spin_unlock(&tr->max_lock);
1378 * update_max_tr_single - only copy one trace over, and reset the rest
1380 * @tsk - task with the latency
1381 * @cpu - the cpu of the buffer to copy.
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1386 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1393 WARN_ON_ONCE(!irqs_disabled());
1394 if (!tr->allocated_snapshot) {
1395 /* Only the nop tracer should hit this when disabling */
1396 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1400 arch_spin_lock(&tr->max_lock);
1402 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1404 if (ret == -EBUSY) {
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1411 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1412 "Failed to swap buffers due to commit in progress\n");
1415 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1417 __update_max_tr(tr, tsk, cpu);
1418 arch_spin_unlock(&tr->max_lock);
1420 #endif /* CONFIG_TRACER_MAX_TRACE */
1422 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter, iter->cpu_file))
1428 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1432 #ifdef CONFIG_FTRACE_STARTUP_TEST
1433 static bool selftests_can_run;
1435 struct trace_selftests {
1436 struct list_head list;
1437 struct tracer *type;
1440 static LIST_HEAD(postponed_selftests);
1442 static int save_selftest(struct tracer *type)
1444 struct trace_selftests *selftest;
1446 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1450 selftest->type = type;
1451 list_add(&selftest->list, &postponed_selftests);
1455 static int run_tracer_selftest(struct tracer *type)
1457 struct trace_array *tr = &global_trace;
1458 struct tracer *saved_tracer = tr->current_trace;
1461 if (!type->selftest || tracing_selftest_disabled)
1465 * If a tracer registers early in boot up (before scheduling is
1466 * initialized and such), then do not run its selftests yet.
1467 * Instead, run it a little later in the boot process.
1469 if (!selftests_can_run)
1470 return save_selftest(type);
1473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
1479 tracing_reset_online_cpus(&tr->trace_buffer);
1481 tr->current_trace = type;
1483 #ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type->use_max_tr) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded)
1487 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1488 RING_BUFFER_ALL_CPUS);
1489 tr->allocated_snapshot = true;
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type->name);
1495 ret = type->selftest(type, tr);
1496 /* the test is responsible for resetting too */
1497 tr->current_trace = saved_tracer;
1499 printk(KERN_CONT "FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr->trace_buffer);
1507 #ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type->use_max_tr) {
1509 tr->allocated_snapshot = false;
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded)
1513 ring_buffer_resize(tr->max_buffer.buffer, 1,
1514 RING_BUFFER_ALL_CPUS);
1518 printk(KERN_CONT "PASSED\n");
1522 static __init int init_trace_selftests(void)
1524 struct trace_selftests *p, *n;
1525 struct tracer *t, **last;
1528 selftests_can_run = true;
1530 mutex_lock(&trace_types_lock);
1532 if (list_empty(&postponed_selftests))
1535 pr_info("Running postponed tracer tests:\n");
1537 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1538 ret = run_tracer_selftest(p->type);
1539 /* If the test fails, then warn and remove from available_tracers */
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1543 last = &trace_types;
1544 for (t = trace_types; t; t = t->next) {
1557 mutex_unlock(&trace_types_lock);
1561 core_initcall(init_trace_selftests);
1563 static inline int run_tracer_selftest(struct tracer *type)
1567 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1569 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1571 static void __init apply_trace_boot_options(void);
1574 * register_tracer - register a tracer with the ftrace system.
1575 * @type - the plugin for the tracer
1577 * Register a new plugin tracer.
1579 int __init register_tracer(struct tracer *type)
1585 pr_info("Tracer must have a name\n");
1589 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1594 mutex_lock(&trace_types_lock);
1596 tracing_selftest_running = true;
1598 for (t = trace_types; t; t = t->next) {
1599 if (strcmp(type->name, t->name) == 0) {
1601 pr_info("Tracer %s already registered\n",
1608 if (!type->set_flag)
1609 type->set_flag = &dummy_set_flag;
1611 /* allocate a dummy tracer_flags */
1612 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1617 type->flags->val = 0;
1618 type->flags->opts = dummy_tracer_opt;
1620 if (!type->flags->opts)
1621 type->flags->opts = dummy_tracer_opt;
1623 /* store the tracer for __set_tracer_option */
1624 type->flags->trace = type;
1626 ret = run_tracer_selftest(type);
1630 type->next = trace_types;
1632 add_tracer_options(&global_trace, type);
1635 tracing_selftest_running = false;
1636 mutex_unlock(&trace_types_lock);
1638 if (ret || !default_bootup_tracer)
1641 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1644 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1645 /* Do we want this tracer to start on bootup? */
1646 tracing_set_tracer(&global_trace, type->name);
1647 default_bootup_tracer = NULL;
1649 apply_trace_boot_options();
1651 /* disable other selftests, since this will break it. */
1652 tracing_selftest_disabled = true;
1653 #ifdef CONFIG_FTRACE_STARTUP_TEST
1654 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1662 void tracing_reset(struct trace_buffer *buf, int cpu)
1664 struct ring_buffer *buffer = buf->buffer;
1669 ring_buffer_record_disable(buffer);
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
1673 ring_buffer_reset_cpu(buffer, cpu);
1675 ring_buffer_record_enable(buffer);
1678 void tracing_reset_online_cpus(struct trace_buffer *buf)
1680 struct ring_buffer *buffer = buf->buffer;
1686 ring_buffer_record_disable(buffer);
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1691 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1693 for_each_online_cpu(cpu)
1694 ring_buffer_reset_cpu(buffer, cpu);
1696 ring_buffer_record_enable(buffer);
1699 /* Must have trace_types_lock held */
1700 void tracing_reset_all_online_cpus(void)
1702 struct trace_array *tr;
1704 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1705 if (!tr->clear_trace)
1707 tr->clear_trace = false;
1708 tracing_reset_online_cpus(&tr->trace_buffer);
1709 #ifdef CONFIG_TRACER_MAX_TRACE
1710 tracing_reset_online_cpus(&tr->max_buffer);
1715 static int *tgid_map;
1717 #define SAVED_CMDLINES_DEFAULT 128
1718 #define NO_CMDLINE_MAP UINT_MAX
1719 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1720 struct saved_cmdlines_buffer {
1721 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1722 unsigned *map_cmdline_to_pid;
1723 unsigned cmdline_num;
1725 char *saved_cmdlines;
1727 static struct saved_cmdlines_buffer *savedcmd;
1729 /* temporarily disable recording */
1730 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1732 static inline char *get_saved_cmdlines(int idx)
1734 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1737 static inline void set_cmdline(int idx, const char *cmdline)
1739 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1742 static int allocate_cmdlines_buffer(unsigned int val,
1743 struct saved_cmdlines_buffer *s)
1745 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1747 if (!s->map_cmdline_to_pid)
1750 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1751 if (!s->saved_cmdlines) {
1752 kfree(s->map_cmdline_to_pid);
1757 s->cmdline_num = val;
1758 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1759 sizeof(s->map_pid_to_cmdline));
1760 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1761 val * sizeof(*s->map_cmdline_to_pid));
1766 static int trace_create_savedcmd(void)
1770 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1774 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1784 int is_tracing_stopped(void)
1786 return global_trace.stop_count;
1790 * tracing_start - quick start of the tracer
1792 * If tracing is enabled but was stopped by tracing_stop,
1793 * this will start the tracer back up.
1795 void tracing_start(void)
1797 struct ring_buffer *buffer;
1798 unsigned long flags;
1800 if (tracing_disabled)
1803 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1804 if (--global_trace.stop_count) {
1805 if (global_trace.stop_count < 0) {
1806 /* Someone screwed up their debugging */
1808 global_trace.stop_count = 0;
1813 /* Prevent the buffers from switching */
1814 arch_spin_lock(&global_trace.max_lock);
1816 buffer = global_trace.trace_buffer.buffer;
1818 ring_buffer_record_enable(buffer);
1820 #ifdef CONFIG_TRACER_MAX_TRACE
1821 buffer = global_trace.max_buffer.buffer;
1823 ring_buffer_record_enable(buffer);
1826 arch_spin_unlock(&global_trace.max_lock);
1829 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1832 static void tracing_start_tr(struct trace_array *tr)
1834 struct ring_buffer *buffer;
1835 unsigned long flags;
1837 if (tracing_disabled)
1840 /* If global, we need to also start the max tracer */
1841 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1842 return tracing_start();
1844 raw_spin_lock_irqsave(&tr->start_lock, flags);
1846 if (--tr->stop_count) {
1847 if (tr->stop_count < 0) {
1848 /* Someone screwed up their debugging */
1855 buffer = tr->trace_buffer.buffer;
1857 ring_buffer_record_enable(buffer);
1860 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1864 * tracing_stop - quick stop of the tracer
1866 * Light weight way to stop tracing. Use in conjunction with
1869 void tracing_stop(void)
1871 struct ring_buffer *buffer;
1872 unsigned long flags;
1874 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1875 if (global_trace.stop_count++)
1878 /* Prevent the buffers from switching */
1879 arch_spin_lock(&global_trace.max_lock);
1881 buffer = global_trace.trace_buffer.buffer;
1883 ring_buffer_record_disable(buffer);
1885 #ifdef CONFIG_TRACER_MAX_TRACE
1886 buffer = global_trace.max_buffer.buffer;
1888 ring_buffer_record_disable(buffer);
1891 arch_spin_unlock(&global_trace.max_lock);
1894 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1897 static void tracing_stop_tr(struct trace_array *tr)
1899 struct ring_buffer *buffer;
1900 unsigned long flags;
1902 /* If global, we need to also stop the max tracer */
1903 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1904 return tracing_stop();
1906 raw_spin_lock_irqsave(&tr->start_lock, flags);
1907 if (tr->stop_count++)
1910 buffer = tr->trace_buffer.buffer;
1912 ring_buffer_record_disable(buffer);
1915 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1918 static int trace_save_cmdline(struct task_struct *tsk)
1922 /* treat recording of idle task as a success */
1926 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
1930 * It's not the end of the world if we don't get
1931 * the lock, but we also don't want to spin
1932 * nor do we want to disable interrupts,
1933 * so if we miss here, then better luck next time.
1935 if (!arch_spin_trylock(&trace_cmdline_lock))
1938 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1939 if (idx == NO_CMDLINE_MAP) {
1940 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1943 * Check whether the cmdline buffer at idx has a pid
1944 * mapped. We are going to overwrite that entry so we
1945 * need to clear the map_pid_to_cmdline. Otherwise we
1946 * would read the new comm for the old pid.
1948 pid = savedcmd->map_cmdline_to_pid[idx];
1949 if (pid != NO_CMDLINE_MAP)
1950 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1952 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1953 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1955 savedcmd->cmdline_idx = idx;
1958 set_cmdline(idx, tsk->comm);
1960 arch_spin_unlock(&trace_cmdline_lock);
1965 static void __trace_find_cmdline(int pid, char comm[])
1970 strcpy(comm, "<idle>");
1974 if (WARN_ON_ONCE(pid < 0)) {
1975 strcpy(comm, "<XXX>");
1979 if (pid > PID_MAX_DEFAULT) {
1980 strcpy(comm, "<...>");
1984 map = savedcmd->map_pid_to_cmdline[pid];
1985 if (map != NO_CMDLINE_MAP)
1986 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
1988 strcpy(comm, "<...>");
1991 void trace_find_cmdline(int pid, char comm[])
1994 arch_spin_lock(&trace_cmdline_lock);
1996 __trace_find_cmdline(pid, comm);
1998 arch_spin_unlock(&trace_cmdline_lock);
2002 int trace_find_tgid(int pid)
2004 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2007 return tgid_map[pid];
2010 static int trace_save_tgid(struct task_struct *tsk)
2012 /* treat recording of idle task as a success */
2016 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2019 tgid_map[tsk->pid] = tsk->tgid;
2023 static bool tracing_record_taskinfo_skip(int flags)
2025 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2027 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2029 if (!__this_cpu_read(trace_taskinfo_save))
2035 * tracing_record_taskinfo - record the task info of a task
2037 * @task - task to record
2038 * @flags - TRACE_RECORD_CMDLINE for recording comm
2039 * - TRACE_RECORD_TGID for recording tgid
2041 void tracing_record_taskinfo(struct task_struct *task, int flags)
2045 if (tracing_record_taskinfo_skip(flags))
2049 * Record as much task information as possible. If some fail, continue
2050 * to try to record the others.
2052 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2053 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2055 /* If recording any information failed, retry again soon. */
2059 __this_cpu_write(trace_taskinfo_save, false);
2063 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2065 * @prev - previous task during sched_switch
2066 * @next - next task during sched_switch
2067 * @flags - TRACE_RECORD_CMDLINE for recording comm
2068 * TRACE_RECORD_TGID for recording tgid
2070 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2071 struct task_struct *next, int flags)
2075 if (tracing_record_taskinfo_skip(flags))
2079 * Record as much task information as possible. If some fail, continue
2080 * to try to record the others.
2082 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2083 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2084 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2085 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2087 /* If recording any information failed, retry again soon. */
2091 __this_cpu_write(trace_taskinfo_save, false);
2094 /* Helpers to record a specific task information */
2095 void tracing_record_cmdline(struct task_struct *task)
2097 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2100 void tracing_record_tgid(struct task_struct *task)
2102 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2106 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2107 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2108 * simplifies those functions and keeps them in sync.
2110 enum print_line_t trace_handle_return(struct trace_seq *s)
2112 return trace_seq_has_overflowed(s) ?
2113 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2115 EXPORT_SYMBOL_GPL(trace_handle_return);
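/*
 * Illustrative use sketch (assumed, not from the original source): an
 * event's print callback returning through this helper.
 *
 *   trace_seq_printf(s, "example: %d\n", field_value);
 *   return trace_handle_return(s);
 */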
2118 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2121 struct task_struct *tsk = current;
2123 entry->preempt_count = pc & 0xff;
2124 entry->pid = (tsk) ? tsk->pid : 0;
2126 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2127 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2129 TRACE_FLAG_IRQS_NOSUPPORT |
2131 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2132 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2133 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2134 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2135 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2137 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2139 struct ring_buffer_event *
2140 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2143 unsigned long flags, int pc)
2145 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2148 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2149 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2150 static int trace_buffered_event_ref;
2153 * trace_buffered_event_enable - enable buffering events
2155 * When events are being filtered, it is quicker to use a temporary
2156 * buffer to write the event data into if there's a likely chance
2157 * that it will not be committed. The discard of the ring buffer
2158 * is not as fast as committing, and is much slower than copying
2161 * When an event is to be filtered, allocate per cpu buffers to
2162 * write the event data into, and if the event is filtered and discarded
2163 * it is simply dropped, otherwise, the entire data is to be committed
2166 void trace_buffered_event_enable(void)
2168 struct ring_buffer_event *event;
2172 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2174 if (trace_buffered_event_ref++)
2177 for_each_tracing_cpu(cpu) {
2178 page = alloc_pages_node(cpu_to_node(cpu),
2179 GFP_KERNEL | __GFP_NORETRY, 0);
2183 event = page_address(page);
2184 memset(event, 0, sizeof(*event));
2186 per_cpu(trace_buffered_event, cpu) = event;
2189 if (cpu == smp_processor_id() &&
2190 this_cpu_read(trace_buffered_event) !=
2191 per_cpu(trace_buffered_event, cpu))
2198 trace_buffered_event_disable();
2201 static void enable_trace_buffered_event(void *data)
2203 /* Probably not needed, but do it anyway */
2205 this_cpu_dec(trace_buffered_event_cnt);
2208 static void disable_trace_buffered_event(void *data)
2210 this_cpu_inc(trace_buffered_event_cnt);
2214 * trace_buffered_event_disable - disable buffering events
2216 * When a filter is removed, it is faster to not use the buffered
2217 * events, and to commit directly into the ring buffer. Free up
2218 * the temp buffers when there are no more users. This requires
2219 * special synchronization with current events.
2221 void trace_buffered_event_disable(void)
2225 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2227 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2230 if (--trace_buffered_event_ref)
2234 /* For each CPU, set the buffer as used. */
2235 smp_call_function_many(tracing_buffer_mask,
2236 disable_trace_buffered_event, NULL, 1);
2239 /* Wait for all current users to finish */
2240 synchronize_sched();
2242 for_each_tracing_cpu(cpu) {
2243 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2244 per_cpu(trace_buffered_event, cpu) = NULL;
2247 * Make sure trace_buffered_event is NULL before clearing
2248 * trace_buffered_event_cnt.
2253 /* Do the work on each cpu */
2254 smp_call_function_many(tracing_buffer_mask,
2255 enable_trace_buffered_event, NULL, 1);
2259 static struct ring_buffer *temp_buffer;
2261 struct ring_buffer_event *
2262 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2263 struct trace_event_file *trace_file,
2264 int type, unsigned long len,
2265 unsigned long flags, int pc)
2267 struct ring_buffer_event *entry;
2270 *current_rb = trace_file->tr->trace_buffer.buffer;
2272 if ((trace_file->flags &
2273 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2274 (entry = this_cpu_read(trace_buffered_event))) {
2275 /* Try to use the per cpu buffer first */
2276 val = this_cpu_inc_return(trace_buffered_event_cnt);
2278 trace_event_setup(entry, type, flags, pc);
2279 entry->array[0] = len;
2282 this_cpu_dec(trace_buffered_event_cnt);
2285 entry = __trace_buffer_lock_reserve(*current_rb,
2286 type, len, flags, pc);
2288 * If tracing is off, but we have triggers enabled
2289 * we still need to look at the event data. Use the temp_buffer
2290 * to store the trace event for the trigger to use. It is recursion
2291 * safe and will not be recorded anywhere.
2293 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2294 *current_rb = temp_buffer;
2295 entry = __trace_buffer_lock_reserve(*current_rb,
2296 type, len, flags, pc);
2300 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2302 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2303 static DEFINE_MUTEX(tracepoint_printk_mutex);
2305 static void output_printk(struct trace_event_buffer *fbuffer)
2307 struct trace_event_call *event_call;
2308 struct trace_event *event;
2309 unsigned long flags;
2310 struct trace_iterator *iter = tracepoint_print_iter;
2312 /* We should never get here if iter is NULL */
2313 if (WARN_ON_ONCE(!iter))
2316 event_call = fbuffer->trace_file->event_call;
2317 if (!event_call || !event_call->event.funcs ||
2318 !event_call->event.funcs->trace)
2321 event = &fbuffer->trace_file->event_call->event;
2323 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2324 trace_seq_init(&iter->seq);
2325 iter->ent = fbuffer->entry;
2326 event_call->event.funcs->trace(iter, 0, event);
2327 trace_seq_putc(&iter->seq, 0);
2328 printk("%s", iter->seq.buffer);
2330 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2333 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2334 void __user *buffer, size_t *lenp,
2337 int save_tracepoint_printk;
2340 mutex_lock(&tracepoint_printk_mutex);
2341 save_tracepoint_printk = tracepoint_printk;
2343 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2346 * This will force exiting early, as tracepoint_printk
2347 * is always zero when tracepoint_print_iter is not allocated
2349 if (!tracepoint_print_iter)
2350 tracepoint_printk = 0;
2352 if (save_tracepoint_printk == tracepoint_printk)
2355 if (tracepoint_printk)
2356 static_key_enable(&tracepoint_printk_key.key);
2358 static_key_disable(&tracepoint_printk_key.key);
2361 mutex_unlock(&tracepoint_printk_mutex);
2366 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2368 if (static_key_false(&tracepoint_printk_key.key))
2369 output_printk(fbuffer);
2371 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2372 fbuffer->event, fbuffer->entry,
2373 fbuffer->flags, fbuffer->pc);
2375 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2377 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2378 struct ring_buffer *buffer,
2379 struct ring_buffer_event *event,
2380 unsigned long flags, int pc,
2381 struct pt_regs *regs)
2383 __buffer_unlock_commit(buffer, event);
2386 * If regs is not set, then skip the following callers:
2387 * trace_buffer_unlock_commit_regs
2388 * event_trigger_unlock_commit
2389 * trace_event_buffer_commit
2390 * trace_event_raw_event_sched_switch
2391 * Note, we can still get here via blktrace, wakeup tracer
2392 * and mmiotrace, but that's ok if they lose a function or
2393 * two. They are not that meaningful.
2395 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
2396 ftrace_trace_userstack(buffer, flags, pc);
2400 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2403 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2404 struct ring_buffer_event *event)
2406 __buffer_unlock_commit(buffer, event);
2410 trace_process_export(struct trace_export *export,
2411 struct ring_buffer_event *event)
2413 struct trace_entry *entry;
2414 unsigned int size = 0;
2416 entry = ring_buffer_event_data(event);
2417 size = ring_buffer_event_length(event);
2418 export->write(entry, size);
2421 static DEFINE_MUTEX(ftrace_export_lock);
2423 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2425 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2427 static inline void ftrace_exports_enable(void)
2429 static_branch_enable(&ftrace_exports_enabled);
2432 static inline void ftrace_exports_disable(void)
2434 static_branch_disable(&ftrace_exports_enabled);
2437 void ftrace_exports(struct ring_buffer_event *event)
2439 struct trace_export *export;
2441 preempt_disable_notrace();
2443 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2445 trace_process_export(export, event);
2446 export = rcu_dereference_raw_notrace(export->next);
2449 preempt_enable_notrace();
2453 add_trace_export(struct trace_export **list, struct trace_export *export)
2455 rcu_assign_pointer(export->next, *list);
2457 * We are entering export into the list but another
2458 * CPU might be walking that list. We need to make sure
2459 * the export->next pointer is valid before another CPU sees
2460 * the export pointer included in the list.
2462 rcu_assign_pointer(*list, export);
2466 rm_trace_export(struct trace_export **list, struct trace_export *export)
2468 struct trace_export **p;
2470 for (p = list; *p != NULL; p = &(*p)->next)
2477 rcu_assign_pointer(*p, (*p)->next);
2483 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2486 ftrace_exports_enable();
2488 add_trace_export(list, export);
2492 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2496 ret = rm_trace_export(list, export);
2498 ftrace_exports_disable();
2503 int register_ftrace_export(struct trace_export *export)
2505 if (WARN_ON_ONCE(!export->write))
2508 mutex_lock(&ftrace_export_lock);
2510 add_ftrace_export(&ftrace_exports_list, export);
2512 mutex_unlock(&ftrace_export_lock);
2516 EXPORT_SYMBOL_GPL(register_ftrace_export);
2518 int unregister_ftrace_export(struct trace_export *export)
2522 mutex_lock(&ftrace_export_lock);
2524 ret = rm_ftrace_export(&ftrace_exports_list, export);
2526 mutex_unlock(&ftrace_export_lock);
2530 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
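/*
 * Illustrative sketch (not part of the original source): how an outside
 * module might hook into the export list managed above.  The callback
 * signature follows the two-argument export->write(entry, size) call in
 * trace_process_export(); my_write/my_export are hypothetical names and
 * struct trace_export is assumed to come from <linux/trace.h>.
 */
static void my_write(const void *entry, unsigned int size)
{
	/* Forward the raw trace entry to another sink (device, log, ...). */
}

static struct trace_export my_export = {
	.write	= my_write,
};

/*
 * A module would call register_ftrace_export(&my_export) from its init
 * path and unregister_ftrace_export(&my_export) on exit.
 */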
2533 trace_function(struct trace_array *tr,
2534 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2537 struct trace_event_call *call = &event_function;
2538 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2539 struct ring_buffer_event *event;
2540 struct ftrace_entry *entry;
2542 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2546 entry = ring_buffer_event_data(event);
2548 entry->parent_ip = parent_ip;
2550 if (!call_filter_check_discard(call, entry, buffer, event)) {
2551 if (static_branch_unlikely(&ftrace_exports_enabled))
2552 ftrace_exports(event);
2553 __buffer_unlock_commit(buffer, event);
2557 #ifdef CONFIG_STACKTRACE
2559 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2560 struct ftrace_stack {
2561 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2564 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2565 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2567 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2568 unsigned long flags,
2569 int skip, int pc, struct pt_regs *regs)
2571 struct trace_event_call *call = &event_kernel_stack;
2572 struct ring_buffer_event *event;
2573 struct stack_entry *entry;
2574 struct stack_trace trace;
2576 int size = FTRACE_STACK_ENTRIES;
2578 trace.nr_entries = 0;
2582 * Add two, for this function and the call to save_stack_trace()
2583 * If regs is set, then these functions will not be in the way.
2589 * Since events can happen in NMIs there's no safe way to
2590 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2591 * or NMI comes in, it will just have to use the default
2592 * FTRACE_STACK_SIZE.
2594 preempt_disable_notrace();
2596 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2598 * We don't need any atomic variables, just a barrier.
2599 * If an interrupt comes in, we don't care, because it would
2600 * have exited and put the counter back to what we want.
2601 * We just need a barrier to keep gcc from moving things around.
2605 if (use_stack == 1) {
2606 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2607 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2610 save_stack_trace_regs(regs, &trace);
2612 save_stack_trace(&trace);
2614 if (trace.nr_entries > size)
2615 size = trace.nr_entries;
2617 /* From now on, use_stack is a boolean */
2620 size *= sizeof(unsigned long);
2622 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2623 sizeof(*entry) + size, flags, pc);
2626 entry = ring_buffer_event_data(event);
2628 memset(&entry->caller, 0, size);
2631 memcpy(&entry->caller, trace.entries,
2632 trace.nr_entries * sizeof(unsigned long));
2634 trace.max_entries = FTRACE_STACK_ENTRIES;
2635 trace.entries = entry->caller;
2637 save_stack_trace_regs(regs, &trace);
2639 save_stack_trace(&trace);
2642 entry->size = trace.nr_entries;
2644 if (!call_filter_check_discard(call, entry, buffer, event))
2645 __buffer_unlock_commit(buffer, event);
2648 /* Again, don't let gcc optimize things here */
2650 __this_cpu_dec(ftrace_stack_reserve);
2651 preempt_enable_notrace();
2655 static inline void ftrace_trace_stack(struct trace_array *tr,
2656 struct ring_buffer *buffer,
2657 unsigned long flags,
2658 int skip, int pc, struct pt_regs *regs)
2660 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2663 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2666 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2669 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2671 if (rcu_is_watching()) {
2672 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2677 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2678 * but if the above rcu_is_watching() failed, then the NMI
2679 * triggered someplace critical, and rcu_irq_enter() should
2680 * not be called from NMI.
2682 if (unlikely(in_nmi()))
2686 * It is possible that a function is being traced in a
2687 * location that RCU is not watching. A call to
2688 * rcu_irq_enter() will make sure that it is, but there's
2689 * a few internal rcu functions that could be traced
2690 * where that won't work either. In those cases, we just do nothing.
2693 if (unlikely(rcu_irq_enter_disabled()))
2696 rcu_irq_enter_irqson();
2697 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2698 rcu_irq_exit_irqson();
2702 * trace_dump_stack - record a stack back trace in the trace buffer
2703 * @skip: Number of functions to skip (helper handlers)
2705 void trace_dump_stack(int skip)
2707 unsigned long flags;
2709 if (tracing_disabled || tracing_selftest_running)
2712 local_save_flags(flags);
2715 * Skip 3 more, seems to get us at the caller of this function.
2719 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2720 flags, skip, preempt_count(), NULL);
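/*
 * Illustrative sketch: trace_dump_stack() is meant to be dropped into
 * kernel code while debugging so the backtrace is recorded in the ring
 * buffer rather than spamming the console.  example_debug_path() is a
 * hypothetical caller.
 */
static void example_debug_path(void)
{
	/* Record the current kernel stack; 0 skips no extra frames. */
	trace_dump_stack(0);
}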
2723 static DEFINE_PER_CPU(int, user_stack_count);
2726 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2728 struct trace_event_call *call = &event_user_stack;
2729 struct ring_buffer_event *event;
2730 struct userstack_entry *entry;
2731 struct stack_trace trace;
2733 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2737 * NMIs cannot handle page faults, even with fixups.
2738 * Saving the user stack can (and often does) fault.
2740 if (unlikely(in_nmi()))
2744 * prevent recursion, since the user stack tracing may
2745 * trigger other kernel events.
2748 if (__this_cpu_read(user_stack_count))
2751 __this_cpu_inc(user_stack_count);
2753 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2754 sizeof(*entry), flags, pc);
2756 goto out_drop_count;
2757 entry = ring_buffer_event_data(event);
2759 entry->tgid = current->tgid;
2760 memset(&entry->caller, 0, sizeof(entry->caller));
2762 trace.nr_entries = 0;
2763 trace.max_entries = FTRACE_STACK_ENTRIES;
2765 trace.entries = entry->caller;
2767 save_stack_trace_user(&trace);
2768 if (!call_filter_check_discard(call, entry, buffer, event))
2769 __buffer_unlock_commit(buffer, event);
2772 __this_cpu_dec(user_stack_count);
2778 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2780 ftrace_trace_userstack(tr, flags, preempt_count());
2784 #endif /* CONFIG_STACKTRACE */
2786 /* created for use with alloc_percpu */
2787 struct trace_buffer_struct {
2789 char buffer[4][TRACE_BUF_SIZE];
2792 static struct trace_buffer_struct *trace_percpu_buffer;
2795 * This allows for lockless recording. If we're nested too deeply, then
2796 * this returns NULL.
2798 static char *get_trace_buf(void)
2800 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2802 if (!buffer || buffer->nesting >= 4)
2807 /* Interrupts must see nesting incremented before we use the buffer */
2809 return &buffer->buffer[buffer->nesting][0];
2812 static void put_trace_buf(void)
2814 /* Don't let the decrement of nesting leak before this */
2816 this_cpu_dec(trace_percpu_buffer->nesting);
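/*
 * Illustrative sketch of the expected pairing for the per-CPU nesting
 * buffer above, mirroring how the vprintk paths further down use it.
 * example_buffered_event() is hypothetical.
 */
static void example_buffered_event(void)
{
	char *tbuf;

	preempt_disable_notrace();
	tbuf = get_trace_buf();
	if (tbuf) {
		/* format up to TRACE_BUF_SIZE bytes into tbuf here ... */
		put_trace_buf();
	}
	preempt_enable_notrace();
}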
2819 static int alloc_percpu_trace_buffer(void)
2821 struct trace_buffer_struct *buffers;
2823 buffers = alloc_percpu(struct trace_buffer_struct);
2824 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2827 trace_percpu_buffer = buffers;
2831 static int buffers_allocated;
2833 void trace_printk_init_buffers(void)
2835 if (buffers_allocated)
2838 if (alloc_percpu_trace_buffer())
2841 /* trace_printk() is for debug use only. Don't use it in production. */
2844 pr_warn("**********************************************************\n");
2845 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2847 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2849 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2850 pr_warn("** unsafe for production use. **\n");
2852 pr_warn("** If you see this message and you are not debugging **\n");
2853 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2855 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2856 pr_warn("**********************************************************\n");
2858 /* Expand the buffers to set size */
2859 tracing_update_buffers();
2861 buffers_allocated = 1;
2864 * trace_printk_init_buffers() can be called by modules.
2865 * If that happens, then we need to start cmdline recording
2866 * directly here. If the global_trace.buffer is already
2867 * allocated here, then this was called by module code.
2869 if (global_trace.trace_buffer.buffer)
2870 tracing_start_cmdline_record();
2873 void trace_printk_start_comm(void)
2875 /* Start tracing comms if trace printk is set */
2876 if (!buffers_allocated)
2878 tracing_start_cmdline_record();
2881 static void trace_printk_start_stop_comm(int enabled)
2883 if (!buffers_allocated)
2887 tracing_start_cmdline_record();
2889 tracing_stop_cmdline_record();
2893 * trace_vbprintk - write binary msg to tracing buffer
2896 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2898 struct trace_event_call *call = &event_bprint;
2899 struct ring_buffer_event *event;
2900 struct ring_buffer *buffer;
2901 struct trace_array *tr = &global_trace;
2902 struct bprint_entry *entry;
2903 unsigned long flags;
2905 int len = 0, size, pc;
2907 if (unlikely(tracing_selftest_running || tracing_disabled))
2910 /* Don't pollute graph traces with trace_vprintk internals */
2911 pause_graph_tracing();
2913 pc = preempt_count();
2914 preempt_disable_notrace();
2916 tbuffer = get_trace_buf();
2922 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2924 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2927 local_save_flags(flags);
2928 size = sizeof(*entry) + sizeof(u32) * len;
2929 buffer = tr->trace_buffer.buffer;
2930 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2934 entry = ring_buffer_event_data(event);
2938 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2939 if (!call_filter_check_discard(call, entry, buffer, event)) {
2940 __buffer_unlock_commit(buffer, event);
2941 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2948 preempt_enable_notrace();
2949 unpause_graph_tracing();
2953 EXPORT_SYMBOL_GPL(trace_vbprintk);
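/*
 * Illustrative sketch: trace_printk() (a macro from <linux/kernel.h>)
 * is the usual entry point that lands in trace_vbprintk() above when
 * the format string qualifies for the binary fast path.
 * example_probe() and its arguments are hypothetical debug code.
 */
static void example_probe(int cpu, u64 delta_ns)
{
	trace_printk("cpu=%d delta=%llu ns\n", cpu, delta_ns);
}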
2956 __trace_array_vprintk(struct ring_buffer *buffer,
2957 unsigned long ip, const char *fmt, va_list args)
2959 struct trace_event_call *call = &event_print;
2960 struct ring_buffer_event *event;
2961 int len = 0, size, pc;
2962 struct print_entry *entry;
2963 unsigned long flags;
2966 if (tracing_disabled || tracing_selftest_running)
2969 /* Don't pollute graph traces with trace_vprintk internals */
2970 pause_graph_tracing();
2972 pc = preempt_count();
2973 preempt_disable_notrace();
2976 tbuffer = get_trace_buf();
2982 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2984 local_save_flags(flags);
2985 size = sizeof(*entry) + len + 1;
2986 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2990 entry = ring_buffer_event_data(event);
2993 memcpy(&entry->buf, tbuffer, len + 1);
2994 if (!call_filter_check_discard(call, entry, buffer, event)) {
2995 __buffer_unlock_commit(buffer, event);
2996 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3003 preempt_enable_notrace();
3004 unpause_graph_tracing();
3009 int trace_array_vprintk(struct trace_array *tr,
3010 unsigned long ip, const char *fmt, va_list args)
3012 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3015 int trace_array_printk(struct trace_array *tr,
3016 unsigned long ip, const char *fmt, ...)
3021 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3025 ret = trace_array_vprintk(tr, ip, fmt, ap);
3030 int trace_array_printk_buf(struct ring_buffer *buffer,
3031 unsigned long ip, const char *fmt, ...)
3036 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3040 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3045 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3047 return trace_array_vprintk(&global_trace, ip, fmt, args);
3049 EXPORT_SYMBOL_GPL(trace_vprintk);
3051 static void trace_iterator_increment(struct trace_iterator *iter)
3053 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3057 ring_buffer_read(buf_iter, NULL);
3060 static struct trace_entry *
3061 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3062 unsigned long *lost_events)
3064 struct ring_buffer_event *event;
3065 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3068 event = ring_buffer_iter_peek(buf_iter, ts);
3070 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3074 iter->ent_size = ring_buffer_event_length(event);
3075 return ring_buffer_event_data(event);
3081 static struct trace_entry *
3082 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3083 unsigned long *missing_events, u64 *ent_ts)
3085 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3086 struct trace_entry *ent, *next = NULL;
3087 unsigned long lost_events = 0, next_lost = 0;
3088 int cpu_file = iter->cpu_file;
3089 u64 next_ts = 0, ts;
3095 * If we are in a per_cpu trace file, don't bother iterating over
3096 * all CPUs; just peek at that one directly.
3098 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3099 if (ring_buffer_empty_cpu(buffer, cpu_file))
3101 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3103 *ent_cpu = cpu_file;
3108 for_each_tracing_cpu(cpu) {
3110 if (ring_buffer_empty_cpu(buffer, cpu))
3113 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3116 * Pick the entry with the smallest timestamp:
3118 if (ent && (!next || ts < next_ts)) {
3122 next_lost = lost_events;
3123 next_size = iter->ent_size;
3127 iter->ent_size = next_size;
3130 *ent_cpu = next_cpu;
3136 *missing_events = next_lost;
3141 /* Find the next real entry, without updating the iterator itself */
3142 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3143 int *ent_cpu, u64 *ent_ts)
3145 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3148 /* Find the next real entry, and increment the iterator to the next entry */
3149 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3151 iter->ent = __find_next_entry(iter, &iter->cpu,
3152 &iter->lost_events, &iter->ts);
3155 trace_iterator_increment(iter);
3157 return iter->ent ? iter : NULL;
3160 static void trace_consume(struct trace_iterator *iter)
3162 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3163 &iter->lost_events);
3166 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3168 struct trace_iterator *iter = m->private;
3172 WARN_ON_ONCE(iter->leftover);
3176 /* can't go backwards */
3181 ent = trace_find_next_entry_inc(iter);
3185 while (ent && iter->idx < i)
3186 ent = trace_find_next_entry_inc(iter);
3193 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3195 struct ring_buffer_event *event;
3196 struct ring_buffer_iter *buf_iter;
3197 unsigned long entries = 0;
3200 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3202 buf_iter = trace_buffer_iter(iter, cpu);
3206 ring_buffer_iter_reset(buf_iter);
3209 * We could have the case with the max latency tracers
3210 * that a reset never took place on a cpu. This is evident
3211 * by the timestamp being before the start of the buffer.
3213 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3214 if (ts >= iter->trace_buffer->time_start)
3217 ring_buffer_read(buf_iter, NULL);
3220 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3224 * The current tracer is copied to avoid a global locking all around.
3227 static void *s_start(struct seq_file *m, loff_t *pos)
3229 struct trace_iterator *iter = m->private;
3230 struct trace_array *tr = iter->tr;
3231 int cpu_file = iter->cpu_file;
3237 * copy the tracer to avoid using a global lock all around.
3238 * iter->trace is a copy of current_trace, the pointer to the
3239 * name may be used instead of a strcmp(), as iter->trace->name
3240 * will point to the same string as current_trace->name.
3242 mutex_lock(&trace_types_lock);
3243 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3244 *iter->trace = *tr->current_trace;
3245 mutex_unlock(&trace_types_lock);
3247 #ifdef CONFIG_TRACER_MAX_TRACE
3248 if (iter->snapshot && iter->trace->use_max_tr)
3249 return ERR_PTR(-EBUSY);
3252 if (!iter->snapshot)
3253 atomic_inc(&trace_record_taskinfo_disabled);
3255 if (*pos != iter->pos) {
3260 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3261 for_each_tracing_cpu(cpu)
3262 tracing_iter_reset(iter, cpu);
3264 tracing_iter_reset(iter, cpu_file);
3267 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3272 * If we overflowed the seq_file before, then we want
3273 * to just reuse the trace_seq buffer again.
3279 p = s_next(m, p, &l);
3283 trace_event_read_lock();
3284 trace_access_lock(cpu_file);
3288 static void s_stop(struct seq_file *m, void *p)
3290 struct trace_iterator *iter = m->private;
3292 #ifdef CONFIG_TRACER_MAX_TRACE
3293 if (iter->snapshot && iter->trace->use_max_tr)
3297 if (!iter->snapshot)
3298 atomic_dec(&trace_record_taskinfo_disabled);
3300 trace_access_unlock(iter->cpu_file);
3301 trace_event_read_unlock();
3305 get_total_entries(struct trace_buffer *buf,
3306 unsigned long *total, unsigned long *entries)
3308 unsigned long count;
3314 for_each_tracing_cpu(cpu) {
3315 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3317 * If this buffer has skipped entries, then we hold all
3318 * entries for the trace and we need to ignore the
3319 * ones before the time stamp.
3321 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3322 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3323 /* total is the same as the entries */
3327 ring_buffer_overrun_cpu(buf->buffer, cpu);
3332 static void print_lat_help_header(struct seq_file *m)
3334 seq_puts(m, "# _------=> CPU# \n"
3335 "# / _-----=> irqs-off \n"
3336 "# | / _----=> need-resched \n"
3337 "# || / _---=> hardirq/softirq \n"
3338 "# ||| / _--=> preempt-depth \n"
3340 "# cmd pid ||||| time | caller \n"
3341 "# \\ / ||||| \\ | / \n");
3344 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3346 unsigned long total;
3347 unsigned long entries;
3349 get_total_entries(buf, &total, &entries);
3350 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3351 entries, total, num_online_cpus());
3355 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3358 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3360 print_event_info(buf, m);
3362 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3363 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
3366 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3369 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3370 const char tgid_space[] = " ";
3371 const char space[] = " ";
3373 seq_printf(m, "# %s _-----=> irqs-off\n",
3374 tgid ? tgid_space : space);
3375 seq_printf(m, "# %s / _----=> need-resched\n",
3376 tgid ? tgid_space : space);
3377 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3378 tgid ? tgid_space : space);
3379 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3380 tgid ? tgid_space : space);
3381 seq_printf(m, "# %s||| / delay\n",
3382 tgid ? tgid_space : space);
3383 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3384 tgid ? " TGID " : space);
3385 seq_printf(m, "# | | | %s|||| | |\n",
3386 tgid ? " | " : space);
3390 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3392 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3393 struct trace_buffer *buf = iter->trace_buffer;
3394 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3395 struct tracer *type = iter->trace;
3396 unsigned long entries;
3397 unsigned long total;
3398 const char *name = "preemption";
3402 get_total_entries(buf, &total, &entries);
3404 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3406 seq_puts(m, "# -----------------------------------"
3407 "---------------------------------\n");
3408 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3409 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3410 nsecs_to_usecs(data->saved_latency),
3414 #if defined(CONFIG_PREEMPT_NONE)
3416 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3418 #elif defined(CONFIG_PREEMPT)
3423 /* These are reserved for later use */
3426 seq_printf(m, " #P:%d)\n", num_online_cpus());
3430 seq_puts(m, "# -----------------\n");
3431 seq_printf(m, "# | task: %.16s-%d "
3432 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3433 data->comm, data->pid,
3434 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3435 data->policy, data->rt_priority);
3436 seq_puts(m, "# -----------------\n");
3438 if (data->critical_start) {
3439 seq_puts(m, "# => started at: ");
3440 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3441 trace_print_seq(m, &iter->seq);
3442 seq_puts(m, "\n# => ended at: ");
3443 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3444 trace_print_seq(m, &iter->seq);
3445 seq_puts(m, "\n#\n");
3451 static void test_cpu_buff_start(struct trace_iterator *iter)
3453 struct trace_seq *s = &iter->seq;
3454 struct trace_array *tr = iter->tr;
3456 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3459 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3462 if (cpumask_available(iter->started) &&
3463 cpumask_test_cpu(iter->cpu, iter->started))
3466 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3469 if (cpumask_available(iter->started))
3470 cpumask_set_cpu(iter->cpu, iter->started);
3472 /* Don't print started cpu buffer for the first entry of the trace */
3474 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3478 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3480 struct trace_array *tr = iter->tr;
3481 struct trace_seq *s = &iter->seq;
3482 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3483 struct trace_entry *entry;
3484 struct trace_event *event;
3488 test_cpu_buff_start(iter);
3490 event = ftrace_find_event(entry->type);
3492 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3493 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3494 trace_print_lat_context(iter);
3496 trace_print_context(iter);
3499 if (trace_seq_has_overflowed(s))
3500 return TRACE_TYPE_PARTIAL_LINE;
3503 return event->funcs->trace(iter, sym_flags, event);
3505 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3507 return trace_handle_return(s);
3510 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3512 struct trace_array *tr = iter->tr;
3513 struct trace_seq *s = &iter->seq;
3514 struct trace_entry *entry;
3515 struct trace_event *event;
3519 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3520 trace_seq_printf(s, "%d %d %llu ",
3521 entry->pid, iter->cpu, iter->ts);
3523 if (trace_seq_has_overflowed(s))
3524 return TRACE_TYPE_PARTIAL_LINE;
3526 event = ftrace_find_event(entry->type);
3528 return event->funcs->raw(iter, 0, event);
3530 trace_seq_printf(s, "%d ?\n", entry->type);
3532 return trace_handle_return(s);
3535 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3537 struct trace_array *tr = iter->tr;
3538 struct trace_seq *s = &iter->seq;
3539 unsigned char newline = '\n';
3540 struct trace_entry *entry;
3541 struct trace_event *event;
3545 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3546 SEQ_PUT_HEX_FIELD(s, entry->pid);
3547 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3548 SEQ_PUT_HEX_FIELD(s, iter->ts);
3549 if (trace_seq_has_overflowed(s))
3550 return TRACE_TYPE_PARTIAL_LINE;
3553 event = ftrace_find_event(entry->type);
3555 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3556 if (ret != TRACE_TYPE_HANDLED)
3560 SEQ_PUT_FIELD(s, newline);
3562 return trace_handle_return(s);
3565 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3567 struct trace_array *tr = iter->tr;
3568 struct trace_seq *s = &iter->seq;
3569 struct trace_entry *entry;
3570 struct trace_event *event;
3574 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3575 SEQ_PUT_FIELD(s, entry->pid);
3576 SEQ_PUT_FIELD(s, iter->cpu);
3577 SEQ_PUT_FIELD(s, iter->ts);
3578 if (trace_seq_has_overflowed(s))
3579 return TRACE_TYPE_PARTIAL_LINE;
3582 event = ftrace_find_event(entry->type);
3583 return event ? event->funcs->binary(iter, 0, event) :
3587 int trace_empty(struct trace_iterator *iter)
3589 struct ring_buffer_iter *buf_iter;
3592 /* If we are looking at one CPU buffer, only check that one */
3593 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3594 cpu = iter->cpu_file;
3595 buf_iter = trace_buffer_iter(iter, cpu);
3597 if (!ring_buffer_iter_empty(buf_iter))
3600 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3606 for_each_tracing_cpu(cpu) {
3607 buf_iter = trace_buffer_iter(iter, cpu);
3609 if (!ring_buffer_iter_empty(buf_iter))
3612 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3620 /* Called with trace_event_read_lock() held. */
3621 enum print_line_t print_trace_line(struct trace_iterator *iter)
3623 struct trace_array *tr = iter->tr;
3624 unsigned long trace_flags = tr->trace_flags;
3625 enum print_line_t ret;
3627 if (iter->lost_events) {
3628 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3629 iter->cpu, iter->lost_events);
3630 if (trace_seq_has_overflowed(&iter->seq))
3631 return TRACE_TYPE_PARTIAL_LINE;
3634 if (iter->trace && iter->trace->print_line) {
3635 ret = iter->trace->print_line(iter);
3636 if (ret != TRACE_TYPE_UNHANDLED)
3640 if (iter->ent->type == TRACE_BPUTS &&
3641 trace_flags & TRACE_ITER_PRINTK &&
3642 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3643 return trace_print_bputs_msg_only(iter);
3645 if (iter->ent->type == TRACE_BPRINT &&
3646 trace_flags & TRACE_ITER_PRINTK &&
3647 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3648 return trace_print_bprintk_msg_only(iter);
3650 if (iter->ent->type == TRACE_PRINT &&
3651 trace_flags & TRACE_ITER_PRINTK &&
3652 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3653 return trace_print_printk_msg_only(iter);
3655 if (trace_flags & TRACE_ITER_BIN)
3656 return print_bin_fmt(iter);
3658 if (trace_flags & TRACE_ITER_HEX)
3659 return print_hex_fmt(iter);
3661 if (trace_flags & TRACE_ITER_RAW)
3662 return print_raw_fmt(iter);
3664 return print_trace_fmt(iter);
3667 void trace_latency_header(struct seq_file *m)
3669 struct trace_iterator *iter = m->private;
3670 struct trace_array *tr = iter->tr;
3672 /* print nothing if the buffers are empty */
3673 if (trace_empty(iter))
3676 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3677 print_trace_header(m, iter);
3679 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3680 print_lat_help_header(m);
3683 void trace_default_header(struct seq_file *m)
3685 struct trace_iterator *iter = m->private;
3686 struct trace_array *tr = iter->tr;
3687 unsigned long trace_flags = tr->trace_flags;
3689 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3692 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3693 /* print nothing if the buffers are empty */
3694 if (trace_empty(iter))
3696 print_trace_header(m, iter);
3697 if (!(trace_flags & TRACE_ITER_VERBOSE))
3698 print_lat_help_header(m);
3700 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3701 if (trace_flags & TRACE_ITER_IRQ_INFO)
3702 print_func_help_header_irq(iter->trace_buffer,
3705 print_func_help_header(iter->trace_buffer, m,
3711 static void test_ftrace_alive(struct seq_file *m)
3713 if (!ftrace_is_dead())
3715 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3716 "# MAY BE MISSING FUNCTION EVENTS\n");
3719 #ifdef CONFIG_TRACER_MAX_TRACE
3720 static void show_snapshot_main_help(struct seq_file *m)
3722 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3723 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3724 "# Takes a snapshot of the main buffer.\n"
3725 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3726 "# (Doesn't have to be '2' works with any number that\n"
3727 "# is not a '0' or '1')\n");
3730 static void show_snapshot_percpu_help(struct seq_file *m)
3732 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3733 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3734 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3735 "# Takes a snapshot of the main buffer for this cpu.\n");
3737 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3738 "# Must use main snapshot file to allocate.\n");
3740 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3741 "# (Doesn't have to be '2' works with any number that\n"
3742 "# is not a '0' or '1')\n");
3745 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3747 if (iter->tr->allocated_snapshot)
3748 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3750 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3752 seq_puts(m, "# Snapshot commands:\n");
3753 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3754 show_snapshot_main_help(m);
3756 show_snapshot_percpu_help(m);
3759 /* Should never be called */
3760 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3763 static int s_show(struct seq_file *m, void *v)
3765 struct trace_iterator *iter = v;
3768 if (iter->ent == NULL) {
3770 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3772 test_ftrace_alive(m);
3774 if (iter->snapshot && trace_empty(iter))
3775 print_snapshot_help(m, iter);
3776 else if (iter->trace && iter->trace->print_header)
3777 iter->trace->print_header(m);
3779 trace_default_header(m);
3781 } else if (iter->leftover) {
3783 * If we filled the seq_file buffer earlier, we
3784 * want to just show it now.
3786 ret = trace_print_seq(m, &iter->seq);
3788 /* ret should this time be zero, but you never know */
3789 iter->leftover = ret;
3792 print_trace_line(iter);
3793 ret = trace_print_seq(m, &iter->seq);
3795 * If we overflow the seq_file buffer, then it will
3796 * ask us for this data again at start up.
3798 * ret is 0 if seq_file write succeeded.
3801 iter->leftover = ret;
3808 * Should be used after trace_array_get(), trace_types_lock
3809 * ensures that i_cdev was already initialized.
3811 static inline int tracing_get_cpu(struct inode *inode)
3813 if (inode->i_cdev) /* See trace_create_cpu_file() */
3814 return (long)inode->i_cdev - 1;
3815 return RING_BUFFER_ALL_CPUS;
3818 static const struct seq_operations tracer_seq_ops = {
3825 static struct trace_iterator *
3826 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3828 struct trace_array *tr = inode->i_private;
3829 struct trace_iterator *iter;
3832 if (tracing_disabled)
3833 return ERR_PTR(-ENODEV);
3835 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3837 return ERR_PTR(-ENOMEM);
3839 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3841 if (!iter->buffer_iter)
3845 * We make a copy of the current tracer to avoid concurrent
3846 * changes on it while we are reading.
3848 mutex_lock(&trace_types_lock);
3849 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3853 *iter->trace = *tr->current_trace;
3855 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3860 #ifdef CONFIG_TRACER_MAX_TRACE
3861 /* Currently only the top directory has a snapshot */
3862 if (tr->current_trace->print_max || snapshot)
3863 iter->trace_buffer = &tr->max_buffer;
3866 iter->trace_buffer = &tr->trace_buffer;
3867 iter->snapshot = snapshot;
3869 iter->cpu_file = tracing_get_cpu(inode);
3870 mutex_init(&iter->mutex);
3872 /* Notify the tracer early; before we stop tracing. */
3873 if (iter->trace && iter->trace->open)
3874 iter->trace->open(iter);
3876 /* Annotate start of buffers if we had overruns */
3877 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3878 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3880 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3881 if (trace_clocks[tr->clock_id].in_ns)
3882 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3884 /* stop the trace while dumping if we are not opening "snapshot" */
3885 if (!iter->snapshot)
3886 tracing_stop_tr(tr);
3888 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3889 for_each_tracing_cpu(cpu) {
3890 iter->buffer_iter[cpu] =
3891 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3893 ring_buffer_read_prepare_sync();
3894 for_each_tracing_cpu(cpu) {
3895 ring_buffer_read_start(iter->buffer_iter[cpu]);
3896 tracing_iter_reset(iter, cpu);
3899 cpu = iter->cpu_file;
3900 iter->buffer_iter[cpu] =
3901 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3902 ring_buffer_read_prepare_sync();
3903 ring_buffer_read_start(iter->buffer_iter[cpu]);
3904 tracing_iter_reset(iter, cpu);
3907 mutex_unlock(&trace_types_lock);
3912 mutex_unlock(&trace_types_lock);
3914 kfree(iter->buffer_iter);
3916 seq_release_private(inode, file);
3917 return ERR_PTR(-ENOMEM);
3920 int tracing_open_generic(struct inode *inode, struct file *filp)
3922 if (tracing_disabled)
3925 filp->private_data = inode->i_private;
3929 bool tracing_is_disabled(void)
3931 return (tracing_disabled) ? true: false;
3935 * Open and update trace_array ref count.
3936 * Must have the current trace_array passed to it.
3938 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3940 struct trace_array *tr = inode->i_private;
3942 if (tracing_disabled)
3945 if (trace_array_get(tr) < 0)
3948 filp->private_data = inode->i_private;
3953 static int tracing_release(struct inode *inode, struct file *file)
3955 struct trace_array *tr = inode->i_private;
3956 struct seq_file *m = file->private_data;
3957 struct trace_iterator *iter;
3960 if (!(file->f_mode & FMODE_READ)) {
3961 trace_array_put(tr);
3965 /* Writes do not use seq_file */
3967 mutex_lock(&trace_types_lock);
3969 for_each_tracing_cpu(cpu) {
3970 if (iter->buffer_iter[cpu])
3971 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3974 if (iter->trace && iter->trace->close)
3975 iter->trace->close(iter);
3977 if (!iter->snapshot)
3978 /* reenable tracing if it was previously enabled */
3979 tracing_start_tr(tr);
3981 __trace_array_put(tr);
3983 mutex_unlock(&trace_types_lock);
3985 mutex_destroy(&iter->mutex);
3986 free_cpumask_var(iter->started);
3988 kfree(iter->buffer_iter);
3989 seq_release_private(inode, file);
3994 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3996 struct trace_array *tr = inode->i_private;
3998 trace_array_put(tr);
4002 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4004 struct trace_array *tr = inode->i_private;
4006 trace_array_put(tr);
4008 return single_release(inode, file);
4011 static int tracing_open(struct inode *inode, struct file *file)
4013 struct trace_array *tr = inode->i_private;
4014 struct trace_iterator *iter;
4017 if (trace_array_get(tr) < 0)
4020 /* If this file was open for write, then erase contents */
4021 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4022 int cpu = tracing_get_cpu(inode);
4023 struct trace_buffer *trace_buf = &tr->trace_buffer;
4025 #ifdef CONFIG_TRACER_MAX_TRACE
4026 if (tr->current_trace->print_max)
4027 trace_buf = &tr->max_buffer;
4030 if (cpu == RING_BUFFER_ALL_CPUS)
4031 tracing_reset_online_cpus(trace_buf);
4033 tracing_reset(trace_buf, cpu);
4036 if (file->f_mode & FMODE_READ) {
4037 iter = __tracing_open(inode, file, false);
4039 ret = PTR_ERR(iter);
4040 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4041 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4045 trace_array_put(tr);
4051 * Some tracers are not suitable for instance buffers.
4052 * A tracer is always available for the global array (toplevel)
4053 * or if it explicitly states that it is.
4056 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4058 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4061 /* Find the next tracer that this trace array may use */
4062 static struct tracer *
4063 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4065 while (t && !trace_ok_for_array(t, tr))
4072 t_next(struct seq_file *m, void *v, loff_t *pos)
4074 struct trace_array *tr = m->private;
4075 struct tracer *t = v;
4080 t = get_tracer_for_array(tr, t->next);
4085 static void *t_start(struct seq_file *m, loff_t *pos)
4087 struct trace_array *tr = m->private;
4091 mutex_lock(&trace_types_lock);
4093 t = get_tracer_for_array(tr, trace_types);
4094 for (; t && l < *pos; t = t_next(m, t, &l))
4100 static void t_stop(struct seq_file *m, void *p)
4102 mutex_unlock(&trace_types_lock);
4105 static int t_show(struct seq_file *m, void *v)
4107 struct tracer *t = v;
4112 seq_puts(m, t->name);
4121 static const struct seq_operations show_traces_seq_ops = {
4128 static int show_traces_open(struct inode *inode, struct file *file)
4130 struct trace_array *tr = inode->i_private;
4134 if (tracing_disabled)
4137 ret = seq_open(file, &show_traces_seq_ops);
4141 m = file->private_data;
4148 tracing_write_stub(struct file *filp, const char __user *ubuf,
4149 size_t count, loff_t *ppos)
4154 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4158 if (file->f_mode & FMODE_READ)
4159 ret = seq_lseek(file, offset, whence);
4161 file->f_pos = ret = 0;
4166 static const struct file_operations tracing_fops = {
4167 .open = tracing_open,
4169 .write = tracing_write_stub,
4170 .llseek = tracing_lseek,
4171 .release = tracing_release,
4174 static const struct file_operations show_traces_fops = {
4175 .open = show_traces_open,
4177 .release = seq_release,
4178 .llseek = seq_lseek,
4182 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4183 size_t count, loff_t *ppos)
4185 struct trace_array *tr = file_inode(filp)->i_private;
4189 len = snprintf(NULL, 0, "%*pb\n",
4190 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4191 mask_str = kmalloc(len, GFP_KERNEL);
4195 len = snprintf(mask_str, len, "%*pb\n",
4196 cpumask_pr_args(tr->tracing_cpumask));
4201 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4210 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4211 size_t count, loff_t *ppos)
4213 struct trace_array *tr = file_inode(filp)->i_private;
4214 cpumask_var_t tracing_cpumask_new;
4217 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4220 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4224 local_irq_disable();
4225 arch_spin_lock(&tr->max_lock);
4226 for_each_tracing_cpu(cpu) {
4228 * Increase/decrease the disabled counter if we are
4229 * about to flip a bit in the cpumask:
4231 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4232 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4233 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4234 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4236 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4237 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4238 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4239 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4242 arch_spin_unlock(&tr->max_lock);
4245 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4246 free_cpumask_var(tracing_cpumask_new);
4251 free_cpumask_var(tracing_cpumask_new);
4256 static const struct file_operations tracing_cpumask_fops = {
4257 .open = tracing_open_generic_tr,
4258 .read = tracing_cpumask_read,
4259 .write = tracing_cpumask_write,
4260 .release = tracing_release_generic_tr,
4261 .llseek = generic_file_llseek,
4264 static int tracing_trace_options_show(struct seq_file *m, void *v)
4266 struct tracer_opt *trace_opts;
4267 struct trace_array *tr = m->private;
4271 mutex_lock(&trace_types_lock);
4272 tracer_flags = tr->current_trace->flags->val;
4273 trace_opts = tr->current_trace->flags->opts;
4275 for (i = 0; trace_options[i]; i++) {
4276 if (tr->trace_flags & (1 << i))
4277 seq_printf(m, "%s\n", trace_options[i]);
4279 seq_printf(m, "no%s\n", trace_options[i]);
4282 for (i = 0; trace_opts[i].name; i++) {
4283 if (tracer_flags & trace_opts[i].bit)
4284 seq_printf(m, "%s\n", trace_opts[i].name);
4286 seq_printf(m, "no%s\n", trace_opts[i].name);
4288 mutex_unlock(&trace_types_lock);
4293 static int __set_tracer_option(struct trace_array *tr,
4294 struct tracer_flags *tracer_flags,
4295 struct tracer_opt *opts, int neg)
4297 struct tracer *trace = tracer_flags->trace;
4300 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4305 tracer_flags->val &= ~opts->bit;
4307 tracer_flags->val |= opts->bit;
4311 /* Try to assign a tracer specific option */
4312 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4314 struct tracer *trace = tr->current_trace;
4315 struct tracer_flags *tracer_flags = trace->flags;
4316 struct tracer_opt *opts = NULL;
4319 for (i = 0; tracer_flags->opts[i].name; i++) {
4320 opts = &tracer_flags->opts[i];
4322 if (strcmp(cmp, opts->name) == 0)
4323 return __set_tracer_option(tr, trace->flags, opts, neg);
4329 /* Some tracers require overwrite to stay enabled */
4330 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4332 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4338 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4340 /* do nothing if flag is already set */
4341 if (!!(tr->trace_flags & mask) == !!enabled)
4344 /* Give the tracer a chance to approve the change */
4345 if (tr->current_trace->flag_changed)
4346 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4350 tr->trace_flags |= mask;
4352 tr->trace_flags &= ~mask;
4354 if (mask == TRACE_ITER_RECORD_CMD)
4355 trace_event_enable_cmd_record(enabled);
4357 if (mask == TRACE_ITER_RECORD_TGID) {
4359 tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
4362 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4366 trace_event_enable_tgid_record(enabled);
4369 if (mask == TRACE_ITER_EVENT_FORK)
4370 trace_event_follow_fork(tr, enabled);
4372 if (mask == TRACE_ITER_FUNC_FORK)
4373 ftrace_pid_follow_fork(tr, enabled);
4375 if (mask == TRACE_ITER_OVERWRITE) {
4376 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4377 #ifdef CONFIG_TRACER_MAX_TRACE
4378 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4382 if (mask == TRACE_ITER_PRINTK) {
4383 trace_printk_start_stop_comm(enabled);
4384 trace_printk_control(enabled);
4390 static int trace_set_options(struct trace_array *tr, char *option)
4396 size_t orig_len = strlen(option);
4398 cmp = strstrip(option);
4400 if (strncmp(cmp, "no", 2) == 0) {
4405 mutex_lock(&trace_types_lock);
4407 for (i = 0; trace_options[i]; i++) {
4408 if (strcmp(cmp, trace_options[i]) == 0) {
4409 ret = set_tracer_flag(tr, 1 << i, !neg);
4414 /* If no option could be set, test the specific tracer options */
4415 if (!trace_options[i])
4416 ret = set_tracer_option(tr, cmp, neg);
4418 mutex_unlock(&trace_types_lock);
4421 * If the first trailing whitespace is replaced with '\0' by strstrip,
4422 * turn it back into a space.
4424 if (orig_len > strlen(option))
4425 option[strlen(option)] = ' ';
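/*
 * Illustrative sketch: trace_set_options() accepts the same names that
 * are listed in the trace_options file, optionally prefixed with "no"
 * to clear a flag.  The calls below mirror what
 * apply_trace_boot_options() does for a "trace_options=" boot string;
 * the buffers are writable because strstrip() modifies its argument,
 * and the chosen option names are examples.
 */
static void __init example_apply_options(void)
{
	char set_opt[] = "stacktrace";		/* set a flag */
	char clear_opt[] = "nooverwrite";	/* clear a flag */

	trace_set_options(&global_trace, set_opt);
	trace_set_options(&global_trace, clear_opt);
}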
4430 static void __init apply_trace_boot_options(void)
4432 char *buf = trace_boot_options_buf;
4436 option = strsep(&buf, ",");
4442 trace_set_options(&global_trace, option);
4444 /* Put back the comma to allow this to be called again */
4451 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4452 size_t cnt, loff_t *ppos)
4454 struct seq_file *m = filp->private_data;
4455 struct trace_array *tr = m->private;
4459 if (cnt >= sizeof(buf))
4462 if (copy_from_user(buf, ubuf, cnt))
4467 ret = trace_set_options(tr, buf);
4476 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4478 struct trace_array *tr = inode->i_private;
4481 if (tracing_disabled)
4484 if (trace_array_get(tr) < 0)
4487 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4489 trace_array_put(tr);
4494 static const struct file_operations tracing_iter_fops = {
4495 .open = tracing_trace_options_open,
4497 .llseek = seq_lseek,
4498 .release = tracing_single_release_tr,
4499 .write = tracing_trace_options_write,
4502 static const char readme_msg[] =
4503 "tracing mini-HOWTO:\n\n"
4504 "# echo 0 > tracing_on : quick way to disable tracing\n"
4505 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4506 " Important files:\n"
4507 " trace\t\t\t- The static contents of the buffer\n"
4508 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4509 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4510 " current_tracer\t- function and latency tracers\n"
4511 " available_tracers\t- list of configured tracers for current_tracer\n"
4512 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4513 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4514 " trace_clock\t\t-change the clock used to order events\n"
4515 " local: Per cpu clock but may not be synced across CPUs\n"
4516 " global: Synced across CPUs but slows tracing down.\n"
4517 " counter: Not a clock, but just an increment\n"
4518 " uptime: Jiffy counter from time of boot\n"
4519 " perf: Same clock that perf events use\n"
4520 #ifdef CONFIG_X86_64
4521 " x86-tsc: TSC cycle counter\n"
4523 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4524 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4525 " tracing_cpumask\t- Limit which CPUs to trace\n"
4526 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4527 "\t\t\t Remove sub-buffer with rmdir\n"
4528 " trace_options\t\t- Set format or modify how tracing happens\n"
4529 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4530 "\t\t\t option name\n"
4531 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4532 #ifdef CONFIG_DYNAMIC_FTRACE
4533 "\n available_filter_functions - list of functions that can be filtered on\n"
4534 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4535 "\t\t\t functions\n"
4536 "\t accepts: func_full_name or glob-matching-pattern\n"
4537 "\t modules: Can select a group via module\n"
4538 "\t Format: :mod:<module-name>\n"
4539 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4540 "\t triggers: a command to perform when function is hit\n"
4541 "\t Format: <function>:<trigger>[:count]\n"
4542 "\t trigger: traceon, traceoff\n"
4543 "\t\t enable_event:<system>:<event>\n"
4544 "\t\t disable_event:<system>:<event>\n"
4545 #ifdef CONFIG_STACKTRACE
4548 #ifdef CONFIG_TRACER_SNAPSHOT
4553 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4554 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4555 "\t The first one will disable tracing every time do_fault is hit\n"
4556 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4557 "\t The first time do trap is hit and it disables tracing, the\n"
4558 "\t counter will decrement to 2. If tracing is already disabled,\n"
4559 "\t the counter will not decrement. It only decrements when the\n"
4560 "\t trigger did work\n"
4561 "\t To remove trigger without count:\n"
4562 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4563 "\t To remove trigger with a count:\n"
4564 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4565 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4566 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4567 "\t modules: Can select a group via module command :mod:\n"
4568 "\t Does not accept triggers\n"
4569 #endif /* CONFIG_DYNAMIC_FTRACE */
4570 #ifdef CONFIG_FUNCTION_TRACER
4571 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4574 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4575 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4576 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4577 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4579 #ifdef CONFIG_TRACER_SNAPSHOT
4580 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4581 "\t\t\t snapshot buffer. Read the contents for more\n"
4582 "\t\t\t information\n"
4584 #ifdef CONFIG_STACK_TRACER
4585 " stack_trace\t\t- Shows the max stack trace when active\n"
4586 " stack_max_size\t- Shows current max stack size that was traced\n"
4587 "\t\t\t Write into this file to reset the max size (trigger a\n"
4588 "\t\t\t new trace)\n"
4589 #ifdef CONFIG_DYNAMIC_FTRACE
4590 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4593 #endif /* CONFIG_STACK_TRACER */
4594 #ifdef CONFIG_KPROBE_EVENTS
4595 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4596 "\t\t\t Write into this file to define/undefine new trace events.\n"
4598 #ifdef CONFIG_UPROBE_EVENTS
4599 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4600 "\t\t\t Write into this file to define/undefine new trace events.\n"
4602 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4603 "\t accepts: event-definitions (one definition per line)\n"
4604 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4605 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4606 "\t -:[<group>/]<event>\n"
4607 #ifdef CONFIG_KPROBE_EVENTS
4608 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4609 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4611 #ifdef CONFIG_UPROBE_EVENTS
4612 "\t place: <path>:<offset>\n"
4614 "\t args: <name>=fetcharg[:type]\n"
4615 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4616 "\t $stack<index>, $stack, $retval, $comm\n"
4617 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4618 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4620 " events/\t\t- Directory containing all trace event subsystems:\n"
4621 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4622 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4623 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4625 " filter\t\t- If set, only events passing filter are traced\n"
4626 " events/<system>/<event>/\t- Directory containing control files for\n"
4628 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4629 " filter\t\t- If set, only events passing filter are traced\n"
4630 " trigger\t\t- If set, a command to perform when event is hit\n"
4631 "\t Format: <trigger>[:count][if <filter>]\n"
4632 "\t trigger: traceon, traceoff\n"
4633 "\t enable_event:<system>:<event>\n"
4634 "\t disable_event:<system>:<event>\n"
4635 #ifdef CONFIG_HIST_TRIGGERS
4636 "\t enable_hist:<system>:<event>\n"
4637 "\t disable_hist:<system>:<event>\n"
4639 #ifdef CONFIG_STACKTRACE
4642 #ifdef CONFIG_TRACER_SNAPSHOT
4645 #ifdef CONFIG_HIST_TRIGGERS
4646 "\t\t hist (see below)\n"
4648 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4649 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4650 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4651 "\t events/block/block_unplug/trigger\n"
4652 "\t The first disables tracing every time block_unplug is hit.\n"
4653 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4654 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4655 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4656 "\t Like function triggers, the counter is only decremented if it\n"
4657 "\t enabled or disabled tracing.\n"
4658 "\t To remove a trigger without a count:\n"
4659 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4660 "\t To remove a trigger with a count:\n"
4661 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4662 "\t Filters can be ignored when removing a trigger.\n"
4663 #ifdef CONFIG_HIST_TRIGGERS
4664 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4665 "\t Format: hist:keys=<field1[,field2,...]>\n"
4666 "\t [:values=<field1[,field2,...]>]\n"
4667 "\t [:sort=<field1[,field2,...]>]\n"
4668 "\t [:size=#entries]\n"
4669 "\t [:pause][:continue][:clear]\n"
4670 "\t [:name=histname1]\n"
4671 "\t [if <filter>]\n\n"
4672 "\t When a matching event is hit, an entry is added to a hash\n"
4673 "\t table using the key(s) and value(s) named, and the value of a\n"
4674 "\t sum called 'hitcount' is incremented. Keys and values\n"
4675 "\t correspond to fields in the event's format description. Keys\n"
4676 "\t can be any field, or the special string 'stacktrace'.\n"
4677 "\t Compound keys consisting of up to two fields can be specified\n"
4678 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4679 "\t fields. Sort keys consisting of up to two fields can be\n"
4680 "\t specified using the 'sort' keyword. The sort direction can\n"
4681 "\t be modified by appending '.descending' or '.ascending' to a\n"
4682 "\t sort field. The 'size' parameter can be used to specify more\n"
4683 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4684 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4685 "\t its histogram data will be shared with other triggers of the\n"
4686 "\t same name, and trigger hits will update this common data.\n\n"
4687 "\t Reading the 'hist' file for the event will dump the hash\n"
4688 "\t table in its entirety to stdout. If there are multiple hist\n"
4689 "\t triggers attached to an event, there will be a table for each\n"
4690 "\t trigger in the output. The table displayed for a named\n"
4691 "\t trigger will be the same as any other instance having the\n"
4692 "\t same name. The default format used to display a given field\n"
4693 "\t can be modified by appending any of the following modifiers\n"
4694 "\t to the field name, as applicable:\n\n"
4695 "\t .hex display a number as a hex value\n"
4696 "\t .sym display an address as a symbol\n"
4697 "\t .sym-offset display an address as a symbol and offset\n"
4698 "\t .execname display a common_pid as a program name\n"
4699 "\t .syscall display a syscall id as a syscall name\n\n"
4700 "\t .log2 display log2 value rather than raw number\n\n"
4701 "\t The 'pause' parameter can be used to pause an existing hist\n"
4702 "\t trigger or to start a hist trigger but not log any events\n"
4703 "\t until told to do so. 'continue' can be used to start or\n"
4704 "\t restart a paused hist trigger.\n\n"
4705 "\t The 'clear' parameter will clear the contents of a running\n"
4706 "\t hist trigger and leave its current paused/active state\n"
4708 "\t The enable_hist and disable_hist triggers can be used to\n"
4709 "\t have one event conditionally start and stop another event's\n"
4710 "\t already-attached hist trigger. The syntax is analagous to\n"
4711 "\t the enable_event and disable_event triggers.\n"
4716 tracing_readme_read(struct file *filp, char __user *ubuf,
4717 size_t cnt, loff_t *ppos)
4719 return simple_read_from_buffer(ubuf, cnt, ppos,
4720 readme_msg, strlen(readme_msg));
4723 static const struct file_operations tracing_readme_fops = {
4724 .open = tracing_open_generic,
4725 .read = tracing_readme_read,
4726 .llseek = generic_file_llseek,
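/*
 * Illustrative userspace sketch (not part of this file): one way the hist
 * trigger syntax documented in readme_msg above is consumed, by writing it
 * to an event's per-event "trigger" file. The tracefs mount point, the
 * kmem:kmalloc event and its 'call_site'/'bytes_req' fields are assumptions
 * that depend on the running kernel's configuration.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Equivalent to: echo 'hist:keys=call_site:values=bytes_req' > trigger */
	const char *path = "/sys/kernel/tracing/events/kmem/kmalloc/trigger";
	const char *cmd = "hist:keys=call_site:values=bytes_req:sort=bytes_req.descending\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open trigger");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write hist trigger");
	close(fd);
	return 0;
}
#endif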
4729 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4733 if (*pos || m->count)
4738 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4739 if (trace_find_tgid(*ptr))
4746 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4756 v = saved_tgids_next(m, v, &l);
4764 static void saved_tgids_stop(struct seq_file *m, void *v)
4768 static int saved_tgids_show(struct seq_file *m, void *v)
4770 int pid = (int *)v - tgid_map;
4772 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4776 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4777 .start = saved_tgids_start,
4778 .stop = saved_tgids_stop,
4779 .next = saved_tgids_next,
4780 .show = saved_tgids_show,
4783 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4785 if (tracing_disabled)
4788 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4792 static const struct file_operations tracing_saved_tgids_fops = {
4793 .open = tracing_saved_tgids_open,
4795 .llseek = seq_lseek,
4796 .release = seq_release,
4799 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4801 unsigned int *ptr = v;
4803 if (*pos || m->count)
4808 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4810 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4819 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4825 arch_spin_lock(&trace_cmdline_lock);
4827 v = &savedcmd->map_cmdline_to_pid[0];
4829 v = saved_cmdlines_next(m, v, &l);
4837 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4839 arch_spin_unlock(&trace_cmdline_lock);
4843 static int saved_cmdlines_show(struct seq_file *m, void *v)
4845 char buf[TASK_COMM_LEN];
4846 unsigned int *pid = v;
4848 __trace_find_cmdline(*pid, buf);
4849 seq_printf(m, "%d %s\n", *pid, buf);
4853 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4854 .start = saved_cmdlines_start,
4855 .next = saved_cmdlines_next,
4856 .stop = saved_cmdlines_stop,
4857 .show = saved_cmdlines_show,
4860 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4862 if (tracing_disabled)
4865 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4868 static const struct file_operations tracing_saved_cmdlines_fops = {
4869 .open = tracing_saved_cmdlines_open,
4871 .llseek = seq_lseek,
4872 .release = seq_release,
4876 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4877 size_t cnt, loff_t *ppos)
4882 arch_spin_lock(&trace_cmdline_lock);
4883 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4884 arch_spin_unlock(&trace_cmdline_lock);
4886 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4889 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4891 kfree(s->saved_cmdlines);
4892 kfree(s->map_cmdline_to_pid);
4896 static int tracing_resize_saved_cmdlines(unsigned int val)
4898 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4900 s = kmalloc(sizeof(*s), GFP_KERNEL);
4904 if (allocate_cmdlines_buffer(val, s) < 0) {
4909 arch_spin_lock(&trace_cmdline_lock);
4910 savedcmd_temp = savedcmd;
4912 arch_spin_unlock(&trace_cmdline_lock);
4913 free_saved_cmdlines_buffer(savedcmd_temp);
4919 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4920 size_t cnt, loff_t *ppos)
4925 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4929 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
4930 if (!val || val > PID_MAX_DEFAULT)
4933 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4942 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4943 .open = tracing_open_generic,
4944 .read = tracing_saved_cmdlines_size_read,
4945 .write = tracing_saved_cmdlines_size_write,
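/*
 * Illustrative userspace sketch (not part of this file): reading and growing
 * the cmdline cache through the "saved_cmdlines_size" file served by the fops
 * above. Assumes tracefs is mounted at /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/saved_cmdlines_size", O_RDWR);

	if (fd < 0) {
		perror("open saved_cmdlines_size");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("current size: %s", buf);
	}
	/* Any value from 1 up to PID_MAX_DEFAULT is accepted. */
	if (write(fd, "512\n", 4) < 0)
		perror("write saved_cmdlines_size");
	close(fd);
	return 0;
}
#endif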
4948 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4949 static union trace_eval_map_item *
4950 update_eval_map(union trace_eval_map_item *ptr)
4952 if (!ptr->map.eval_string) {
4953 if (ptr->tail.next) {
4954 ptr = ptr->tail.next;
4955 /* Set ptr to the next real item (skip head) */
4963 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
4965 union trace_eval_map_item *ptr = v;
4968 * Paranoid! If ptr points to end, we don't want to increment past it.
4969 * This really should never happen.
4971 ptr = update_eval_map(ptr);
4972 if (WARN_ON_ONCE(!ptr))
4979 ptr = update_eval_map(ptr);
4984 static void *eval_map_start(struct seq_file *m, loff_t *pos)
4986 union trace_eval_map_item *v;
4989 mutex_lock(&trace_eval_mutex);
4991 v = trace_eval_maps;
4995 while (v && l < *pos) {
4996 v = eval_map_next(m, v, &l);
5002 static void eval_map_stop(struct seq_file *m, void *v)
5004 mutex_unlock(&trace_eval_mutex);
5007 static int eval_map_show(struct seq_file *m, void *v)
5009 union trace_eval_map_item *ptr = v;
5011 seq_printf(m, "%s %ld (%s)\n",
5012 ptr->map.eval_string, ptr->map.eval_value,
5018 static const struct seq_operations tracing_eval_map_seq_ops = {
5019 .start = eval_map_start,
5020 .next = eval_map_next,
5021 .stop = eval_map_stop,
5022 .show = eval_map_show,
5025 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5027 if (tracing_disabled)
5030 return seq_open(filp, &tracing_eval_map_seq_ops);
5033 static const struct file_operations tracing_eval_map_fops = {
5034 .open = tracing_eval_map_open,
5036 .llseek = seq_lseek,
5037 .release = seq_release,
5040 static inline union trace_eval_map_item *
5041 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5043 /* Return tail of array given the head */
5044 return ptr + ptr->head.length + 1;
5048 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5051 struct trace_eval_map **stop;
5052 struct trace_eval_map **map;
5053 union trace_eval_map_item *map_array;
5054 union trace_eval_map_item *ptr;
5059 * The trace_eval_maps contains the map plus a head and tail item,
5060 * where the head holds the module and length of array, and the
5061 * tail holds a pointer to the next list.
5063 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5065 pr_warn("Unable to allocate trace eval mapping\n");
5069 mutex_lock(&trace_eval_mutex);
5071 if (!trace_eval_maps)
5072 trace_eval_maps = map_array;
5074 ptr = trace_eval_maps;
5076 ptr = trace_eval_jmp_to_tail(ptr);
5077 if (!ptr->tail.next)
5079 ptr = ptr->tail.next;
5082 ptr->tail.next = map_array;
5084 map_array->head.mod = mod;
5085 map_array->head.length = len;
5088 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5089 map_array->map = **map;
5092 memset(map_array, 0, sizeof(*map_array));
5094 mutex_unlock(&trace_eval_mutex);
5097 static void trace_create_eval_file(struct dentry *d_tracer)
5099 trace_create_file("eval_map", 0444, d_tracer,
5100 NULL, &tracing_eval_map_fops);
5103 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5104 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5105 static inline void trace_insert_eval_map_file(struct module *mod,
5106 struct trace_eval_map **start, int len) { }
5107 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
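/*
 * Illustrative userspace sketch (not part of this file): with
 * CONFIG_TRACE_EVAL_MAP_FILE=y, the "eval_map" file created above emits one
 * "<name> <value> (<system>)" line per entry, as formatted by eval_map_show().
 * Assumes tracefs is mounted at /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/eval_map", O_RDONLY);

	if (fd < 0) {
		perror("open eval_map");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
#endif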
5109 static void trace_insert_eval_map(struct module *mod,
5110 struct trace_eval_map **start, int len)
5112 struct trace_eval_map **map;
5119 trace_event_eval_update(map, len);
5121 trace_insert_eval_map_file(mod, start, len);
5125 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5126 size_t cnt, loff_t *ppos)
5128 struct trace_array *tr = filp->private_data;
5129 char buf[MAX_TRACER_SIZE+2];
5132 mutex_lock(&trace_types_lock);
5133 r = sprintf(buf, "%s\n", tr->current_trace->name);
5134 mutex_unlock(&trace_types_lock);
5136 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5139 int tracer_init(struct tracer *t, struct trace_array *tr)
5141 tracing_reset_online_cpus(&tr->trace_buffer);
5145 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5149 for_each_tracing_cpu(cpu)
5150 per_cpu_ptr(buf->data, cpu)->entries = val;
5153 #ifdef CONFIG_TRACER_MAX_TRACE
5154 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5155 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5156 struct trace_buffer *size_buf, int cpu_id)
5160 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5161 for_each_tracing_cpu(cpu) {
5162 ret = ring_buffer_resize(trace_buf->buffer,
5163 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5166 per_cpu_ptr(trace_buf->data, cpu)->entries =
5167 per_cpu_ptr(size_buf->data, cpu)->entries;
5170 ret = ring_buffer_resize(trace_buf->buffer,
5171 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5173 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5174 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5179 #endif /* CONFIG_TRACER_MAX_TRACE */
5181 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5182 unsigned long size, int cpu)
5187 * If kernel or user changes the size of the ring buffer
5188 * we use the size that was given, and we can forget about
5189 * expanding it later.
5191 ring_buffer_expanded = true;
5193 /* May be called before buffers are initialized */
5194 if (!tr->trace_buffer.buffer)
5197 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5201 #ifdef CONFIG_TRACER_MAX_TRACE
5202 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5203 !tr->current_trace->use_max_tr)
5206 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5208 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5209 &tr->trace_buffer, cpu);
5212 * AARGH! We are left with a max buffer of a
5213 * different size!
5214 * The max buffer is our "snapshot" buffer.
5215 * When a tracer needs a snapshot (one of the
5216 * latency tracers), it swaps the max buffer
5217 * with the saved snapshot. We succeeded in
5218 * updating the size of the main buffer, but failed to
5219 * update the size of the max buffer. And when we tried
5220 * to reset the main buffer to the original size, we
5221 * failed there too. This is very unlikely to
5222 * happen, but if it does, warn and kill all tracing.
5226 tracing_disabled = 1;
5231 if (cpu == RING_BUFFER_ALL_CPUS)
5232 set_buffer_entries(&tr->max_buffer, size);
5234 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5237 #endif /* CONFIG_TRACER_MAX_TRACE */
5239 if (cpu == RING_BUFFER_ALL_CPUS)
5240 set_buffer_entries(&tr->trace_buffer, size);
5242 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5247 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5248 unsigned long size, int cpu_id)
5252 mutex_lock(&trace_types_lock);
5254 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5255 /* make sure this cpu is enabled in the mask */
5256 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5262 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5267 mutex_unlock(&trace_types_lock);
5274 * tracing_update_buffers - used by tracing facility to expand ring buffers
5276 * To save memory when tracing is configured in but never used,
5277 * the ring buffers are set to a minimum size. But once
5278 * a user starts to use the tracing facility, they need to grow
5279 * to their default size.
5281 * This function is to be called when a tracer is about to be used.
5283 int tracing_update_buffers(void)
5287 mutex_lock(&trace_types_lock);
5288 if (!ring_buffer_expanded)
5289 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5290 RING_BUFFER_ALL_CPUS);
5291 mutex_unlock(&trace_types_lock);
5296 struct trace_option_dentry;
5299 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5302 * Used to clear out the tracer before deletion of an instance.
5303 * Must have trace_types_lock held.
5305 static void tracing_set_nop(struct trace_array *tr)
5307 if (tr->current_trace == &nop_trace)
5310 tr->current_trace->enabled--;
5312 if (tr->current_trace->reset)
5313 tr->current_trace->reset(tr);
5315 tr->current_trace = &nop_trace;
5318 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5320 /* Only enable if the directory has been created already. */
5324 create_trace_option_files(tr, t);
5327 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5330 #ifdef CONFIG_TRACER_MAX_TRACE
5335 mutex_lock(&trace_types_lock);
5337 if (!ring_buffer_expanded) {
5338 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5339 RING_BUFFER_ALL_CPUS);
5345 for (t = trace_types; t; t = t->next) {
5346 if (strcmp(t->name, buf) == 0)
5353 if (t == tr->current_trace)
5356 /* Some tracers won't work on kernel command line */
5357 if (system_state < SYSTEM_RUNNING && t->noboot) {
5358 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5363 /* Some tracers are only allowed for the top level buffer */
5364 if (!trace_ok_for_array(t, tr)) {
5369 /* If trace pipe files are being read, we can't change the tracer */
5370 if (tr->current_trace->ref) {
5375 trace_branch_disable();
5377 tr->current_trace->enabled--;
5379 if (tr->current_trace->reset)
5380 tr->current_trace->reset(tr);
5382 /* Current trace needs to be nop_trace before synchronize_sched */
5383 tr->current_trace = &nop_trace;
5385 #ifdef CONFIG_TRACER_MAX_TRACE
5386 had_max_tr = tr->allocated_snapshot;
5388 if (had_max_tr && !t->use_max_tr) {
5390 * We need to make sure that the update_max_tr sees that
5391 * current_trace changed to nop_trace to keep it from
5392 * swapping the buffers after we resize it.
5393 * The update_max_tr is called with interrupts disabled
5394 * so a synchronize_sched() is sufficient.
5396 synchronize_sched();
5401 #ifdef CONFIG_TRACER_MAX_TRACE
5402 if (t->use_max_tr && !had_max_tr) {
5403 ret = alloc_snapshot(tr);
5410 ret = tracer_init(t, tr);
5415 tr->current_trace = t;
5416 tr->current_trace->enabled++;
5417 trace_branch_enable(tr);
5419 mutex_unlock(&trace_types_lock);
5425 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5426 size_t cnt, loff_t *ppos)
5428 struct trace_array *tr = filp->private_data;
5429 char buf[MAX_TRACER_SIZE+1];
5436 if (cnt > MAX_TRACER_SIZE)
5437 cnt = MAX_TRACER_SIZE;
5439 if (copy_from_user(buf, ubuf, cnt))
5444 /* strip trailing whitespace. */
5445 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5448 err = tracing_set_tracer(tr, buf);
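/*
 * Illustrative userspace sketch (not part of this file): writes to the
 * "current_tracer" file end up in tracing_set_tracer() above, which also
 * expands the ring buffer to its default size on first use. Assumes tracefs
 * is mounted at /sys/kernel/tracing; valid names are listed in
 * available_tracers ("nop" is always registered).
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *name = "nop\n";
	int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);

	if (fd < 0) {
		perror("open current_tracer");
		return 1;
	}
	if (write(fd, name, strlen(name)) < 0)
		perror("write current_tracer");
	close(fd);
	return 0;
}
#endif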
5458 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5459 size_t cnt, loff_t *ppos)
5464 r = snprintf(buf, sizeof(buf), "%ld\n",
5465 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5466 if (r > sizeof(buf))
5468 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5472 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5473 size_t cnt, loff_t *ppos)
5478 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5488 tracing_thresh_read(struct file *filp, char __user *ubuf,
5489 size_t cnt, loff_t *ppos)
5491 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5495 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5496 size_t cnt, loff_t *ppos)
5498 struct trace_array *tr = filp->private_data;
5501 mutex_lock(&trace_types_lock);
5502 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5506 if (tr->current_trace->update_thresh) {
5507 ret = tr->current_trace->update_thresh(tr);
5514 mutex_unlock(&trace_types_lock);
5519 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5522 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5523 size_t cnt, loff_t *ppos)
5525 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5529 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5530 size_t cnt, loff_t *ppos)
5532 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5537 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5539 struct trace_array *tr = inode->i_private;
5540 struct trace_iterator *iter;
5543 if (tracing_disabled)
5546 if (trace_array_get(tr) < 0)
5549 mutex_lock(&trace_types_lock);
5551 /* create a buffer to store the information to pass to userspace */
5552 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5555 __trace_array_put(tr);
5559 trace_seq_init(&iter->seq);
5560 iter->trace = tr->current_trace;
5562 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5567 /* trace pipe does not show start of buffer */
5568 cpumask_setall(iter->started);
5570 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5571 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5573 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5574 if (trace_clocks[tr->clock_id].in_ns)
5575 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5578 iter->trace_buffer = &tr->trace_buffer;
5579 iter->cpu_file = tracing_get_cpu(inode);
5580 mutex_init(&iter->mutex);
5581 filp->private_data = iter;
5583 if (iter->trace->pipe_open)
5584 iter->trace->pipe_open(iter);
5586 nonseekable_open(inode, filp);
5588 tr->current_trace->ref++;
5590 mutex_unlock(&trace_types_lock);
5596 __trace_array_put(tr);
5597 mutex_unlock(&trace_types_lock);
5601 static int tracing_release_pipe(struct inode *inode, struct file *file)
5603 struct trace_iterator *iter = file->private_data;
5604 struct trace_array *tr = inode->i_private;
5606 mutex_lock(&trace_types_lock);
5608 tr->current_trace->ref--;
5610 if (iter->trace->pipe_close)
5611 iter->trace->pipe_close(iter);
5613 mutex_unlock(&trace_types_lock);
5615 free_cpumask_var(iter->started);
5616 mutex_destroy(&iter->mutex);
5619 trace_array_put(tr);
5625 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5627 struct trace_array *tr = iter->tr;
5629 /* Iterators are static, they should be filled or empty */
5630 if (trace_buffer_iter(iter, iter->cpu_file))
5631 return POLLIN | POLLRDNORM;
5633 if (tr->trace_flags & TRACE_ITER_BLOCK)
5635 * Always select as readable when in blocking mode
5637 return POLLIN | POLLRDNORM;
5639 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5644 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5646 struct trace_iterator *iter = filp->private_data;
5648 return trace_poll(iter, filp, poll_table);
5651 /* Must be called with iter->mutex held. */
5652 static int tracing_wait_pipe(struct file *filp)
5654 struct trace_iterator *iter = filp->private_data;
5657 while (trace_empty(iter)) {
5659 if ((filp->f_flags & O_NONBLOCK)) {
5664 * We block until we read something and tracing is disabled.
5665 * We still block if tracing is disabled, but we have never
5666 * read anything. This allows a user to cat this file, and
5667 * then enable tracing. But after we have read something,
5668 * we give an EOF when tracing is again disabled.
5670 * iter->pos will be 0 if we haven't read anything.
5672 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5675 mutex_unlock(&iter->mutex);
5677 ret = wait_on_pipe(iter, false);
5679 mutex_lock(&iter->mutex);
5692 tracing_read_pipe(struct file *filp, char __user *ubuf,
5693 size_t cnt, loff_t *ppos)
5695 struct trace_iterator *iter = filp->private_data;
5699 * Avoid more than one consumer on a single file descriptor
5700 * This is just a matter of trace coherency; the ring buffer itself is protected.
5703 mutex_lock(&iter->mutex);
5705 /* return any leftover data */
5706 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5710 trace_seq_init(&iter->seq);
5712 if (iter->trace->read) {
5713 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5719 sret = tracing_wait_pipe(filp);
5723 /* stop when tracing is finished */
5724 if (trace_empty(iter)) {
5729 if (cnt >= PAGE_SIZE)
5730 cnt = PAGE_SIZE - 1;
5732 /* reset all but tr, trace, and overruns */
5733 memset(&iter->seq, 0,
5734 sizeof(struct trace_iterator) -
5735 offsetof(struct trace_iterator, seq));
5736 cpumask_clear(iter->started);
5739 trace_event_read_lock();
5740 trace_access_lock(iter->cpu_file);
5741 while (trace_find_next_entry_inc(iter) != NULL) {
5742 enum print_line_t ret;
5743 int save_len = iter->seq.seq.len;
5745 ret = print_trace_line(iter);
5746 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5747 /* don't print partial lines */
5748 iter->seq.seq.len = save_len;
5751 if (ret != TRACE_TYPE_NO_CONSUME)
5752 trace_consume(iter);
5754 if (trace_seq_used(&iter->seq) >= cnt)
5758 * Setting the full flag means we reached the trace_seq buffer
5759 * size and we should have left via the partial output condition above.
5760 * One of the trace_seq_* functions is not used properly.
5762 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5765 trace_access_unlock(iter->cpu_file);
5766 trace_event_read_unlock();
5768 /* Now copy what we have to the user */
5769 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5770 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5771 trace_seq_init(&iter->seq);
5774 * If there was nothing to send to the user, despite consuming trace
5775 * entries, go back and wait for more entries.
5781 mutex_unlock(&iter->mutex);
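/*
 * Illustrative userspace sketch (not part of this file): a consuming reader
 * of "trace_pipe", which is served by tracing_read_pipe() above. Each read
 * removes the entries it returns and blocks while the buffer is empty.
 * Assumes tracefs is mounted at /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);

	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
#endif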
5786 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5789 __free_page(spd->pages[idx]);
5792 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5794 .confirm = generic_pipe_buf_confirm,
5795 .release = generic_pipe_buf_release,
5796 .steal = generic_pipe_buf_steal,
5797 .get = generic_pipe_buf_get,
5801 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5807 /* Seq buffer is page-sized, exactly what we need. */
5809 save_len = iter->seq.seq.len;
5810 ret = print_trace_line(iter);
5812 if (trace_seq_has_overflowed(&iter->seq)) {
5813 iter->seq.seq.len = save_len;
5818 * This should not be hit, because it should only
5819 * be set if the iter->seq overflowed. But check it
5820 * anyway to be safe.
5822 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5823 iter->seq.seq.len = save_len;
5827 count = trace_seq_used(&iter->seq) - save_len;
5830 iter->seq.seq.len = save_len;
5834 if (ret != TRACE_TYPE_NO_CONSUME)
5835 trace_consume(iter);
5837 if (!trace_find_next_entry_inc(iter)) {
5847 static ssize_t tracing_splice_read_pipe(struct file *filp,
5849 struct pipe_inode_info *pipe,
5853 struct page *pages_def[PIPE_DEF_BUFFERS];
5854 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5855 struct trace_iterator *iter = filp->private_data;
5856 struct splice_pipe_desc spd = {
5858 .partial = partial_def,
5859 .nr_pages = 0, /* This gets updated below. */
5860 .nr_pages_max = PIPE_DEF_BUFFERS,
5861 .ops = &tracing_pipe_buf_ops,
5862 .spd_release = tracing_spd_release_pipe,
5868 if (splice_grow_spd(pipe, &spd))
5871 mutex_lock(&iter->mutex);
5873 if (iter->trace->splice_read) {
5874 ret = iter->trace->splice_read(iter, filp,
5875 ppos, pipe, len, flags);
5880 ret = tracing_wait_pipe(filp);
5884 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5889 trace_event_read_lock();
5890 trace_access_lock(iter->cpu_file);
5892 /* Fill as many pages as possible. */
5893 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5894 spd.pages[i] = alloc_page(GFP_KERNEL);
5898 rem = tracing_fill_pipe_page(rem, iter);
5900 /* Copy the data into the page, so we can start over. */
5901 ret = trace_seq_to_buffer(&iter->seq,
5902 page_address(spd.pages[i]),
5903 trace_seq_used(&iter->seq));
5905 __free_page(spd.pages[i]);
5908 spd.partial[i].offset = 0;
5909 spd.partial[i].len = trace_seq_used(&iter->seq);
5911 trace_seq_init(&iter->seq);
5914 trace_access_unlock(iter->cpu_file);
5915 trace_event_read_unlock();
5916 mutex_unlock(&iter->mutex);
5921 ret = splice_to_pipe(pipe, &spd);
5925 splice_shrink_spd(&spd);
5929 mutex_unlock(&iter->mutex);
5934 tracing_entries_read(struct file *filp, char __user *ubuf,
5935 size_t cnt, loff_t *ppos)
5937 struct inode *inode = file_inode(filp);
5938 struct trace_array *tr = inode->i_private;
5939 int cpu = tracing_get_cpu(inode);
5944 mutex_lock(&trace_types_lock);
5946 if (cpu == RING_BUFFER_ALL_CPUS) {
5947 int cpu, buf_size_same;
5952 /* check if all cpu sizes are same */
5953 for_each_tracing_cpu(cpu) {
5954 /* fill in the size from first enabled cpu */
5956 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5957 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5963 if (buf_size_same) {
5964 if (!ring_buffer_expanded)
5965 r = sprintf(buf, "%lu (expanded: %lu)\n",
5967 trace_buf_size >> 10);
5969 r = sprintf(buf, "%lu\n", size >> 10);
5971 r = sprintf(buf, "X\n");
5973 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5975 mutex_unlock(&trace_types_lock);
5977 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5982 tracing_entries_write(struct file *filp, const char __user *ubuf,
5983 size_t cnt, loff_t *ppos)
5985 struct inode *inode = file_inode(filp);
5986 struct trace_array *tr = inode->i_private;
5990 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5994 /* must have at least 1 entry */
5998 /* value is in KB */
6000 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
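/*
 * Illustrative userspace sketch (not part of this file): resizing the ring
 * buffer through "buffer_size_kb", which is handled by tracing_entries_write()
 * above; the value is interpreted in KB per CPU. Per-CPU resizing uses
 * per_cpu/cpu<N>/buffer_size_kb instead. Assumes tracefs is mounted at
 * /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *kb = "4096\n";
	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);

	if (fd < 0) {
		perror("open buffer_size_kb");
		return 1;
	}
	if (write(fd, kb, strlen(kb)) < 0)
		perror("write buffer_size_kb");
	close(fd);
	return 0;
}
#endif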
6010 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6011 size_t cnt, loff_t *ppos)
6013 struct trace_array *tr = filp->private_data;
6016 unsigned long size = 0, expanded_size = 0;
6018 mutex_lock(&trace_types_lock);
6019 for_each_tracing_cpu(cpu) {
6020 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6021 if (!ring_buffer_expanded)
6022 expanded_size += trace_buf_size >> 10;
6024 if (ring_buffer_expanded)
6025 r = sprintf(buf, "%lu\n", size);
6027 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6028 mutex_unlock(&trace_types_lock);
6030 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6034 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6035 size_t cnt, loff_t *ppos)
6038 * There is no need to read what the user has written; this function
6039 * is just here to make sure that "echo" does not return an error.
6048 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6050 struct trace_array *tr = inode->i_private;
6052 /* disable tracing? */
6053 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6054 tracer_tracing_off(tr);
6055 /* resize the ring buffer to 0 */
6056 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6058 trace_array_put(tr);
6064 tracing_mark_write(struct file *filp, const char __user *ubuf,
6065 size_t cnt, loff_t *fpos)
6067 struct trace_array *tr = filp->private_data;
6068 struct ring_buffer_event *event;
6069 struct ring_buffer *buffer;
6070 struct print_entry *entry;
6071 unsigned long irq_flags;
6072 const char faulted[] = "<faulted>";
6077 /* Used in tracing_mark_raw_write() as well */
6078 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6080 if (tracing_disabled)
6083 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6086 if (cnt > TRACE_BUF_SIZE)
6087 cnt = TRACE_BUF_SIZE;
6089 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6091 local_save_flags(irq_flags);
6092 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6094 /* If less than "<faulted>", then make sure we can still add that */
6095 if (cnt < FAULTED_SIZE)
6096 size += FAULTED_SIZE - cnt;
6098 buffer = tr->trace_buffer.buffer;
6099 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6100 irq_flags, preempt_count());
6101 if (unlikely(!event))
6102 /* Ring buffer disabled, return as if not open for write */
6105 entry = ring_buffer_event_data(event);
6106 entry->ip = _THIS_IP_;
6108 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6110 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6117 if (entry->buf[cnt - 1] != '\n') {
6118 entry->buf[cnt] = '\n';
6119 entry->buf[cnt + 1] = '\0';
6121 entry->buf[cnt] = '\0';
6123 __buffer_unlock_commit(buffer, event);
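/*
 * Illustrative userspace sketch (not part of this file): each write to the
 * "trace_marker" file is handled by tracing_mark_write() above and becomes a
 * single TRACE_PRINT entry in the ring buffer. Assumes tracefs is mounted at
 * /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "hello from userspace\n";
	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

	if (fd < 0) {
		perror("open trace_marker");
		return 1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write trace_marker");
	close(fd);
	return 0;
}
#endif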
6131 /* Limit it for now to 3K (including tag) */
6132 #define RAW_DATA_MAX_SIZE (1024*3)
6135 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6136 size_t cnt, loff_t *fpos)
6138 struct trace_array *tr = filp->private_data;
6139 struct ring_buffer_event *event;
6140 struct ring_buffer *buffer;
6141 struct raw_data_entry *entry;
6142 const char faulted[] = "<faulted>";
6143 unsigned long irq_flags;
6148 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6150 if (tracing_disabled)
6153 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6156 /* The marker must at least have a tag id */
6157 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6160 if (cnt > TRACE_BUF_SIZE)
6161 cnt = TRACE_BUF_SIZE;
6163 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6165 local_save_flags(irq_flags);
6166 size = sizeof(*entry) + cnt;
6167 if (cnt < FAULT_SIZE_ID)
6168 size += FAULT_SIZE_ID - cnt;
6170 buffer = tr->trace_buffer.buffer;
6171 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6172 irq_flags, preempt_count());
6174 /* Ring buffer disabled, return as if not open for write */
6177 entry = ring_buffer_event_data(event);
6179 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6182 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6187 __buffer_unlock_commit(buffer, event);
6195 static int tracing_clock_show(struct seq_file *m, void *v)
6197 struct trace_array *tr = m->private;
6200 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6202 "%s%s%s%s", i ? " " : "",
6203 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6204 i == tr->clock_id ? "]" : "");
6210 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6214 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6215 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6218 if (i == ARRAY_SIZE(trace_clocks))
6221 mutex_lock(&trace_types_lock);
6225 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6228 * New clock may not be consistent with the previous clock.
6229 * Reset the buffer so that it doesn't have incomparable timestamps.
6231 tracing_reset_online_cpus(&tr->trace_buffer);
6233 #ifdef CONFIG_TRACER_MAX_TRACE
6234 if (tr->max_buffer.buffer)
6235 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6236 tracing_reset_online_cpus(&tr->max_buffer);
6239 mutex_unlock(&trace_types_lock);
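/*
 * Illustrative userspace sketch (not part of this file): reading the clock
 * list and switching clocks through "trace_clock", which lands in
 * tracing_set_clock() above. "local" and "global" are always available;
 * switching resets the buffer. Assumes tracefs is mounted at
 * /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/trace_clock", O_RDWR);

	if (fd < 0) {
		perror("open trace_clock");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("clocks: %s", buf);	/* the active clock is in [brackets] */
	}
	if (write(fd, "global\n", 7) < 0)
		perror("write trace_clock");
	close(fd);
	return 0;
}
#endif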
6244 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6245 size_t cnt, loff_t *fpos)
6247 struct seq_file *m = filp->private_data;
6248 struct trace_array *tr = m->private;
6250 const char *clockstr;
6253 if (cnt >= sizeof(buf))
6256 if (copy_from_user(buf, ubuf, cnt))
6261 clockstr = strstrip(buf);
6263 ret = tracing_set_clock(tr, clockstr);
6272 static int tracing_clock_open(struct inode *inode, struct file *file)
6274 struct trace_array *tr = inode->i_private;
6277 if (tracing_disabled)
6280 if (trace_array_get(tr))
6283 ret = single_open(file, tracing_clock_show, inode->i_private);
6285 trace_array_put(tr);
6290 struct ftrace_buffer_info {
6291 struct trace_iterator iter;
6293 unsigned int spare_cpu;
6297 #ifdef CONFIG_TRACER_SNAPSHOT
6298 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6300 struct trace_array *tr = inode->i_private;
6301 struct trace_iterator *iter;
6305 if (trace_array_get(tr) < 0)
6308 if (file->f_mode & FMODE_READ) {
6309 iter = __tracing_open(inode, file, true);
6311 ret = PTR_ERR(iter);
6313 /* Writes still need the seq_file to hold the private data */
6315 m = kzalloc(sizeof(*m), GFP_KERNEL);
6318 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6326 iter->trace_buffer = &tr->max_buffer;
6327 iter->cpu_file = tracing_get_cpu(inode);
6329 file->private_data = m;
6333 trace_array_put(tr);
6339 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6342 struct seq_file *m = filp->private_data;
6343 struct trace_iterator *iter = m->private;
6344 struct trace_array *tr = iter->tr;
6348 ret = tracing_update_buffers();
6352 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6356 mutex_lock(&trace_types_lock);
6358 if (tr->current_trace->use_max_tr) {
6365 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6369 if (tr->allocated_snapshot)
6373 /* Only allow per-cpu swap if the ring buffer supports it */
6374 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6375 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6380 if (!tr->allocated_snapshot) {
6381 ret = alloc_snapshot(tr);
6385 local_irq_disable();
6386 /* Now, we're going to swap */
6387 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6388 update_max_tr(tr, current, smp_processor_id());
6390 update_max_tr_single(tr, current, iter->cpu_file);
6394 if (tr->allocated_snapshot) {
6395 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6396 tracing_reset_online_cpus(&tr->max_buffer);
6398 tracing_reset(&tr->max_buffer, iter->cpu_file);
6408 mutex_unlock(&trace_types_lock);
6412 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6414 struct seq_file *m = file->private_data;
6417 ret = tracing_release(inode, file);
6419 if (file->f_mode & FMODE_READ)
6422 /* If write only, the seq_file is just a stub */
6430 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6431 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6432 size_t count, loff_t *ppos);
6433 static int tracing_buffers_release(struct inode *inode, struct file *file);
6434 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6435 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6437 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6439 struct ftrace_buffer_info *info;
6442 ret = tracing_buffers_open(inode, filp);
6446 info = filp->private_data;
6448 if (info->iter.trace->use_max_tr) {
6449 tracing_buffers_release(inode, filp);
6453 info->iter.snapshot = true;
6454 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6459 #endif /* CONFIG_TRACER_SNAPSHOT */
6462 static const struct file_operations tracing_thresh_fops = {
6463 .open = tracing_open_generic,
6464 .read = tracing_thresh_read,
6465 .write = tracing_thresh_write,
6466 .llseek = generic_file_llseek,
6469 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6470 static const struct file_operations tracing_max_lat_fops = {
6471 .open = tracing_open_generic,
6472 .read = tracing_max_lat_read,
6473 .write = tracing_max_lat_write,
6474 .llseek = generic_file_llseek,
6478 static const struct file_operations set_tracer_fops = {
6479 .open = tracing_open_generic,
6480 .read = tracing_set_trace_read,
6481 .write = tracing_set_trace_write,
6482 .llseek = generic_file_llseek,
6485 static const struct file_operations tracing_pipe_fops = {
6486 .open = tracing_open_pipe,
6487 .poll = tracing_poll_pipe,
6488 .read = tracing_read_pipe,
6489 .splice_read = tracing_splice_read_pipe,
6490 .release = tracing_release_pipe,
6491 .llseek = no_llseek,
6494 static const struct file_operations tracing_entries_fops = {
6495 .open = tracing_open_generic_tr,
6496 .read = tracing_entries_read,
6497 .write = tracing_entries_write,
6498 .llseek = generic_file_llseek,
6499 .release = tracing_release_generic_tr,
6502 static const struct file_operations tracing_total_entries_fops = {
6503 .open = tracing_open_generic_tr,
6504 .read = tracing_total_entries_read,
6505 .llseek = generic_file_llseek,
6506 .release = tracing_release_generic_tr,
6509 static const struct file_operations tracing_free_buffer_fops = {
6510 .open = tracing_open_generic_tr,
6511 .write = tracing_free_buffer_write,
6512 .release = tracing_free_buffer_release,
6515 static const struct file_operations tracing_mark_fops = {
6516 .open = tracing_open_generic_tr,
6517 .write = tracing_mark_write,
6518 .llseek = generic_file_llseek,
6519 .release = tracing_release_generic_tr,
6522 static const struct file_operations tracing_mark_raw_fops = {
6523 .open = tracing_open_generic_tr,
6524 .write = tracing_mark_raw_write,
6525 .llseek = generic_file_llseek,
6526 .release = tracing_release_generic_tr,
6529 static const struct file_operations trace_clock_fops = {
6530 .open = tracing_clock_open,
6532 .llseek = seq_lseek,
6533 .release = tracing_single_release_tr,
6534 .write = tracing_clock_write,
6537 #ifdef CONFIG_TRACER_SNAPSHOT
6538 static const struct file_operations snapshot_fops = {
6539 .open = tracing_snapshot_open,
6541 .write = tracing_snapshot_write,
6542 .llseek = tracing_lseek,
6543 .release = tracing_snapshot_release,
6546 static const struct file_operations snapshot_raw_fops = {
6547 .open = snapshot_raw_open,
6548 .read = tracing_buffers_read,
6549 .release = tracing_buffers_release,
6550 .splice_read = tracing_buffers_splice_read,
6551 .llseek = no_llseek,
6554 #endif /* CONFIG_TRACER_SNAPSHOT */
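/*
 * Illustrative userspace sketch (not part of this file): with
 * CONFIG_TRACER_SNAPSHOT, writing "1" to the "snapshot" file (snapshot_fops
 * above) allocates the max buffer if needed and swaps it in; reading the file
 * then shows the frozen copy. Assumes tracefs is mounted at
 * /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/snapshot";
	char buf[4096];
	ssize_t n;
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open snapshot");
		return 1;
	}
	if (write(fd, "1\n", 2) < 0)
		perror("write snapshot");
	close(fd);

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open snapshot");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
#endif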
6556 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6558 struct trace_array *tr = inode->i_private;
6559 struct ftrace_buffer_info *info;
6562 if (tracing_disabled)
6565 if (trace_array_get(tr) < 0)
6568 info = kzalloc(sizeof(*info), GFP_KERNEL);
6570 trace_array_put(tr);
6574 mutex_lock(&trace_types_lock);
6577 info->iter.cpu_file = tracing_get_cpu(inode);
6578 info->iter.trace = tr->current_trace;
6579 info->iter.trace_buffer = &tr->trace_buffer;
6581 /* Force reading ring buffer for first read */
6582 info->read = (unsigned int)-1;
6584 filp->private_data = info;
6586 tr->current_trace->ref++;
6588 mutex_unlock(&trace_types_lock);
6590 ret = nonseekable_open(inode, filp);
6592 trace_array_put(tr);
6598 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6600 struct ftrace_buffer_info *info = filp->private_data;
6601 struct trace_iterator *iter = &info->iter;
6603 return trace_poll(iter, filp, poll_table);
6607 tracing_buffers_read(struct file *filp, char __user *ubuf,
6608 size_t count, loff_t *ppos)
6610 struct ftrace_buffer_info *info = filp->private_data;
6611 struct trace_iterator *iter = &info->iter;
6618 #ifdef CONFIG_TRACER_MAX_TRACE
6619 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6624 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6626 if (IS_ERR(info->spare)) {
6627 ret = PTR_ERR(info->spare);
6630 info->spare_cpu = iter->cpu_file;
6636 /* Do we have previous read data to read? */
6637 if (info->read < PAGE_SIZE)
6641 trace_access_lock(iter->cpu_file);
6642 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6646 trace_access_unlock(iter->cpu_file);
6649 if (trace_empty(iter)) {
6650 if ((filp->f_flags & O_NONBLOCK))
6653 ret = wait_on_pipe(iter, false);
6664 size = PAGE_SIZE - info->read;
6668 ret = copy_to_user(ubuf, info->spare + info->read, size);
6680 static int tracing_buffers_release(struct inode *inode, struct file *file)
6682 struct ftrace_buffer_info *info = file->private_data;
6683 struct trace_iterator *iter = &info->iter;
6685 mutex_lock(&trace_types_lock);
6687 iter->tr->current_trace->ref--;
6689 __trace_array_put(iter->tr);
6692 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6693 info->spare_cpu, info->spare);
6696 mutex_unlock(&trace_types_lock);
6702 struct ring_buffer *buffer;
6708 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6709 struct pipe_buffer *buf)
6711 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6716 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6721 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6722 struct pipe_buffer *buf)
6724 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6729 /* Pipe buffer operations for a buffer. */
6730 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6732 .confirm = generic_pipe_buf_confirm,
6733 .release = buffer_pipe_buf_release,
6734 .steal = generic_pipe_buf_steal,
6735 .get = buffer_pipe_buf_get,
6739 * Callback from splice_to_pipe(), used to release any pages left
6740 * at the end of the spd if we errored out while filling the pipe.
6742 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6744 struct buffer_ref *ref =
6745 (struct buffer_ref *)spd->partial[i].private;
6750 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6752 spd->partial[i].private = 0;
6756 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6757 struct pipe_inode_info *pipe, size_t len,
6760 struct ftrace_buffer_info *info = file->private_data;
6761 struct trace_iterator *iter = &info->iter;
6762 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6763 struct page *pages_def[PIPE_DEF_BUFFERS];
6764 struct splice_pipe_desc spd = {
6766 .partial = partial_def,
6767 .nr_pages_max = PIPE_DEF_BUFFERS,
6768 .ops = &buffer_pipe_buf_ops,
6769 .spd_release = buffer_spd_release,
6771 struct buffer_ref *ref;
6775 #ifdef CONFIG_TRACER_MAX_TRACE
6776 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6780 if (*ppos & (PAGE_SIZE - 1))
6783 if (len & (PAGE_SIZE - 1)) {
6784 if (len < PAGE_SIZE)
6789 if (splice_grow_spd(pipe, &spd))
6793 trace_access_lock(iter->cpu_file);
6794 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6796 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6800 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6807 ref->buffer = iter->trace_buffer->buffer;
6808 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6809 if (IS_ERR(ref->page)) {
6810 ret = PTR_ERR(ref->page);
6815 ref->cpu = iter->cpu_file;
6817 r = ring_buffer_read_page(ref->buffer, &ref->page,
6818 len, iter->cpu_file, 1);
6820 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6826 page = virt_to_page(ref->page);
6828 spd.pages[i] = page;
6829 spd.partial[i].len = PAGE_SIZE;
6830 spd.partial[i].offset = 0;
6831 spd.partial[i].private = (unsigned long)ref;
6835 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6838 trace_access_unlock(iter->cpu_file);
6841 /* did we read anything? */
6842 if (!spd.nr_pages) {
6847 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6850 ret = wait_on_pipe(iter, true);
6857 ret = splice_to_pipe(pipe, &spd);
6859 splice_shrink_spd(&spd);
6864 static const struct file_operations tracing_buffers_fops = {
6865 .open = tracing_buffers_open,
6866 .read = tracing_buffers_read,
6867 .poll = tracing_buffers_poll,
6868 .release = tracing_buffers_release,
6869 .splice_read = tracing_buffers_splice_read,
6870 .llseek = no_llseek,
6874 tracing_stats_read(struct file *filp, char __user *ubuf,
6875 size_t count, loff_t *ppos)
6877 struct inode *inode = file_inode(filp);
6878 struct trace_array *tr = inode->i_private;
6879 struct trace_buffer *trace_buf = &tr->trace_buffer;
6880 int cpu = tracing_get_cpu(inode);
6881 struct trace_seq *s;
6883 unsigned long long t;
6884 unsigned long usec_rem;
6886 s = kmalloc(sizeof(*s), GFP_KERNEL);
6892 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
6893 trace_seq_printf(s, "entries: %ld\n", cnt);
6895 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
6896 trace_seq_printf(s, "overrun: %ld\n", cnt);
6898 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
6899 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6901 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
6902 trace_seq_printf(s, "bytes: %ld\n", cnt);
6904 if (trace_clocks[tr->clock_id].in_ns) {
6905 /* local or global for trace_clock */
6906 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6907 usec_rem = do_div(t, USEC_PER_SEC);
6908 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6911 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
6912 usec_rem = do_div(t, USEC_PER_SEC);
6913 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6915 /* counter or tsc mode for trace_clock */
6916 trace_seq_printf(s, "oldest event ts: %llu\n",
6917 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6919 trace_seq_printf(s, "now ts: %llu\n",
6920 ring_buffer_time_stamp(trace_buf->buffer, cpu));
6923 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
6924 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6926 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
6927 trace_seq_printf(s, "read events: %ld\n", cnt);
6929 count = simple_read_from_buffer(ubuf, count, ppos,
6930 s->buffer, trace_seq_used(s));
6937 static const struct file_operations tracing_stats_fops = {
6938 .open = tracing_open_generic_tr,
6939 .read = tracing_stats_read,
6940 .llseek = generic_file_llseek,
6941 .release = tracing_release_generic_tr,
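/*
 * Illustrative userspace sketch (not part of this file): the per-CPU "stats"
 * files are served by tracing_stats_read() above and report entries, overruns,
 * bytes and timestamps for one CPU. Assumes tracefs is mounted at
 * /sys/kernel/tracing and that cpu0 is online.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/stats", O_RDONLY);

	if (fd < 0) {
		perror("open per_cpu/cpu0/stats");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}
#endif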
6944 #ifdef CONFIG_DYNAMIC_FTRACE
6947 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
6948 size_t cnt, loff_t *ppos)
6950 unsigned long *p = filp->private_data;
6951 char buf[64]; /* Not too big for a shallow stack */
6954 r = scnprintf(buf, 63, "%ld", *p);
6957 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6960 static const struct file_operations tracing_dyn_info_fops = {
6961 .open = tracing_open_generic,
6962 .read = tracing_read_dyn_info,
6963 .llseek = generic_file_llseek,
6965 #endif /* CONFIG_DYNAMIC_FTRACE */
6967 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6969 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
6970 struct trace_array *tr, struct ftrace_probe_ops *ops,
6973 tracing_snapshot_instance(tr);
6977 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
6978 struct trace_array *tr, struct ftrace_probe_ops *ops,
6981 struct ftrace_func_mapper *mapper = data;
6985 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6995 tracing_snapshot_instance(tr);
6999 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7000 struct ftrace_probe_ops *ops, void *data)
7002 struct ftrace_func_mapper *mapper = data;
7005 seq_printf(m, "%ps:", (void *)ip);
7007 seq_puts(m, "snapshot");
7010 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7013 seq_printf(m, ":count=%ld\n", *count);
7015 seq_puts(m, ":unlimited\n");
7021 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7022 unsigned long ip, void *init_data, void **data)
7024 struct ftrace_func_mapper *mapper = *data;
7027 mapper = allocate_ftrace_func_mapper();
7033 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7037 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7038 unsigned long ip, void *data)
7040 struct ftrace_func_mapper *mapper = data;
7045 free_ftrace_func_mapper(mapper, NULL);
7049 ftrace_func_mapper_remove_ip(mapper, ip);
7052 static struct ftrace_probe_ops snapshot_probe_ops = {
7053 .func = ftrace_snapshot,
7054 .print = ftrace_snapshot_print,
7057 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7058 .func = ftrace_count_snapshot,
7059 .print = ftrace_snapshot_print,
7060 .init = ftrace_snapshot_init,
7061 .free = ftrace_snapshot_free,
7065 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7066 char *glob, char *cmd, char *param, int enable)
7068 struct ftrace_probe_ops *ops;
7069 void *count = (void *)-1;
7076 /* hash funcs only work with set_ftrace_filter */
7080 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7083 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7088 number = strsep(¶m, ":");
7090 if (!strlen(number))
7094 * We use the callback data field (which is a pointer) as our counter.
7097 ret = kstrtoul(number, 0, (unsigned long *)&count);
7102 ret = alloc_snapshot(tr);
7106 ret = register_ftrace_function_probe(glob, tr, ops, count);
7109 return ret < 0 ? ret : 0;
7112 static struct ftrace_func_command ftrace_snapshot_cmd = {
7114 .func = ftrace_trace_snapshot_callback,
7117 static __init int register_snapshot_cmd(void)
7119 return register_ftrace_command(&ftrace_snapshot_cmd);
7122 static inline __init int register_snapshot_cmd(void) { return 0; }
7123 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7125 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7127 if (WARN_ON(!tr->dir))
7128 return ERR_PTR(-ENODEV);
7130 /* Top directory uses NULL as the parent */
7131 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7134 /* All sub buffers have a descriptor */
7138 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7140 struct dentry *d_tracer;
7143 return tr->percpu_dir;
7145 d_tracer = tracing_get_dentry(tr);
7146 if (IS_ERR(d_tracer))
7149 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7151 WARN_ONCE(!tr->percpu_dir,
7152 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7154 return tr->percpu_dir;
7157 static struct dentry *
7158 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7159 void *data, long cpu, const struct file_operations *fops)
7161 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7163 if (ret) /* See tracing_get_cpu() */
7164 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7169 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7171 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7172 struct dentry *d_cpu;
7173 char cpu_dir[30]; /* 30 characters should be more than enough */
7178 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7179 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7181 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7185 /* per cpu trace_pipe */
7186 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7187 tr, cpu, &tracing_pipe_fops);
7190 trace_create_cpu_file("trace", 0644, d_cpu,
7191 tr, cpu, &tracing_fops);
7193 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7194 tr, cpu, &tracing_buffers_fops);
7196 trace_create_cpu_file("stats", 0444, d_cpu,
7197 tr, cpu, &tracing_stats_fops);
7199 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7200 tr, cpu, &tracing_entries_fops);
7202 #ifdef CONFIG_TRACER_SNAPSHOT
7203 trace_create_cpu_file("snapshot", 0644, d_cpu,
7204 tr, cpu, &snapshot_fops);
7206 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7207 tr, cpu, &snapshot_raw_fops);
7211 #ifdef CONFIG_FTRACE_SELFTEST
7212 /* Let selftest have access to static functions in this file */
7213 #include "trace_selftest.c"
7217 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7220 struct trace_option_dentry *topt = filp->private_data;
7223 if (topt->flags->val & topt->opt->bit)
7228 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7232 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7235 struct trace_option_dentry *topt = filp->private_data;
7239 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7243 if (val != 0 && val != 1)
7246 if (!!(topt->flags->val & topt->opt->bit) != val) {
7247 mutex_lock(&trace_types_lock);
7248 ret = __set_tracer_option(topt->tr, topt->flags,
7250 mutex_unlock(&trace_types_lock);
7261 static const struct file_operations trace_options_fops = {
7262 .open = tracing_open_generic,
7263 .read = trace_options_read,
7264 .write = trace_options_write,
7265 .llseek = generic_file_llseek,
7269 * In order to pass in both the trace_array descriptor as well as the index
7270 * to the flag that the trace option file represents, the trace_array
7271 * has a character array of trace_flags_index[], which holds the index
7272 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7273 * The address of this character array is passed to the flag option file
7274 * read/write callbacks.
7276 * In order to extract both the index and the trace_array descriptor,
7277 * get_tr_index() uses the following algorithm.
7281 * As the pointer itself contains the address of the index (remember index[0] == 0).
7284 * Then, to get the trace_array descriptor, we subtract that index
7285 * from the ptr, which gets us to the start of the index array itself:
7287 * ptr - idx == &index[0]
7289 * Then a simple container_of() from that pointer gets us to the
7290 * trace_array descriptor.
7292 static void get_tr_index(void *data, struct trace_array **ptr,
7293 unsigned int *pindex)
7295 *pindex = *(unsigned char *)data;
7297 *ptr = container_of(data - *pindex, struct trace_array,
7302 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7305 void *tr_index = filp->private_data;
7306 struct trace_array *tr;
7310 get_tr_index(tr_index, &tr, &index);
7312 if (tr->trace_flags & (1 << index))
7317 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7321 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7324 void *tr_index = filp->private_data;
7325 struct trace_array *tr;
7330 get_tr_index(tr_index, &tr, &index);
7332 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7336 if (val != 0 && val != 1)
7339 mutex_lock(&trace_types_lock);
7340 ret = set_tracer_flag(tr, 1 << index, val);
7341 mutex_unlock(&trace_types_lock);
7351 static const struct file_operations trace_options_core_fops = {
7352 .open = tracing_open_generic,
7353 .read = trace_options_core_read,
7354 .write = trace_options_core_write,
7355 .llseek = generic_file_llseek,
7358 struct dentry *trace_create_file(const char *name,
7360 struct dentry *parent,
7362 const struct file_operations *fops)
7366 ret = tracefs_create_file(name, mode, parent, data, fops);
7368 pr_warn("Could not create tracefs '%s' entry\n", name);
7374 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7376 struct dentry *d_tracer;
7381 d_tracer = tracing_get_dentry(tr);
7382 if (IS_ERR(d_tracer))
7385 tr->options = tracefs_create_dir("options", d_tracer);
7387 pr_warn("Could not create tracefs directory 'options'\n");
7395 create_trace_option_file(struct trace_array *tr,
7396 struct trace_option_dentry *topt,
7397 struct tracer_flags *flags,
7398 struct tracer_opt *opt)
7400 struct dentry *t_options;
7402 t_options = trace_options_init_dentry(tr);
7406 topt->flags = flags;
7410 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7411 &trace_options_fops);
7416 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7418 struct trace_option_dentry *topts;
7419 struct trace_options *tr_topts;
7420 struct tracer_flags *flags;
7421 struct tracer_opt *opts;
7428 flags = tracer->flags;
7430 if (!flags || !flags->opts)
7434 * If this is an instance, only create flags for tracers
7435 * the instance may have.
7437 if (!trace_ok_for_array(tracer, tr))
7440 for (i = 0; i < tr->nr_topts; i++) {
7441 /* Make sure there are no duplicate flags. */
7442 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7448 for (cnt = 0; opts[cnt].name; cnt++)
7451 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7455 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7462 tr->topts = tr_topts;
7463 tr->topts[tr->nr_topts].tracer = tracer;
7464 tr->topts[tr->nr_topts].topts = topts;
7467 for (cnt = 0; opts[cnt].name; cnt++) {
7468 create_trace_option_file(tr, &topts[cnt], flags,
7470 WARN_ONCE(topts[cnt].entry == NULL,
7471 "Failed to create trace option: %s",
7476 static struct dentry *
7477 create_trace_option_core_file(struct trace_array *tr,
7478 const char *option, long index)
7480 struct dentry *t_options;
7482 t_options = trace_options_init_dentry(tr);
7486 return trace_create_file(option, 0644, t_options,
7487 (void *)&tr->trace_flags_index[index],
7488 &trace_options_core_fops);
7491 static void create_trace_options_dir(struct trace_array *tr)
7493 struct dentry *t_options;
7494 bool top_level = tr == &global_trace;
7497 t_options = trace_options_init_dentry(tr);
7501 for (i = 0; trace_options[i]; i++) {
7503 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7504 create_trace_option_core_file(tr, trace_options[i], i);
7509 rb_simple_read(struct file *filp, char __user *ubuf,
7510 size_t cnt, loff_t *ppos)
7512 struct trace_array *tr = filp->private_data;
7516 r = tracer_tracing_is_on(tr);
7517 r = sprintf(buf, "%d\n", r);
7519 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7523 rb_simple_write(struct file *filp, const char __user *ubuf,
7524 size_t cnt, loff_t *ppos)
7526 struct trace_array *tr = filp->private_data;
7527 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7531 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7536 mutex_lock(&trace_types_lock);
7538 tracer_tracing_on(tr);
7539 if (tr->current_trace->start)
7540 tr->current_trace->start(tr);
7542 tracer_tracing_off(tr);
7543 if (tr->current_trace->stop)
7544 tr->current_trace->stop(tr);
7546 mutex_unlock(&trace_types_lock);
7554 static const struct file_operations rb_simple_fops = {
7555 .open = tracing_open_generic_tr,
7556 .read = rb_simple_read,
7557 .write = rb_simple_write,
7558 .release = tracing_release_generic_tr,
7559 .llseek = default_llseek,
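/*
 * Illustrative userspace sketch (not part of this file): the "tracing_on"
 * file is served by rb_simple_read()/rb_simple_write() above; "0" stops
 * writers to the ring buffer and "1" re-enables them. Assumes tracefs is
 * mounted at /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/tracing_on", O_WRONLY);

	if (fd < 0) {
		perror("open tracing_on");
		return 1;
	}
	if (write(fd, "0\n", 2) < 0)
		perror("write tracing_on");
	close(fd);
	return 0;
}
#endif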
7562 struct dentry *trace_instance_dir;
7565 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7568 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7570 enum ring_buffer_flags rb_flags;
7572 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7576 buf->buffer = ring_buffer_alloc(size, rb_flags);
7580 buf->data = alloc_percpu(struct trace_array_cpu);
7582 ring_buffer_free(buf->buffer);
7587 /* Allocate the first page for all buffers */
7588 set_buffer_entries(&tr->trace_buffer,
7589 ring_buffer_size(tr->trace_buffer.buffer, 0));
static int allocate_trace_buffers(struct trace_array *tr, int size)
	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;

	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
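/*
 * With CONFIG_TRACER_MAX_TRACE the trace array carries a second "max"
 * buffer next to trace_buffer.  Unless a snapshot was requested on the
 * kernel command line it starts out at a single page and is only grown to
 * the full size later, when a snapshot or latency tracer actually needs it.
 * allocate_snapshot is cleared here so that only the top-level trace array
 * honors the boot-time request.
 */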
static void free_trace_buffer(struct trace_buffer *buf)
		ring_buffer_free(buf->buffer);

	free_percpu(buf->data);

static void free_trace_buffers(struct trace_array *tr)
	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
static void init_trace_flags_index(struct trace_array *tr)
	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
static void __update_tracer_options(struct trace_array *tr)
	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);

static void update_tracer_options(struct trace_array *tr)
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
static int instance_mkdir(const char *name)
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);

	tr->name = kstrdup(name, GFP_KERNEL);

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)

	tr->dir = tracefs_create_dir(name, trace_instance_dir);

	ret = event_trace_add_tracer(tr->dir, tr);
		tracefs_remove_recursive(tr->dir);

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);

	mutex_unlock(&trace_types_lock);
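/*
 * For illustration: instance_mkdir() runs when user space creates a
 * directory under the tracefs "instances" directory (see
 * create_trace_instances() below), e.g.
 *
 *	# mkdir /sys/kernel/tracing/instances/foo
 *
 * which gives "foo" its own ring buffer and its own copy of the trace
 * control files.  The path is the typical mount point, shown as an example.
 */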
static int instance_rmdir(const char *name)
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {

	if (tr->ref || (tr->current_trace && tr->current_trace->ref))

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);

	free_cpumask_var(tr->tracing_cpumask);

	mutex_unlock(&trace_types_lock);
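/*
 * The mirror image of instance_mkdir(): rmdir on an instance directory
 * lands here.  Teardown is refused while the instance is still referenced
 * (tr->ref, or the current tracer holding a reference), and the flag loop
 * above switches off any per-instance option state before the tracefs
 * entries and buffers are torn down.
 */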
static __init void create_trace_instances(struct dentry *d_tracer)
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);

	if (WARN_ON(!trace_instance_dir))
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
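/*
 * Everything created in init_tracer_tracefs() exists once for the top-level
 * tracing directory and once per instance: tracer_init_tracefs() calls it
 * for global_trace, and instance_mkdir() calls it for each new instance, so
 * a file added here automatically appears under instances/<name>/ as well.
 */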
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");

	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
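/*
 * For illustration: with this automount in place, legacy tooling that only
 * knows about debugfs keeps working, e.g.
 *
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * transparently mounts tracefs on debugfs/tracing on first access.  The
 * example path assumes debugfs is mounted at its conventional location.
 */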
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
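/*
 * The eval maps collected between __start/__stop_ftrace_eval_maps are used
 * to resolve symbolic values so that trace event format strings can show
 * names rather than raw numbers.  Built-in maps are inserted here once at
 * boot; modules contribute their own maps through the notifier below.
 */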
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
	if (!mod->num_trace_evals)

	/*
	 * Modules with bad taint do not have events created; do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

		if (map->head.mod == mod)
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;

	*last = trace_eval_jmp_to_tail(map)->tail.next;

	mutex_unlock(&trace_eval_mutex);
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
	struct module *mod = data;

	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};
#endif /* CONFIG_MODULES */
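/*
 * Module eval maps follow the module lifecycle: they are inserted when the
 * notifier sees MODULE_STATE_COMING and, with CONFIG_TRACE_EVAL_MAP_FILE
 * enabled, removed again at MODULE_STATE_GOING, so the eval file created in
 * tracer_init_tracefs() below never points at memory of an unloaded module.
 */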
static __init int tracer_init_tracefs(void)
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val, void *data)
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
};
/*
 * The printk buffer is capped at 1024 bytes; we really don't need it that
 * big here.  Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT 1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE KERN_EMERG
trace_printk_seq(struct trace_seq *s)
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);
void trace_init_global_iter(struct trace_iterator *iter)
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		iter.cpu_file = raw_smp_processor_id();
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but it is
	 * not done often. We fill in all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {
			printk(KERN_TRACE "---------------------------------\n");

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);

		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);

		printk(KERN_TRACE " (ftrace buffer empty)\n");
		printk(KERN_TRACE "---------------------------------\n");

	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);

	atomic_dec(&dump_running);
	local_irq_restore(flags);
EXPORT_SYMBOL_GPL(ftrace_dump);
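/*
 * ftrace_dump() is the consumer of the panic and die notifiers registered
 * in tracer_alloc_buffers() below: when ftrace_dump_on_oops is set, a crash
 * spills the ring buffer contents to the console at the KERN_TRACE level
 * defined above.  It can also be triggered manually (e.g. via sysrq-z),
 * after which tracing can be re-enabled with "echo 1 > tracing_on".
 */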
__init static int tracer_alloc_buffers(void)
	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
		goto out_free_cpumask;

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	if (global_trace.buffer_disabled)

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
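/*
 * The error path above unwinds strictly in reverse order of the setup steps
 * in this function: the saved-cmdlines buffer, the temporary event-trigger
 * ring buffer, the CPU-hotplug state, and finally the two cpumasks, so a
 * failure at any point releases exactly what had been allocated so far.
 */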
void __init early_trace_init(void)
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);

	tracer_alloc_buffers();
void __init trace_init(void)
__init static int clear_boot_tracer(void)
	/*
	 * The default boot-up tracer name is kept in an init section.
	 * This function runs as a late initcall; if the boot tracer was
	 * never registered by then, clear the pointer out so that a later
	 * registration does not access the init-section buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;
fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);