2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/trace.h>
44 #include <linux/sched/rt.h>
47 #include "trace_output.h"
50 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
53 bool ring_buffer_expanded;
56 * We need to change this state when a selftest is running.
57 * A selftest will look into the ring-buffer to count the
58 * entries inserted during the selftest, although some concurrent
59 * insertions into the ring-buffer, such as trace_printk, could occur
60 * at the same time, giving false positive or negative results.
62 static bool __read_mostly tracing_selftest_running;
65 * If a tracer is running, we do not want to run SELFTEST.
67 bool __read_mostly tracing_selftest_disabled;
69 /* Pipe tracepoints to printk */
70 struct trace_iterator *tracepoint_print_iter;
71 int tracepoint_printk;
73 /* For tracers that don't implement custom flags */
74 static struct tracer_opt dummy_tracer_opt[] = {
79 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
85 * To prevent the comm cache from being overwritten when no
86 * tracing is active, only save the comm when a trace event
89 static DEFINE_PER_CPU(bool, trace_cmdline_save);
92 * Kill all tracing for good (never come back).
93 * It is initialized to 1 but will turn to zero if the initialization
94 * of the tracer is successful. But that is the only place that sets this back to zero.
97 static int tracing_disabled = 1;
99 cpumask_var_t __read_mostly tracing_buffer_mask;
102 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
104 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
105 * is set, then ftrace_dump is called. This will output the contents
106 * of the ftrace buffers to the console. This is very useful for
107 * capturing traces that lead to crashes and outputting it to a serial console.
110 * It is off by default, but you can enable it either by specifying
111 * "ftrace_dump_on_oops" in the kernel command line, or setting
112 * /proc/sys/kernel/ftrace_dump_on_oops
113 * Set 1 if you want to dump buffers of all CPUs
114 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
117 enum ftrace_dump_mode ftrace_dump_on_oops;
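/*
 * Illustrative only -- the knobs documented above can be poked like this
 * (1 == dump all CPUs, 2/"orig_cpu" == only the CPU that hit the oops):
 *
 *	# at run time, through the sysctl file
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 *	# or at boot, on the kernel command line
 *	ftrace_dump_on_oops=orig_cpu
 */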
119 /* When set, tracing will stop when a WARN*() is hit */
120 int __disable_trace_on_warning;
122 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
123 /* Map of enums to their values, for "enum_map" file */
124 struct trace_enum_map_head {
126 unsigned long length;
129 union trace_enum_map_item;
131 struct trace_enum_map_tail {
133 * "end" is first and points to NULL as it must be different
134 * than "mod" or "enum_string"
136 union trace_enum_map_item *next;
137 const char *end; /* points to NULL */
140 static DEFINE_MUTEX(trace_enum_mutex);
143 * The trace_enum_maps are saved in an array with two extra elements,
144 * one at the beginning, and one at the end. The beginning item contains
145 * the count of the saved maps (head.length), and the module they
146 * belong to if not built in (head.mod). The ending item contains a
147 * pointer to the next array of saved enum_map items.
149 union trace_enum_map_item {
150 struct trace_enum_map map;
151 struct trace_enum_map_head head;
152 struct trace_enum_map_tail tail;
155 static union trace_enum_map_item *trace_enum_maps;
156 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
158 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
160 #define MAX_TRACER_SIZE 100
161 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
162 static char *default_bootup_tracer;
164 static bool allocate_snapshot;
166 static int __init set_cmdline_ftrace(char *str)
168 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
169 default_bootup_tracer = bootup_tracer_buf;
170 /* We are using ftrace early, expand it */
171 ring_buffer_expanded = true;
174 __setup("ftrace=", set_cmdline_ftrace);
176 static int __init set_ftrace_dump_on_oops(char *str)
178 if (*str++ != '=' || !*str) {
179 ftrace_dump_on_oops = DUMP_ALL;
183 if (!strcmp("orig_cpu", str)) {
184 ftrace_dump_on_oops = DUMP_ORIG;
190 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
192 static int __init stop_trace_on_warning(char *str)
194 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
195 __disable_trace_on_warning = 1;
198 __setup("traceoff_on_warning", stop_trace_on_warning);
200 static int __init boot_alloc_snapshot(char *str)
202 allocate_snapshot = true;
203 /* We also need the main ring buffer expanded */
204 ring_buffer_expanded = true;
207 __setup("alloc_snapshot", boot_alloc_snapshot);
210 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
212 static int __init set_trace_boot_options(char *str)
214 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
217 __setup("trace_options=", set_trace_boot_options);
219 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
220 static char *trace_boot_clock __initdata;
222 static int __init set_trace_boot_clock(char *str)
224 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
225 trace_boot_clock = trace_boot_clock_buf;
228 __setup("trace_clock=", set_trace_boot_clock);
230 static int __init set_tracepoint_printk(char *str)
232 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
233 tracepoint_printk = 1;
236 __setup("tp_printk", set_tracepoint_printk);
238 unsigned long long ns2usecs(cycle_t nsec)
245 /* trace_flags holds trace_options default values */
246 #define TRACE_DEFAULT_FLAGS \
247 (FUNCTION_DEFAULT_FLAGS | \
248 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
249 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
250 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
251 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253 /* trace_options that are only supported by global_trace */
254 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
255 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257 /* trace_flags that are default zero for instances */
258 #define ZEROED_TRACE_FLAGS \
259 TRACE_ITER_EVENT_FORK
262 * The global_trace is the descriptor that holds the tracing
263 * buffers for the live tracing. For each CPU, it contains
264 * a linked list of pages that will store trace entries. The
265 * page descriptor of the pages in the memory is used to hold
266 * the linked list by linking the lru item in the page descriptor
267 * to each of the pages in the buffer per CPU.
269 * For each active CPU there is a data field that holds the
270 * pages for the buffer for that CPU. Each CPU has the same number
271 * of pages allocated for its buffer.
273 static struct trace_array global_trace = {
274 .trace_flags = TRACE_DEFAULT_FLAGS,
277 LIST_HEAD(ftrace_trace_arrays);
279 int trace_array_get(struct trace_array *this_tr)
281 struct trace_array *tr;
284 mutex_lock(&trace_types_lock);
285 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
292 mutex_unlock(&trace_types_lock);
297 static void __trace_array_put(struct trace_array *this_tr)
299 WARN_ON(!this_tr->ref);
303 void trace_array_put(struct trace_array *this_tr)
305 mutex_lock(&trace_types_lock);
306 __trace_array_put(this_tr);
307 mutex_unlock(&trace_types_lock);
310 int call_filter_check_discard(struct trace_event_call *call, void *rec,
311 struct ring_buffer *buffer,
312 struct ring_buffer_event *event)
314 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
315 !filter_match_preds(call->filter, rec)) {
316 __trace_event_discard_commit(buffer, event);
323 void trace_free_pid_list(struct trace_pid_list *pid_list)
325 vfree(pid_list->pids);
330 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
331 * @filtered_pids: The list of pids to check
332 * @search_pid: The PID to find in @filtered_pids
334 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
337 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
340 * If pid_max changed after filtered_pids was created, we
341 * by default ignore all pids greater than the previous pid_max.
343 if (search_pid >= filtered_pids->pid_max)
346 return test_bit(search_pid, filtered_pids->pids);
350 * trace_ignore_this_task - should a task be ignored for tracing
351 * @filtered_pids: The list of pids to check
352 * @task: The task that should be ignored if not filtered
354 * Checks if @task should be traced or not from @filtered_pids.
355 * Returns true if @task should *NOT* be traced.
356 * Returns false if @task should be traced.
359 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
362 * Return false, because if filtered_pids does not exist,
363 * all pids are good to trace.
368 return !trace_find_filtered_pid(filtered_pids, task->pid);
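/*
 * A minimal sketch of how a tracing hook might consult the helper above.
 * "example_pid_list" is hypothetical and stands in for a filtered_pids
 * pointer owned by the caller (e.g. fetched from its trace_array).
 */
#if 0
static void example_record_task(struct trace_pid_list *example_pid_list,
				struct task_struct *p)
{
	if (trace_ignore_this_task(example_pid_list, p))
		return;		/* pid filtering says: do not trace @p */

	/* ... record the event for @p here ... */
}
#endif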
372 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
373 * @pid_list: The list to modify
374 * @self: The current task for fork or NULL for exit
375 * @task: The task to add or remove
377 * If adding a task and @self is defined, the task is only added if @self
378 * is also included in @pid_list. This happens on fork and tasks should
379 * only be added when the parent is listed. If @self is NULL, then the
380 * @task pid will be removed from the list, which would happen on exit
383 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
384 struct task_struct *self,
385 struct task_struct *task)
390 /* For forks, we only add if the forking task is listed */
392 if (!trace_find_filtered_pid(pid_list, self->pid))
396 /* Sorry, but we don't support pid_max changing after setting */
397 if (task->pid >= pid_list->pid_max)
400 /* "self" is set for forks, and NULL for exits */
402 set_bit(task->pid, pid_list->pids);
404 clear_bit(task->pid, pid_list->pids);
408 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
409 * @pid_list: The pid list to show
410 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
411 * @pos: The position of the file
413 * This is used by the seq_file "next" operation to iterate the pids
414 * listed in a trace_pid_list structure.
416 * Returns the pid+1 as we want to display pid of zero, but NULL would
417 * stop the iteration.
419 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
421 unsigned long pid = (unsigned long)v;
425 /* pid already is +1 of the actual previous bit */
426 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
428 /* Return pid + 1 to allow zero to be represented */
429 if (pid < pid_list->pid_max)
430 return (void *)(pid + 1);
436 * trace_pid_start - Used for seq_file to start reading pid lists
437 * @pid_list: The pid list to show
438 * @pos: The position of the file
440 * This is used by seq_file "start" operation to start the iteration
443 * Returns the pid+1 as we want to display pid of zero, but NULL would
444 * stop the iteration.
446 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
451 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
452 if (pid >= pid_list->pid_max)
455 /* Return pid + 1 so that zero can be the exit value */
456 for (pid++; pid && l < *pos;
457 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
463 * trace_pid_show - show the current pid in seq_file processing
464 * @m: The seq_file structure to write into
465 * @v: A void pointer of the pid (+1) value to display
467 * Can be directly used by seq_file operations to display the current pid value.
470 int trace_pid_show(struct seq_file *m, void *v)
472 unsigned long pid = (unsigned long)v - 1;
474 seq_printf(m, "%lu\n", pid);
478 /* 128 should be much more than enough */
479 #define PID_BUF_SIZE 127
481 int trace_pid_write(struct trace_pid_list *filtered_pids,
482 struct trace_pid_list **new_pid_list,
483 const char __user *ubuf, size_t cnt)
485 struct trace_pid_list *pid_list;
486 struct trace_parser parser;
494 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
498 * Always recreate a new array. The write is an all or nothing
499 * operation. Always create a new array when adding new pids by
500 * the user. If the operation fails, then the current list is not modified.
503 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
507 pid_list->pid_max = READ_ONCE(pid_max);
509 /* Only truncating will shrink pid_max */
510 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
511 pid_list->pid_max = filtered_pids->pid_max;
513 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
514 if (!pid_list->pids) {
520 /* copy the current bits to the new max */
521 for_each_set_bit(pid, filtered_pids->pids,
522 filtered_pids->pid_max) {
523 set_bit(pid, pid_list->pids);
532 ret = trace_get_user(&parser, ubuf, cnt, &pos);
533 if (ret < 0 || !trace_parser_loaded(&parser))
540 parser.buffer[parser.idx] = 0;
543 if (kstrtoul(parser.buffer, 0, &val))
545 if (val >= pid_list->pid_max)
550 set_bit(pid, pid_list->pids);
553 trace_parser_clear(&parser);
556 trace_parser_put(&parser);
559 trace_free_pid_list(pid_list);
564 /* Cleared the list of pids */
565 trace_free_pid_list(pid_list);
570 *new_pid_list = pid_list;
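/*
 * From user space, this all-or-nothing rewrite is what backs the pid
 * filter files, for example (illustrative; the file names belong to the
 * callers in the event code):
 *
 *	echo 123 456 > /sys/kernel/debug/tracing/set_event_pid
 *	echo > /sys/kernel/debug/tracing/set_event_pid	# clear the list
 */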
575 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
579 /* Early boot up does not have a buffer yet */
581 return trace_clock_local();
583 ts = ring_buffer_time_stamp(buf->buffer, cpu);
584 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
589 cycle_t ftrace_now(int cpu)
591 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
595 * tracing_is_enabled - Show if global_trace has been enabled
597 * Shows if the global trace has been enabled or not. It uses the
598 * mirror flag "buffer_disabled" so it can be used in fast paths such as for
599 * the irqsoff tracer. But it may be inaccurate due to races. If you
600 * need to know the accurate state, use tracing_is_on() which is a little
601 * slower, but accurate.
603 int tracing_is_enabled(void)
606 * For quick access (irqsoff uses this in fast path), just
607 * return the mirror variable of the state of the ring buffer.
608 * It's a little racy, but we don't really care.
611 return !global_trace.buffer_disabled;
615 * trace_buf_size is the size in bytes that is allocated
616 * for a buffer. Note, the number of bytes is always rounded
619 * This number is purposely set to a low number of 16384.
620 * If the dump on oops happens, it will be much appreciated
621 * to not have to wait for all that output. Anyway, this is
622 * configurable at both boot time and run time.
624 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
626 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
628 /* trace_types holds a linked list of available tracers. */
629 static struct tracer *trace_types __read_mostly;
632 * trace_types_lock is used to protect the trace_types list.
634 DEFINE_MUTEX(trace_types_lock);
637 * serialize the access of the ring buffer
639 * The ring buffer serializes readers, but this is only low-level protection.
640 * The validity of the events (as returned by ring_buffer_peek() etc.)
641 * is not protected by the ring buffer.
643 * The content of events may become garbage if we allow another process to
644 * consume these events concurrently:
645 * A) the page of the consumed events may become a normal page
646 * (not a reader page) in the ring buffer, and this page will be rewritten
647 * by the events producer.
648 * B) The page of the consumed events may become a page for splice_read,
649 * and this page will be returned to the system.
651 * These primitives allow multi-process access to different cpu ring buffers.
654 * These primitives don't distinguish read-only and read-consume access.
655 * Multiple read-only accesses are also serialized.
659 static DECLARE_RWSEM(all_cpu_access_lock);
660 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
662 static inline void trace_access_lock(int cpu)
664 if (cpu == RING_BUFFER_ALL_CPUS) {
665 /* gain it for accessing the whole ring buffer. */
666 down_write(&all_cpu_access_lock);
668 /* gain it for accessing a cpu ring buffer. */
670 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
671 down_read(&all_cpu_access_lock);
673 /* Secondly block other access to this @cpu ring buffer. */
674 mutex_lock(&per_cpu(cpu_access_lock, cpu));
678 static inline void trace_access_unlock(int cpu)
680 if (cpu == RING_BUFFER_ALL_CPUS) {
681 up_write(&all_cpu_access_lock);
683 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
684 up_read(&all_cpu_access_lock);
688 static inline void trace_access_lock_init(void)
692 for_each_possible_cpu(cpu)
693 mutex_init(&per_cpu(cpu_access_lock, cpu));
698 static DEFINE_MUTEX(access_lock);
700 static inline void trace_access_lock(int cpu)
703 mutex_lock(&access_lock);
706 static inline void trace_access_unlock(int cpu)
709 mutex_unlock(&access_lock);
712 static inline void trace_access_lock_init(void)
718 #ifdef CONFIG_STACKTRACE
719 static void __ftrace_trace_stack(struct ring_buffer *buffer,
721 int skip, int pc, struct pt_regs *regs);
722 static inline void ftrace_trace_stack(struct trace_array *tr,
723 struct ring_buffer *buffer,
725 int skip, int pc, struct pt_regs *regs);
728 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
730 int skip, int pc, struct pt_regs *regs)
733 static inline void ftrace_trace_stack(struct trace_array *tr,
734 struct ring_buffer *buffer,
736 int skip, int pc, struct pt_regs *regs)
742 static void tracer_tracing_on(struct trace_array *tr)
744 if (tr->trace_buffer.buffer)
745 ring_buffer_record_on(tr->trace_buffer.buffer);
747 * This flag is looked at when buffers haven't been allocated
748 * yet, or by some tracers (like irqsoff), that just want to
749 * know if the ring buffer has been disabled, but it can handle
750 * races where it gets disabled while we still do a record.
751 * As the check is in the fast path of the tracers, it is more
752 * important to be fast than accurate.
754 tr->buffer_disabled = 0;
755 /* Make the flag seen by readers */
760 * tracing_on - enable tracing buffers
762 * This function enables tracing buffers that may have been
763 * disabled with tracing_off.
765 void tracing_on(void)
767 tracer_tracing_on(&global_trace);
769 EXPORT_SYMBOL_GPL(tracing_on);
772 * __trace_puts - write a constant string into the trace buffer.
773 * @ip: The address of the caller
774 * @str: The constant string to write
775 * @size: The size of the string.
777 int __trace_puts(unsigned long ip, const char *str, int size)
779 struct ring_buffer_event *event;
780 struct ring_buffer *buffer;
781 struct print_entry *entry;
782 unsigned long irq_flags;
786 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
789 pc = preempt_count();
791 if (unlikely(tracing_selftest_running || tracing_disabled))
794 alloc = sizeof(*entry) + size + 2; /* possible \n added */
796 local_save_flags(irq_flags);
797 buffer = global_trace.trace_buffer.buffer;
798 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
803 entry = ring_buffer_event_data(event);
806 memcpy(&entry->buf, str, size);
808 /* Add a newline if necessary */
809 if (entry->buf[size - 1] != '\n') {
810 entry->buf[size] = '\n';
811 entry->buf[size + 1] = '\0';
813 entry->buf[size] = '\0';
815 __buffer_unlock_commit(buffer, event);
816 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
820 EXPORT_SYMBOL_GPL(__trace_puts);
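/*
 * Sketch of a direct call; in practice this is normally reached through
 * the trace_puts() macro, which supplies _THIS_IP_ and the string length.
 */
#if 0
static void example_trace_puts(void)
{
	static const char msg[] = "example: reached the interesting path\n";

	__trace_puts(_THIS_IP_, msg, strlen(msg));
}
#endif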
823 * __trace_bputs - write the pointer to a constant string into trace buffer
824 * @ip: The address of the caller
825 * @str: The constant string to write to the buffer to
827 int __trace_bputs(unsigned long ip, const char *str)
829 struct ring_buffer_event *event;
830 struct ring_buffer *buffer;
831 struct bputs_entry *entry;
832 unsigned long irq_flags;
833 int size = sizeof(struct bputs_entry);
836 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
839 pc = preempt_count();
841 if (unlikely(tracing_selftest_running || tracing_disabled))
844 local_save_flags(irq_flags);
845 buffer = global_trace.trace_buffer.buffer;
846 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
851 entry = ring_buffer_event_data(event);
855 __buffer_unlock_commit(buffer, event);
856 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
860 EXPORT_SYMBOL_GPL(__trace_bputs);
862 #ifdef CONFIG_TRACER_SNAPSHOT
864 * tracing_snapshot - take a snapshot of the current buffer.
866 * This causes a swap between the snapshot buffer and the current live
867 * tracing buffer. You can use this to take snapshots of the live
868 * trace when some condition is triggered, but continue to trace.
870 * Note, make sure to allocate the snapshot with either
871 * a tracing_snapshot_alloc(), or by doing it manually
872 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
874 * If the snapshot buffer is not allocated, it will stop tracing.
875 * Basically making a permanent snapshot.
877 void tracing_snapshot(void)
879 struct trace_array *tr = &global_trace;
880 struct tracer *tracer = tr->current_trace;
884 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
885 internal_trace_puts("*** snapshot is being ignored ***\n");
889 if (!tr->allocated_snapshot) {
890 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
891 internal_trace_puts("*** stopping trace here! ***\n");
896 /* Note, snapshot can not be used when the tracer uses it */
897 if (tracer->use_max_tr) {
898 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
899 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
903 local_irq_save(flags);
904 update_max_tr(tr, current, smp_processor_id());
905 local_irq_restore(flags);
907 EXPORT_SYMBOL_GPL(tracing_snapshot);
909 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
910 struct trace_buffer *size_buf, int cpu_id);
911 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
913 static int alloc_snapshot(struct trace_array *tr)
917 if (!tr->allocated_snapshot) {
919 /* allocate spare buffer */
920 ret = resize_buffer_duplicate_size(&tr->max_buffer,
921 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
925 tr->allocated_snapshot = true;
931 static void free_snapshot(struct trace_array *tr)
934 * We don't free the ring buffer; instead, we resize it because
935 * the max_tr ring buffer has some state (e.g. ring->clock) and
936 * we want to preserve it.
938 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
939 set_buffer_entries(&tr->max_buffer, 1);
940 tracing_reset_online_cpus(&tr->max_buffer);
941 tr->allocated_snapshot = false;
945 * tracing_alloc_snapshot - allocate snapshot buffer.
947 * This only allocates the snapshot buffer if it isn't already
948 * allocated - it doesn't also take a snapshot.
950 * This is meant to be used in cases where the snapshot buffer needs
951 * to be set up for events that can't sleep but need to be able to
952 * trigger a snapshot.
954 int tracing_alloc_snapshot(void)
956 struct trace_array *tr = &global_trace;
959 ret = alloc_snapshot(tr);
964 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
967 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
969 * This is similar to tracing_snapshot(), but it will allocate the
970 * snapshot buffer if it isn't already allocated. Use this only
971 * where it is safe to sleep, as the allocation may sleep.
973 * This causes a swap between the snapshot buffer and the current live
974 * tracing buffer. You can use this to take snapshots of the live
975 * trace when some condition is triggered, but continue to trace.
977 void tracing_snapshot_alloc(void)
981 ret = tracing_alloc_snapshot();
987 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
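/*
 * A typical pairing of the two helpers above, sketched: allocate the
 * spare buffer once from a context that may sleep, then snapshot from
 * wherever the interesting condition fires. The tracefs equivalent is
 * the "echo 1 > /sys/kernel/debug/tracing/snapshot" mentioned above.
 */
#if 0
static int example_snapshot_setup(void)
{
	return tracing_alloc_snapshot();	/* may sleep */
}

static void example_on_condition(void)
{
	tracing_snapshot();	/* swap the live buffer with the spare one */
}
#endif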
989 void tracing_snapshot(void)
991 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
993 EXPORT_SYMBOL_GPL(tracing_snapshot);
994 int tracing_alloc_snapshot(void)
996 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
999 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1000 void tracing_snapshot_alloc(void)
1005 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1006 #endif /* CONFIG_TRACER_SNAPSHOT */
1008 static void tracer_tracing_off(struct trace_array *tr)
1010 if (tr->trace_buffer.buffer)
1011 ring_buffer_record_off(tr->trace_buffer.buffer);
1013 * This flag is looked at when buffers haven't been allocated
1014 * yet, or by some tracers (like irqsoff), that just want to
1015 * know if the ring buffer has been disabled, but it can handle
1016 * races where it gets disabled while we still do a record.
1017 * As the check is in the fast path of the tracers, it is more
1018 * important to be fast than accurate.
1020 tr->buffer_disabled = 1;
1021 /* Make the flag seen by readers */
1026 * tracing_off - turn off tracing buffers
1028 * This function stops the tracing buffers from recording data.
1029 * It does not disable any overhead the tracers themselves may
1030 * be causing. This function simply causes all recording to
1031 * the ring buffers to fail.
1033 void tracing_off(void)
1035 tracer_tracing_off(&global_trace);
1037 EXPORT_SYMBOL_GPL(tracing_off);
1039 void disable_trace_on_warning(void)
1041 if (__disable_trace_on_warning)
1046 * tracer_tracing_is_on - show the real state of the ring buffer
1047 * @tr : the trace array whose ring buffer state is wanted
1049 * Shows the real state of the ring buffer: whether it is enabled or not.
1051 int tracer_tracing_is_on(struct trace_array *tr)
1053 if (tr->trace_buffer.buffer)
1054 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1055 return !tr->buffer_disabled;
1059 * tracing_is_on - show state of ring buffers enabled
1061 int tracing_is_on(void)
1063 return tracer_tracing_is_on(&global_trace);
1065 EXPORT_SYMBOL_GPL(tracing_is_on);
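/*
 * Sketch of the usual "freeze the buffer at the point of interest"
 * pattern built on the helpers above; the same effect is available from
 * user space via the tracefs "tracing_on" file.
 */
#if 0
static void example_check_status(int status)
{
	if (status < 0) {
		trace_printk("bad status %d, stopping trace\n", status);
		tracing_off();	/* keep the events leading up to this */
	}
}
#endif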
1067 static int __init set_buf_size(char *str)
1069 unsigned long buf_size;
1073 buf_size = memparse(str, &str);
1074 /* nr_entries can not be zero */
1077 trace_buf_size = buf_size;
1080 __setup("trace_buf_size=", set_buf_size);
1082 static int __init set_tracing_thresh(char *str)
1084 unsigned long threshold;
1089 ret = kstrtoul(str, 0, &threshold);
1092 tracing_thresh = threshold * 1000;
1095 __setup("tracing_thresh=", set_tracing_thresh);
1097 unsigned long nsecs_to_usecs(unsigned long nsecs)
1099 return nsecs / 1000;
1103 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1104 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
1105 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1106 * of strings in the order that the enums were defined.
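/*
 * A stand-alone sketch of the C(a, b) trick described above, using a
 * hypothetical two-entry flag list:
 */
#if 0
#define EXAMPLE_FLAGS				\
	C(PRINT_PARENT,	"print-parent"),	\
	C(SYM_OFFSET,	"sym-offset"),

/* first expansion: enum symbols */
#undef C
#define C(a, b) EXAMPLE_ITER_##a##_BIT
enum { EXAMPLE_FLAGS };

/* second expansion: the matching strings, in the same order */
#undef C
#define C(a, b) b
static const char *example_options[] = { EXAMPLE_FLAGS NULL };
#endif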
1111 /* These must match the bit positions in trace_iterator_flags */
1112 static const char *trace_options[] = {
1120 int in_ns; /* is this clock in nanoseconds? */
1121 } trace_clocks[] = {
1122 { trace_clock_local, "local", 1 },
1123 { trace_clock_global, "global", 1 },
1124 { trace_clock_counter, "counter", 0 },
1125 { trace_clock_jiffies, "uptime", 0 },
1126 { trace_clock, "perf", 1 },
1127 { ktime_get_mono_fast_ns, "mono", 1 },
1128 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1133 * trace_parser_get_init - gets the buffer for trace parser
1135 int trace_parser_get_init(struct trace_parser *parser, int size)
1137 memset(parser, 0, sizeof(*parser));
1139 parser->buffer = kmalloc(size, GFP_KERNEL);
1140 if (!parser->buffer)
1143 parser->size = size;
1148 * trace_parser_put - frees the buffer for trace parser
1150 void trace_parser_put(struct trace_parser *parser)
1152 kfree(parser->buffer);
1156 * trace_get_user - reads the user input string separated by space
1157 * (matched by isspace(ch))
1159 * For each string found the 'struct trace_parser' is updated,
1160 * and the function returns.
1162 * Returns number of bytes read.
1164 * See kernel/trace/trace.h for 'struct trace_parser' details.
1166 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1167 size_t cnt, loff_t *ppos)
1174 trace_parser_clear(parser);
1176 ret = get_user(ch, ubuf++);
1184 * If the parser is not finished with the last write,
1185 * continue reading the user input without skipping spaces.
1187 if (!parser->cont) {
1188 /* skip white space */
1189 while (cnt && isspace(ch)) {
1190 ret = get_user(ch, ubuf++);
1197 /* only spaces were written */
1207 /* read the non-space input */
1208 while (cnt && !isspace(ch)) {
1209 if (parser->idx < parser->size - 1)
1210 parser->buffer[parser->idx++] = ch;
1215 ret = get_user(ch, ubuf++);
1222 /* We either got finished input or we have to wait for another call. */
1224 parser->buffer[parser->idx] = 0;
1225 parser->cont = false;
1226 } else if (parser->idx < parser->size - 1) {
1227 parser->cont = true;
1228 parser->buffer[parser->idx++] = ch;
1241 /* TODO add a seq_buf_to_buffer() */
1242 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1246 if (trace_seq_used(s) <= s->seq.readpos)
1249 len = trace_seq_used(s) - s->seq.readpos;
1252 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1254 s->seq.readpos += cnt;
1258 unsigned long __read_mostly tracing_thresh;
1260 #ifdef CONFIG_TRACER_MAX_TRACE
1262 * Copy the new maximum trace into the separate maximum-trace
1263 * structure. (this way the maximum trace is permanently saved,
1264 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1267 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1269 struct trace_buffer *trace_buf = &tr->trace_buffer;
1270 struct trace_buffer *max_buf = &tr->max_buffer;
1271 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1272 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1275 max_buf->time_start = data->preempt_timestamp;
1277 max_data->saved_latency = tr->max_latency;
1278 max_data->critical_start = data->critical_start;
1279 max_data->critical_end = data->critical_end;
1281 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1282 max_data->pid = tsk->pid;
1284 * If tsk == current, then use current_uid(), as that does not use
1285 * RCU. The irq tracer can be called out of RCU scope.
1288 max_data->uid = current_uid();
1290 max_data->uid = task_uid(tsk);
1292 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1293 max_data->policy = tsk->policy;
1294 max_data->rt_priority = tsk->rt_priority;
1296 /* record this tasks comm */
1297 tracing_record_cmdline(tsk);
1301 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1303 * @tsk: the task with the latency
1304 * @cpu: The cpu that initiated the trace.
1306 * Flip the buffers between the @tr and the max_tr and record information
1307 * about which task was the cause of this latency.
1310 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1312 struct ring_buffer *buf;
1317 WARN_ON_ONCE(!irqs_disabled());
1319 if (!tr->allocated_snapshot) {
1320 /* Only the nop tracer should hit this when disabling */
1321 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1325 arch_spin_lock(&tr->max_lock);
1327 buf = tr->trace_buffer.buffer;
1328 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1329 tr->max_buffer.buffer = buf;
1331 __update_max_tr(tr, tsk, cpu);
1332 arch_spin_unlock(&tr->max_lock);
1336 * update_max_tr_single - only copy one trace over, and reset the rest
1338 * @tsk: task with the latency
1339 * @cpu: the cpu of the buffer to copy.
1341 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1344 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1351 WARN_ON_ONCE(!irqs_disabled());
1352 if (!tr->allocated_snapshot) {
1353 /* Only the nop tracer should hit this when disabling */
1354 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1358 arch_spin_lock(&tr->max_lock);
1360 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1362 if (ret == -EBUSY) {
1364 * We failed to swap the buffer due to a commit taking
1365 * place on this CPU. We fail to record, but we reset
1366 * the max trace buffer (no one writes directly to it)
1367 * and flag that it failed.
1369 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1370 "Failed to swap buffers due to commit in progress\n");
1373 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1375 __update_max_tr(tr, tsk, cpu);
1376 arch_spin_unlock(&tr->max_lock);
1378 #endif /* CONFIG_TRACER_MAX_TRACE */
1380 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1382 /* Iterators are static, they should be filled or empty */
1383 if (trace_buffer_iter(iter, iter->cpu_file))
1386 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1390 #ifdef CONFIG_FTRACE_STARTUP_TEST
1391 static int run_tracer_selftest(struct tracer *type)
1393 struct trace_array *tr = &global_trace;
1394 struct tracer *saved_tracer = tr->current_trace;
1397 if (!type->selftest || tracing_selftest_disabled)
1401 * Run a selftest on this tracer.
1402 * Here we reset the trace buffer, and set the current
1403 * tracer to be this tracer. The tracer can then run some
1404 * internal tracing to verify that everything is in order.
1405 * If we fail, we do not register this tracer.
1407 tracing_reset_online_cpus(&tr->trace_buffer);
1409 tr->current_trace = type;
1411 #ifdef CONFIG_TRACER_MAX_TRACE
1412 if (type->use_max_tr) {
1413 /* If we expanded the buffers, make sure the max is expanded too */
1414 if (ring_buffer_expanded)
1415 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1416 RING_BUFFER_ALL_CPUS);
1417 tr->allocated_snapshot = true;
1421 /* the test is responsible for initializing and enabling */
1422 pr_info("Testing tracer %s: ", type->name);
1423 ret = type->selftest(type, tr);
1424 /* the test is responsible for resetting too */
1425 tr->current_trace = saved_tracer;
1427 printk(KERN_CONT "FAILED!\n");
1428 /* Add the warning after printing 'FAILED' */
1432 /* Only reset on passing, to avoid touching corrupted buffers */
1433 tracing_reset_online_cpus(&tr->trace_buffer);
1435 #ifdef CONFIG_TRACER_MAX_TRACE
1436 if (type->use_max_tr) {
1437 tr->allocated_snapshot = false;
1439 /* Shrink the max buffer again */
1440 if (ring_buffer_expanded)
1441 ring_buffer_resize(tr->max_buffer.buffer, 1,
1442 RING_BUFFER_ALL_CPUS);
1446 printk(KERN_CONT "PASSED\n");
1450 static inline int run_tracer_selftest(struct tracer *type)
1454 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1456 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1458 static void __init apply_trace_boot_options(void);
1461 * register_tracer - register a tracer with the ftrace system.
1462 * @type - the plugin for the tracer
1464 * Register a new plugin tracer.
1466 int __init register_tracer(struct tracer *type)
1472 pr_info("Tracer must have a name\n");
1476 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1477 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1481 mutex_lock(&trace_types_lock);
1483 tracing_selftest_running = true;
1485 for (t = trace_types; t; t = t->next) {
1486 if (strcmp(type->name, t->name) == 0) {
1488 pr_info("Tracer %s already registered\n",
1495 if (!type->set_flag)
1496 type->set_flag = &dummy_set_flag;
1498 /* allocate a dummy tracer_flags */
1499 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1504 type->flags->val = 0;
1505 type->flags->opts = dummy_tracer_opt;
1507 if (!type->flags->opts)
1508 type->flags->opts = dummy_tracer_opt;
1510 /* store the tracer for __set_tracer_option */
1511 type->flags->trace = type;
1513 ret = run_tracer_selftest(type);
1517 type->next = trace_types;
1519 add_tracer_options(&global_trace, type);
1522 tracing_selftest_running = false;
1523 mutex_unlock(&trace_types_lock);
1525 if (ret || !default_bootup_tracer)
1528 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1531 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1532 /* Do we want this tracer to start on bootup? */
1533 tracing_set_tracer(&global_trace, type->name);
1534 default_bootup_tracer = NULL;
1536 apply_trace_boot_options();
1538 /* disable other selftests, since this will break it. */
1539 tracing_selftest_disabled = true;
1540 #ifdef CONFIG_FTRACE_STARTUP_TEST
1541 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1549 void tracing_reset(struct trace_buffer *buf, int cpu)
1551 struct ring_buffer *buffer = buf->buffer;
1556 ring_buffer_record_disable(buffer);
1558 /* Make sure all commits have finished */
1559 synchronize_sched();
1560 ring_buffer_reset_cpu(buffer, cpu);
1562 ring_buffer_record_enable(buffer);
1565 void tracing_reset_online_cpus(struct trace_buffer *buf)
1567 struct ring_buffer *buffer = buf->buffer;
1573 ring_buffer_record_disable(buffer);
1575 /* Make sure all commits have finished */
1576 synchronize_sched();
1578 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1580 for_each_online_cpu(cpu)
1581 ring_buffer_reset_cpu(buffer, cpu);
1583 ring_buffer_record_enable(buffer);
1586 /* Must have trace_types_lock held */
1587 void tracing_reset_all_online_cpus(void)
1589 struct trace_array *tr;
1591 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1592 tracing_reset_online_cpus(&tr->trace_buffer);
1593 #ifdef CONFIG_TRACER_MAX_TRACE
1594 tracing_reset_online_cpus(&tr->max_buffer);
1599 #define SAVED_CMDLINES_DEFAULT 128
1600 #define NO_CMDLINE_MAP UINT_MAX
1601 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1602 struct saved_cmdlines_buffer {
1603 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1604 unsigned *map_cmdline_to_pid;
1605 unsigned cmdline_num;
1607 char *saved_cmdlines;
1609 static struct saved_cmdlines_buffer *savedcmd;
1611 /* temporary disable recording */
1612 static atomic_t trace_record_cmdline_disabled __read_mostly;
1614 static inline char *get_saved_cmdlines(int idx)
1616 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1619 static inline void set_cmdline(int idx, const char *cmdline)
1621 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1624 static int allocate_cmdlines_buffer(unsigned int val,
1625 struct saved_cmdlines_buffer *s)
1627 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1629 if (!s->map_cmdline_to_pid)
1632 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1633 if (!s->saved_cmdlines) {
1634 kfree(s->map_cmdline_to_pid);
1639 s->cmdline_num = val;
1640 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1641 sizeof(s->map_pid_to_cmdline));
1642 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1643 val * sizeof(*s->map_cmdline_to_pid));
1648 static int trace_create_savedcmd(void)
1652 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1656 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1666 int is_tracing_stopped(void)
1668 return global_trace.stop_count;
1672 * tracing_start - quick start of the tracer
1674 * If tracing is enabled but was stopped by tracing_stop,
1675 * this will start the tracer back up.
1677 void tracing_start(void)
1679 struct ring_buffer *buffer;
1680 unsigned long flags;
1682 if (tracing_disabled)
1685 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1686 if (--global_trace.stop_count) {
1687 if (global_trace.stop_count < 0) {
1688 /* Someone screwed up their debugging */
1690 global_trace.stop_count = 0;
1695 /* Prevent the buffers from switching */
1696 arch_spin_lock(&global_trace.max_lock);
1698 buffer = global_trace.trace_buffer.buffer;
1700 ring_buffer_record_enable(buffer);
1702 #ifdef CONFIG_TRACER_MAX_TRACE
1703 buffer = global_trace.max_buffer.buffer;
1705 ring_buffer_record_enable(buffer);
1708 arch_spin_unlock(&global_trace.max_lock);
1711 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1714 static void tracing_start_tr(struct trace_array *tr)
1716 struct ring_buffer *buffer;
1717 unsigned long flags;
1719 if (tracing_disabled)
1722 /* If global, we need to also start the max tracer */
1723 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1724 return tracing_start();
1726 raw_spin_lock_irqsave(&tr->start_lock, flags);
1728 if (--tr->stop_count) {
1729 if (tr->stop_count < 0) {
1730 /* Someone screwed up their debugging */
1737 buffer = tr->trace_buffer.buffer;
1739 ring_buffer_record_enable(buffer);
1742 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1746 * tracing_stop - quick stop of the tracer
1748 * Lightweight way to stop tracing. Use in conjunction with tracing_start().
1751 void tracing_stop(void)
1753 struct ring_buffer *buffer;
1754 unsigned long flags;
1756 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1757 if (global_trace.stop_count++)
1760 /* Prevent the buffers from switching */
1761 arch_spin_lock(&global_trace.max_lock);
1763 buffer = global_trace.trace_buffer.buffer;
1765 ring_buffer_record_disable(buffer);
1767 #ifdef CONFIG_TRACER_MAX_TRACE
1768 buffer = global_trace.max_buffer.buffer;
1770 ring_buffer_record_disable(buffer);
1773 arch_spin_unlock(&global_trace.max_lock);
1776 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1779 static void tracing_stop_tr(struct trace_array *tr)
1781 struct ring_buffer *buffer;
1782 unsigned long flags;
1784 /* If global, we need to also stop the max tracer */
1785 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1786 return tracing_stop();
1788 raw_spin_lock_irqsave(&tr->start_lock, flags);
1789 if (tr->stop_count++)
1792 buffer = tr->trace_buffer.buffer;
1794 ring_buffer_record_disable(buffer);
1797 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1800 void trace_stop_cmdline_recording(void);
1802 static int trace_save_cmdline(struct task_struct *tsk)
1806 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1810 * It's not the end of the world if we don't get
1811 * the lock, but we also don't want to spin
1812 * nor do we want to disable interrupts,
1813 * so if we miss here, then better luck next time.
1815 if (!arch_spin_trylock(&trace_cmdline_lock))
1818 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1819 if (idx == NO_CMDLINE_MAP) {
1820 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1823 * Check whether the cmdline buffer at idx has a pid
1824 * mapped. We are going to overwrite that entry so we
1825 * need to clear the map_pid_to_cmdline. Otherwise we
1826 * would read the new comm for the old pid.
1828 pid = savedcmd->map_cmdline_to_pid[idx];
1829 if (pid != NO_CMDLINE_MAP)
1830 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1832 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1833 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1835 savedcmd->cmdline_idx = idx;
1838 set_cmdline(idx, tsk->comm);
1840 arch_spin_unlock(&trace_cmdline_lock);
1845 static void __trace_find_cmdline(int pid, char comm[])
1850 strcpy(comm, "<idle>");
1854 if (WARN_ON_ONCE(pid < 0)) {
1855 strcpy(comm, "<XXX>");
1859 if (pid > PID_MAX_DEFAULT) {
1860 strcpy(comm, "<...>");
1864 map = savedcmd->map_pid_to_cmdline[pid];
1865 if (map != NO_CMDLINE_MAP)
1866 strcpy(comm, get_saved_cmdlines(map));
1868 strcpy(comm, "<...>");
1871 void trace_find_cmdline(int pid, char comm[])
1874 arch_spin_lock(&trace_cmdline_lock);
1876 __trace_find_cmdline(pid, comm);
1878 arch_spin_unlock(&trace_cmdline_lock);
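/*
 * Sketch of the lookup side, roughly as the output code uses it:
 */
#if 0
static void example_print_comm(struct seq_file *m, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	seq_printf(m, "%s-%d", comm, pid);	/* e.g. "bash-1234" */
}
#endif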
1882 void tracing_record_cmdline(struct task_struct *tsk)
1884 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1887 if (!__this_cpu_read(trace_cmdline_save))
1890 if (trace_save_cmdline(tsk))
1891 __this_cpu_write(trace_cmdline_save, false);
1895 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1898 struct task_struct *tsk = current;
1900 entry->preempt_count = pc & 0xff;
1901 entry->pid = (tsk) ? tsk->pid : 0;
1903 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1904 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1906 TRACE_FLAG_IRQS_NOSUPPORT |
1908 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
1909 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1910 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1911 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1912 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1914 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
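/*
 * Worked example of the flag word built above (assuming the usual
 * latency-format rendering): an event recorded from a hard interrupt
 * handler with interrupts disabled and NEED_RESCHED set carries
 * (TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ | TRACE_FLAG_NEED_RESCHED),
 * which the output code shows as something like "dNh" in the flags
 * columns.
 */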
1916 static __always_inline void
1917 trace_event_setup(struct ring_buffer_event *event,
1918 int type, unsigned long flags, int pc)
1920 struct trace_entry *ent = ring_buffer_event_data(event);
1922 tracing_generic_entry_update(ent, flags, pc);
1926 struct ring_buffer_event *
1927 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1930 unsigned long flags, int pc)
1932 struct ring_buffer_event *event;
1934 event = ring_buffer_lock_reserve(buffer, len);
1936 trace_event_setup(event, type, flags, pc);
1941 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1942 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
1943 static int trace_buffered_event_ref;
1946 * trace_buffered_event_enable - enable buffering events
1948 * When events are being filtered, it is quicker to use a temporary
1949 * buffer to write the event data into if there's a likely chance
1950 * that it will not be committed. The discard of the ring buffer
1951 * is not as fast as committing, and is much slower than copying
1954 * When an event is to be filtered, allocate per cpu buffers to
1955 * write the event data into, and if the event is filtered and discarded
1956 * it is simply dropped, otherwise, the entire data is to be committed
1959 void trace_buffered_event_enable(void)
1961 struct ring_buffer_event *event;
1965 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
1967 if (trace_buffered_event_ref++)
1970 for_each_tracing_cpu(cpu) {
1971 page = alloc_pages_node(cpu_to_node(cpu),
1972 GFP_KERNEL | __GFP_NORETRY, 0);
1976 event = page_address(page);
1977 memset(event, 0, sizeof(*event));
1979 per_cpu(trace_buffered_event, cpu) = event;
1982 if (cpu == smp_processor_id() &&
1983 this_cpu_read(trace_buffered_event) !=
1984 per_cpu(trace_buffered_event, cpu))
1991 trace_buffered_event_disable();
1994 static void enable_trace_buffered_event(void *data)
1996 /* Probably not needed, but do it anyway */
1998 this_cpu_dec(trace_buffered_event_cnt);
2001 static void disable_trace_buffered_event(void *data)
2003 this_cpu_inc(trace_buffered_event_cnt);
2007 * trace_buffered_event_disable - disable buffering events
2009 * When a filter is removed, it is faster to not use the buffered
2010 * events, and to commit directly into the ring buffer. Free up
2011 * the temp buffers when there are no more users. This requires
2012 * special synchronization with current events.
2014 void trace_buffered_event_disable(void)
2018 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2020 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2023 if (--trace_buffered_event_ref)
2027 /* For each CPU, set the buffer as used. */
2028 smp_call_function_many(tracing_buffer_mask,
2029 disable_trace_buffered_event, NULL, 1);
2032 /* Wait for all current users to finish */
2033 synchronize_sched();
2035 for_each_tracing_cpu(cpu) {
2036 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2037 per_cpu(trace_buffered_event, cpu) = NULL;
2040 * Make sure trace_buffered_event is NULL before clearing
2041 * trace_buffered_event_cnt.
2046 /* Do the work on each cpu */
2047 smp_call_function_many(tracing_buffer_mask,
2048 enable_trace_buffered_event, NULL, 1);
2053 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
2055 __this_cpu_write(trace_cmdline_save, true);
2057 /* If this is the temp buffer, we need to commit fully */
2058 if (this_cpu_read(trace_buffered_event) == event) {
2059 /* Length is in event->array[0] */
2060 ring_buffer_write(buffer, event->array[0], &event->array[1]);
2061 /* Release the temp buffer */
2062 this_cpu_dec(trace_buffered_event_cnt);
2064 ring_buffer_unlock_commit(buffer, event);
2067 static struct ring_buffer *temp_buffer;
2069 struct ring_buffer_event *
2070 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2071 struct trace_event_file *trace_file,
2072 int type, unsigned long len,
2073 unsigned long flags, int pc)
2075 struct ring_buffer_event *entry;
2078 *current_rb = trace_file->tr->trace_buffer.buffer;
2080 if ((trace_file->flags &
2081 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2082 (entry = this_cpu_read(trace_buffered_event))) {
2083 /* Try to use the per cpu buffer first */
2084 val = this_cpu_inc_return(trace_buffered_event_cnt);
2086 trace_event_setup(entry, type, flags, pc);
2087 entry->array[0] = len;
2090 this_cpu_dec(trace_buffered_event_cnt);
2093 entry = trace_buffer_lock_reserve(*current_rb,
2094 type, len, flags, pc);
2096 * If tracing is off, but we have triggers enabled
2097 * we still need to look at the event data. Use the temp_buffer
2098 * to store the trace event for the trigger to use. It's recursion
2099 * safe and will not be recorded anywhere.
2101 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2102 *current_rb = temp_buffer;
2103 entry = trace_buffer_lock_reserve(*current_rb,
2104 type, len, flags, pc);
2108 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2110 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2111 struct ring_buffer *buffer,
2112 struct ring_buffer_event *event,
2113 unsigned long flags, int pc,
2114 struct pt_regs *regs)
2116 __buffer_unlock_commit(buffer, event);
2119 * If regs is not set, then skip the following callers:
2120 * trace_buffer_unlock_commit_regs
2121 * event_trigger_unlock_commit
2122 * trace_event_buffer_commit
2123 * trace_event_raw_event_sched_switch
2124 * Note, we can still get here via blktrace, wakeup tracer
2125 * and mmiotrace, but that's ok if they lose a function or
2126 * two. They are not that meaningful.
2128 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
2129 ftrace_trace_userstack(buffer, flags, pc);
2133 trace_process_export(struct trace_export *export,
2134 struct ring_buffer_event *event)
2136 struct trace_entry *entry;
2137 unsigned int size = 0;
2139 entry = ring_buffer_event_data(event);
2140 size = ring_buffer_event_length(event);
2141 export->write(entry, size);
2144 static DEFINE_MUTEX(ftrace_export_lock);
2146 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2148 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2150 static inline void ftrace_exports_enable(void)
2152 static_branch_enable(&ftrace_exports_enabled);
2155 static inline void ftrace_exports_disable(void)
2157 static_branch_disable(&ftrace_exports_enabled);
2160 void ftrace_exports(struct ring_buffer_event *event)
2162 struct trace_export *export;
2164 preempt_disable_notrace();
2166 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2168 trace_process_export(export, event);
2169 export = rcu_dereference_raw_notrace(export->next);
2172 preempt_enable_notrace();
2176 add_trace_export(struct trace_export **list, struct trace_export *export)
2178 rcu_assign_pointer(export->next, *list);
2180 * We are inserting the export into the list, but another
2181 * CPU might be walking that list. We need to make sure
2182 * the export->next pointer is valid before another CPU sees
2183 * the export pointer included into the list.
2185 rcu_assign_pointer(*list, export);
2189 rm_trace_export(struct trace_export **list, struct trace_export *export)
2191 struct trace_export **p;
2193 for (p = list; *p != NULL; p = &(*p)->next)
2200 rcu_assign_pointer(*p, (*p)->next);
2206 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2209 ftrace_exports_enable();
2211 add_trace_export(list, export);
2215 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2219 ret = rm_trace_export(list, export);
2221 ftrace_exports_disable();
2226 int register_ftrace_export(struct trace_export *export)
2228 if (WARN_ON_ONCE(!export->write))
2231 mutex_lock(&ftrace_export_lock);
2233 add_ftrace_export(&ftrace_exports_list, export);
2235 mutex_unlock(&ftrace_export_lock);
2239 EXPORT_SYMBOL_GPL(register_ftrace_export);
2241 int unregister_ftrace_export(struct trace_export *export)
2245 mutex_lock(&ftrace_export_lock);
2247 ret = rm_ftrace_export(&ftrace_exports_list, export);
2249 mutex_unlock(&ftrace_export_lock);
2253 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
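/*
 * Sketch of a trace_export client. The ->write() signature is inferred
 * from the trace_process_export() call above (entry pointer plus length);
 * the destination here is purely hypothetical.
 */
#if 0
static void example_export_write(const void *entry, unsigned int len)
{
	/* push the raw trace entry to a device, firmware log, etc. */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}
#endif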
2256 trace_function(struct trace_array *tr,
2257 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2260 struct trace_event_call *call = &event_function;
2261 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2262 struct ring_buffer_event *event;
2263 struct ftrace_entry *entry;
2265 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2269 entry = ring_buffer_event_data(event);
2271 entry->parent_ip = parent_ip;
2273 if (!call_filter_check_discard(call, entry, buffer, event)) {
2274 if (static_branch_unlikely(&ftrace_exports_enabled))
2275 ftrace_exports(event);
2276 __buffer_unlock_commit(buffer, event);
2280 #ifdef CONFIG_STACKTRACE
2282 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2283 struct ftrace_stack {
2284 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2287 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2288 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2290 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2291 unsigned long flags,
2292 int skip, int pc, struct pt_regs *regs)
2294 struct trace_event_call *call = &event_kernel_stack;
2295 struct ring_buffer_event *event;
2296 struct stack_entry *entry;
2297 struct stack_trace trace;
2299 int size = FTRACE_STACK_ENTRIES;
2301 trace.nr_entries = 0;
2305 * Add two, for this function and the call to save_stack_trace()
2306 * If regs is set, then these functions will not be in the way.
2312 * Since events can happen in NMIs there's no safe way to
2313 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2314 * or NMI comes in, it will just have to use the default
2315 * FTRACE_STACK_SIZE.
2317 preempt_disable_notrace();
2319 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2321 * We don't need any atomic variables, just a barrier.
2322 * If an interrupt comes in, we don't care, because it would
2323 * have exited and put the counter back to what we want.
2324 * We just need a barrier to keep gcc from moving things around.
2328 if (use_stack == 1) {
2329 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2330 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2333 save_stack_trace_regs(regs, &trace);
2335 save_stack_trace(&trace);
2337 if (trace.nr_entries > size)
2338 size = trace.nr_entries;
2340 /* From now on, use_stack is a boolean */
2343 size *= sizeof(unsigned long);
2345 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
2346 sizeof(*entry) + size, flags, pc);
2349 entry = ring_buffer_event_data(event);
2351 memset(&entry->caller, 0, size);
2354 memcpy(&entry->caller, trace.entries,
2355 trace.nr_entries * sizeof(unsigned long));
2357 trace.max_entries = FTRACE_STACK_ENTRIES;
2358 trace.entries = entry->caller;
2360 save_stack_trace_regs(regs, &trace);
2362 save_stack_trace(&trace);
2365 entry->size = trace.nr_entries;
2367 if (!call_filter_check_discard(call, entry, buffer, event))
2368 __buffer_unlock_commit(buffer, event);
2371 /* Again, don't let gcc optimize things here */
2373 __this_cpu_dec(ftrace_stack_reserve);
2374 preempt_enable_notrace();
2378 static inline void ftrace_trace_stack(struct trace_array *tr,
2379 struct ring_buffer *buffer,
2380 unsigned long flags,
2381 int skip, int pc, struct pt_regs *regs)
2383 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2386 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2389 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2392 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
2396 * trace_dump_stack - record a stack back trace in the trace buffer
2397 * @skip: Number of functions to skip (helper handlers)
2399 void trace_dump_stack(int skip)
2401 unsigned long flags;
2403 if (tracing_disabled || tracing_selftest_running)
2406 local_save_flags(flags);
2409 * Skip 3 more, seems to get us at the caller of this function.
2413 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2414 flags, skip, preempt_count(), NULL);
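/*
 * Sketch: from a driver, record a backtrace into the trace buffer at a
 * suspicious point without stopping the machine.
 */
#if 0
static void example_probe_point(bool unexpected)
{
	if (unexpected)
		trace_dump_stack(0);	/* 0: don't skip any extra callers */
}
#endif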
2417 static DEFINE_PER_CPU(int, user_stack_count);
2420 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2422 struct trace_event_call *call = &event_user_stack;
2423 struct ring_buffer_event *event;
2424 struct userstack_entry *entry;
2425 struct stack_trace trace;
2427 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2431 * NMIs cannot handle page faults, even with fixups.
2432 * Saving the user stack can (and often does) fault.
2434 if (unlikely(in_nmi()))
2438 * prevent recursion, since the user stack tracing may
2439 * trigger other kernel events.
2442 if (__this_cpu_read(user_stack_count))
2445 __this_cpu_inc(user_stack_count);
2447 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2448 sizeof(*entry), flags, pc);
2450 goto out_drop_count;
2451 entry = ring_buffer_event_data(event);
2453 entry->tgid = current->tgid;
2454 memset(&entry->caller, 0, sizeof(entry->caller));
2456 trace.nr_entries = 0;
2457 trace.max_entries = FTRACE_STACK_ENTRIES;
2459 trace.entries = entry->caller;
2461 save_stack_trace_user(&trace);
2462 if (!call_filter_check_discard(call, entry, buffer, event))
2463 __buffer_unlock_commit(buffer, event);
2466 __this_cpu_dec(user_stack_count);
2472 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2474 ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
2478 #endif /* CONFIG_STACKTRACE */
2480 /* created for use with alloc_percpu */
2481 struct trace_buffer_struct {
2483 char buffer[4][TRACE_BUF_SIZE];
2486 static struct trace_buffer_struct *trace_percpu_buffer;
2489 * This allows for lockless recording. If we're nested too deeply, then
2490 * this returns NULL.
2492 static char *get_trace_buf(void)
2494 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2496 if (!buffer || buffer->nesting >= 4)
2499 return &buffer->buffer[buffer->nesting++][0];
2502 static void put_trace_buf(void)
2504 this_cpu_dec(trace_percpu_buffer->nesting);
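/*
 * Hedged usage sketch: every successful get_trace_buf() must be paired
 * with a put_trace_buf() on the same CPU while preemption stays disabled,
 * which is exactly how trace_vbprintk()/__trace_array_vprintk() below use
 * it. format_something() is a hypothetical formatter.
 */
#if 0
static void my_format_into_percpu_buf(void)
{
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		format_something(tbuffer, TRACE_BUF_SIZE);
		put_trace_buf();
	}
	preempt_enable_notrace();
}
#endif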
2507 static int alloc_percpu_trace_buffer(void)
2509 struct trace_buffer_struct *buffers;
2511 buffers = alloc_percpu(struct trace_buffer_struct);
2512 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2515 trace_percpu_buffer = buffers;
2519 static int buffers_allocated;
2521 void trace_printk_init_buffers(void)
2523 if (buffers_allocated)
2526 if (alloc_percpu_trace_buffer())
2529 /* trace_printk() is for debug use only. Don't use it in production. */
2532 pr_warn("**********************************************************\n");
2533 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2535 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2537 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2538 pr_warn("** unsafe for production use. **\n");
2540 pr_warn("** If you see this message and you are not debugging **\n");
2541 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2543 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2544 pr_warn("**********************************************************\n");
2546 /* Expand the buffers to set size */
2547 tracing_update_buffers();
2549 buffers_allocated = 1;
2552 * trace_printk_init_buffers() can be called by modules.
2553 * If that happens, then we need to start cmdline recording
2554 * directly here. If the global_trace.buffer is already
2555 * allocated here, then this was called by module code.
2557 if (global_trace.trace_buffer.buffer)
2558 tracing_start_cmdline_record();
2561 void trace_printk_start_comm(void)
2563 /* Start tracing comms if trace printk is set */
2564 if (!buffers_allocated)
2566 tracing_start_cmdline_record();
2569 static void trace_printk_start_stop_comm(int enabled)
2571 if (!buffers_allocated)
2575 tracing_start_cmdline_record();
2577 tracing_stop_cmdline_record();
2581 * trace_vbprintk - write a binary message into the tracing buffer
2584 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2586 struct trace_event_call *call = &event_bprint;
2587 struct ring_buffer_event *event;
2588 struct ring_buffer *buffer;
2589 struct trace_array *tr = &global_trace;
2590 struct bprint_entry *entry;
2591 unsigned long flags;
2593 int len = 0, size, pc;
2595 if (unlikely(tracing_selftest_running || tracing_disabled))
2598 /* Don't pollute graph traces with trace_vprintk internals */
2599 pause_graph_tracing();
2601 pc = preempt_count();
2602 preempt_disable_notrace();
2604 tbuffer = get_trace_buf();
2610 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2612 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2615 local_save_flags(flags);
2616 size = sizeof(*entry) + sizeof(u32) * len;
2617 buffer = tr->trace_buffer.buffer;
2618 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2622 entry = ring_buffer_event_data(event);
2626 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2627 if (!call_filter_check_discard(call, entry, buffer, event)) {
2628 __buffer_unlock_commit(buffer, event);
2629 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2636 preempt_enable_notrace();
2637 unpause_graph_tracing();
2641 EXPORT_SYMBOL_GPL(trace_vbprintk);
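/*
 * Hedged sketch of how a printf-style front end funnels into
 * trace_vbprintk(); the real wrappers live in kernel/trace/trace_printk.c.
 * my_trace_bprintk() is a hypothetical name.
 */
static int __maybe_unused my_trace_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);

	return ret;
}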
2644 __trace_array_vprintk(struct ring_buffer *buffer,
2645 unsigned long ip, const char *fmt, va_list args)
2647 struct trace_event_call *call = &event_print;
2648 struct ring_buffer_event *event;
2649 int len = 0, size, pc;
2650 struct print_entry *entry;
2651 unsigned long flags;
2654 if (tracing_disabled || tracing_selftest_running)
2657 /* Don't pollute graph traces with trace_vprintk internals */
2658 pause_graph_tracing();
2660 pc = preempt_count();
2661 preempt_disable_notrace();
2664 tbuffer = get_trace_buf();
2670 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2672 local_save_flags(flags);
2673 size = sizeof(*entry) + len + 1;
2674 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2678 entry = ring_buffer_event_data(event);
2681 memcpy(&entry->buf, tbuffer, len + 1);
2682 if (!call_filter_check_discard(call, entry, buffer, event)) {
2683 __buffer_unlock_commit(buffer, event);
2684 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2691 preempt_enable_notrace();
2692 unpause_graph_tracing();
2697 int trace_array_vprintk(struct trace_array *tr,
2698 unsigned long ip, const char *fmt, va_list args)
2700 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2703 int trace_array_printk(struct trace_array *tr,
2704 unsigned long ip, const char *fmt, ...)
2709 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2713 ret = trace_array_vprintk(tr, ip, fmt, ap);
2718 int trace_array_printk_buf(struct ring_buffer *buffer,
2719 unsigned long ip, const char *fmt, ...)
2724 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2728 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2733 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2735 return trace_array_vprintk(&global_trace, ip, fmt, args);
2737 EXPORT_SYMBOL_GPL(trace_vprintk);
2739 static void trace_iterator_increment(struct trace_iterator *iter)
2741 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2745 ring_buffer_read(buf_iter, NULL);
2748 static struct trace_entry *
2749 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2750 unsigned long *lost_events)
2752 struct ring_buffer_event *event;
2753 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2756 event = ring_buffer_iter_peek(buf_iter, ts);
2758 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2762 iter->ent_size = ring_buffer_event_length(event);
2763 return ring_buffer_event_data(event);
2769 static struct trace_entry *
2770 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2771 unsigned long *missing_events, u64 *ent_ts)
2773 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2774 struct trace_entry *ent, *next = NULL;
2775 unsigned long lost_events = 0, next_lost = 0;
2776 int cpu_file = iter->cpu_file;
2777 u64 next_ts = 0, ts;
2783 * If we are in a per_cpu trace file, don't bother iterating over
2784 * all the CPUs; just peek at that CPU directly.
2786 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2787 if (ring_buffer_empty_cpu(buffer, cpu_file))
2789 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2791 *ent_cpu = cpu_file;
2796 for_each_tracing_cpu(cpu) {
2798 if (ring_buffer_empty_cpu(buffer, cpu))
2801 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2804 * Pick the entry with the smallest timestamp:
2806 if (ent && (!next || ts < next_ts)) {
2810 next_lost = lost_events;
2811 next_size = iter->ent_size;
2815 iter->ent_size = next_size;
2818 *ent_cpu = next_cpu;
2824 *missing_events = next_lost;
2829 /* Find the next real entry, without updating the iterator itself */
2830 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2831 int *ent_cpu, u64 *ent_ts)
2833 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2836 /* Find the next real entry, and increment the iterator to the next entry */
2837 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2839 iter->ent = __find_next_entry(iter, &iter->cpu,
2840 &iter->lost_events, &iter->ts);
2843 trace_iterator_increment(iter);
2845 return iter->ent ? iter : NULL;
2848 static void trace_consume(struct trace_iterator *iter)
2850 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2851 &iter->lost_events);
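/*
 * Hedged sketch: a non-consuming reader walks the buffer by calling
 * trace_find_next_entry_inc() until it returns NULL, which is what the
 * seq_file callbacks below do. handle_entry() is a hypothetical consumer.
 */
#if 0
static void my_walk_entries(struct trace_iterator *iter)
{
	while (trace_find_next_entry_inc(iter))
		handle_entry(iter->ent, iter->cpu, iter->ts);
}
#endif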
2854 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2856 struct trace_iterator *iter = m->private;
2860 WARN_ON_ONCE(iter->leftover);
2864 /* can't go backwards */
2869 ent = trace_find_next_entry_inc(iter);
2873 while (ent && iter->idx < i)
2874 ent = trace_find_next_entry_inc(iter);
2881 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2883 struct ring_buffer_event *event;
2884 struct ring_buffer_iter *buf_iter;
2885 unsigned long entries = 0;
2888 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2890 buf_iter = trace_buffer_iter(iter, cpu);
2894 ring_buffer_iter_reset(buf_iter);
2897 * We could have the case with the max latency tracers
2898 * that a reset never took place on a cpu. This is evident
2899 * by the timestamp being before the start of the buffer.
2901 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2902 if (ts >= iter->trace_buffer->time_start)
2905 ring_buffer_read(buf_iter, NULL);
2908 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2912 * The current tracer is copied to avoid global locking all around.
2915 static void *s_start(struct seq_file *m, loff_t *pos)
2917 struct trace_iterator *iter = m->private;
2918 struct trace_array *tr = iter->tr;
2919 int cpu_file = iter->cpu_file;
2925 * copy the tracer to avoid using a global lock all around.
2926 * iter->trace is a copy of current_trace, the pointer to the
2927 * name may be used instead of a strcmp(), as iter->trace->name
2928 * will point to the same string as current_trace->name.
2930 mutex_lock(&trace_types_lock);
2931 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2932 *iter->trace = *tr->current_trace;
2933 mutex_unlock(&trace_types_lock);
2935 #ifdef CONFIG_TRACER_MAX_TRACE
2936 if (iter->snapshot && iter->trace->use_max_tr)
2937 return ERR_PTR(-EBUSY);
2940 if (!iter->snapshot)
2941 atomic_inc(&trace_record_cmdline_disabled);
2943 if (*pos != iter->pos) {
2948 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2949 for_each_tracing_cpu(cpu)
2950 tracing_iter_reset(iter, cpu);
2952 tracing_iter_reset(iter, cpu_file);
2955 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2960 * If we overflowed the seq_file before, then we want
2961 * to just reuse the trace_seq buffer again.
2967 p = s_next(m, p, &l);
2971 trace_event_read_lock();
2972 trace_access_lock(cpu_file);
2976 static void s_stop(struct seq_file *m, void *p)
2978 struct trace_iterator *iter = m->private;
2980 #ifdef CONFIG_TRACER_MAX_TRACE
2981 if (iter->snapshot && iter->trace->use_max_tr)
2985 if (!iter->snapshot)
2986 atomic_dec(&trace_record_cmdline_disabled);
2988 trace_access_unlock(iter->cpu_file);
2989 trace_event_read_unlock();
2993 get_total_entries(struct trace_buffer *buf,
2994 unsigned long *total, unsigned long *entries)
2996 unsigned long count;
3002 for_each_tracing_cpu(cpu) {
3003 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3005 * If this buffer has skipped entries, then we hold all
3006 * entries for the trace and we need to ignore the
3007 * ones before the time stamp.
3009 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3010 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3011 /* total is the same as the entries */
3015 ring_buffer_overrun_cpu(buf->buffer, cpu);
3020 static void print_lat_help_header(struct seq_file *m)
3022 seq_puts(m, "# _------=> CPU# \n"
3023 "# / _-----=> irqs-off \n"
3024 "# | / _----=> need-resched \n"
3025 "# || / _---=> hardirq/softirq \n"
3026 "# ||| / _--=> preempt-depth \n"
3028 "# cmd pid ||||| time | caller \n"
3029 "# \\ / ||||| \\ | / \n");
3032 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3034 unsigned long total;
3035 unsigned long entries;
3037 get_total_entries(buf, &total, &entries);
3038 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3039 entries, total, num_online_cpus());
3043 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
3045 print_event_info(buf, m);
3046 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
3050 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
3052 print_event_info(buf, m);
3053 seq_puts(m, "# _-----=> irqs-off\n"
3054 "# / _----=> need-resched\n"
3055 "# | / _---=> hardirq/softirq\n"
3056 "# || / _--=> preempt-depth\n"
3058 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
3059 "# | | | |||| | |\n");
3063 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3065 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3066 struct trace_buffer *buf = iter->trace_buffer;
3067 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3068 struct tracer *type = iter->trace;
3069 unsigned long entries;
3070 unsigned long total;
3071 const char *name = "preemption";
3075 get_total_entries(buf, &total, &entries);
3077 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3079 seq_puts(m, "# -----------------------------------"
3080 "---------------------------------\n");
3081 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3082 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3083 nsecs_to_usecs(data->saved_latency),
3087 #if defined(CONFIG_PREEMPT_NONE)
3089 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3091 #elif defined(CONFIG_PREEMPT)
3096 /* These are reserved for later use */
3099 seq_printf(m, " #P:%d)\n", num_online_cpus());
3103 seq_puts(m, "# -----------------\n");
3104 seq_printf(m, "# | task: %.16s-%d "
3105 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3106 data->comm, data->pid,
3107 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3108 data->policy, data->rt_priority);
3109 seq_puts(m, "# -----------------\n");
3111 if (data->critical_start) {
3112 seq_puts(m, "# => started at: ");
3113 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3114 trace_print_seq(m, &iter->seq);
3115 seq_puts(m, "\n# => ended at: ");
3116 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3117 trace_print_seq(m, &iter->seq);
3118 seq_puts(m, "\n#\n");
3124 static void test_cpu_buff_start(struct trace_iterator *iter)
3126 struct trace_seq *s = &iter->seq;
3127 struct trace_array *tr = iter->tr;
3129 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3132 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3135 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
3138 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3142 cpumask_set_cpu(iter->cpu, iter->started);
3144 /* Don't print started cpu buffer for the first entry of the trace */
3146 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3150 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3152 struct trace_array *tr = iter->tr;
3153 struct trace_seq *s = &iter->seq;
3154 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3155 struct trace_entry *entry;
3156 struct trace_event *event;
3160 test_cpu_buff_start(iter);
3162 event = ftrace_find_event(entry->type);
3164 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3165 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3166 trace_print_lat_context(iter);
3168 trace_print_context(iter);
3171 if (trace_seq_has_overflowed(s))
3172 return TRACE_TYPE_PARTIAL_LINE;
3175 return event->funcs->trace(iter, sym_flags, event);
3177 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3179 return trace_handle_return(s);
3182 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3184 struct trace_array *tr = iter->tr;
3185 struct trace_seq *s = &iter->seq;
3186 struct trace_entry *entry;
3187 struct trace_event *event;
3191 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3192 trace_seq_printf(s, "%d %d %llu ",
3193 entry->pid, iter->cpu, iter->ts);
3195 if (trace_seq_has_overflowed(s))
3196 return TRACE_TYPE_PARTIAL_LINE;
3198 event = ftrace_find_event(entry->type);
3200 return event->funcs->raw(iter, 0, event);
3202 trace_seq_printf(s, "%d ?\n", entry->type);
3204 return trace_handle_return(s);
3207 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3209 struct trace_array *tr = iter->tr;
3210 struct trace_seq *s = &iter->seq;
3211 unsigned char newline = '\n';
3212 struct trace_entry *entry;
3213 struct trace_event *event;
3217 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3218 SEQ_PUT_HEX_FIELD(s, entry->pid);
3219 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3220 SEQ_PUT_HEX_FIELD(s, iter->ts);
3221 if (trace_seq_has_overflowed(s))
3222 return TRACE_TYPE_PARTIAL_LINE;
3225 event = ftrace_find_event(entry->type);
3227 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3228 if (ret != TRACE_TYPE_HANDLED)
3232 SEQ_PUT_FIELD(s, newline);
3234 return trace_handle_return(s);
3237 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3239 struct trace_array *tr = iter->tr;
3240 struct trace_seq *s = &iter->seq;
3241 struct trace_entry *entry;
3242 struct trace_event *event;
3246 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3247 SEQ_PUT_FIELD(s, entry->pid);
3248 SEQ_PUT_FIELD(s, iter->cpu);
3249 SEQ_PUT_FIELD(s, iter->ts);
3250 if (trace_seq_has_overflowed(s))
3251 return TRACE_TYPE_PARTIAL_LINE;
3254 event = ftrace_find_event(entry->type);
3255 return event ? event->funcs->binary(iter, 0, event) :
3259 int trace_empty(struct trace_iterator *iter)
3261 struct ring_buffer_iter *buf_iter;
3264 /* If we are looking at one CPU buffer, only check that one */
3265 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3266 cpu = iter->cpu_file;
3267 buf_iter = trace_buffer_iter(iter, cpu);
3269 if (!ring_buffer_iter_empty(buf_iter))
3272 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3278 for_each_tracing_cpu(cpu) {
3279 buf_iter = trace_buffer_iter(iter, cpu);
3281 if (!ring_buffer_iter_empty(buf_iter))
3284 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3292 /* Called with trace_event_read_lock() held. */
3293 enum print_line_t print_trace_line(struct trace_iterator *iter)
3295 struct trace_array *tr = iter->tr;
3296 unsigned long trace_flags = tr->trace_flags;
3297 enum print_line_t ret;
3299 if (iter->lost_events) {
3300 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3301 iter->cpu, iter->lost_events);
3302 if (trace_seq_has_overflowed(&iter->seq))
3303 return TRACE_TYPE_PARTIAL_LINE;
3306 if (iter->trace && iter->trace->print_line) {
3307 ret = iter->trace->print_line(iter);
3308 if (ret != TRACE_TYPE_UNHANDLED)
3312 if (iter->ent->type == TRACE_BPUTS &&
3313 trace_flags & TRACE_ITER_PRINTK &&
3314 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3315 return trace_print_bputs_msg_only(iter);
3317 if (iter->ent->type == TRACE_BPRINT &&
3318 trace_flags & TRACE_ITER_PRINTK &&
3319 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3320 return trace_print_bprintk_msg_only(iter);
3322 if (iter->ent->type == TRACE_PRINT &&
3323 trace_flags & TRACE_ITER_PRINTK &&
3324 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3325 return trace_print_printk_msg_only(iter);
3327 if (trace_flags & TRACE_ITER_BIN)
3328 return print_bin_fmt(iter);
3330 if (trace_flags & TRACE_ITER_HEX)
3331 return print_hex_fmt(iter);
3333 if (trace_flags & TRACE_ITER_RAW)
3334 return print_raw_fmt(iter);
3336 return print_trace_fmt(iter);
3339 void trace_latency_header(struct seq_file *m)
3341 struct trace_iterator *iter = m->private;
3342 struct trace_array *tr = iter->tr;
3344 /* print nothing if the buffers are empty */
3345 if (trace_empty(iter))
3348 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3349 print_trace_header(m, iter);
3351 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3352 print_lat_help_header(m);
3355 void trace_default_header(struct seq_file *m)
3357 struct trace_iterator *iter = m->private;
3358 struct trace_array *tr = iter->tr;
3359 unsigned long trace_flags = tr->trace_flags;
3361 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3364 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3365 /* print nothing if the buffers are empty */
3366 if (trace_empty(iter))
3368 print_trace_header(m, iter);
3369 if (!(trace_flags & TRACE_ITER_VERBOSE))
3370 print_lat_help_header(m);
3372 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3373 if (trace_flags & TRACE_ITER_IRQ_INFO)
3374 print_func_help_header_irq(iter->trace_buffer, m);
3376 print_func_help_header(iter->trace_buffer, m);
3381 static void test_ftrace_alive(struct seq_file *m)
3383 if (!ftrace_is_dead())
3385 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3386 "# MAY BE MISSING FUNCTION EVENTS\n");
3389 #ifdef CONFIG_TRACER_MAX_TRACE
3390 static void show_snapshot_main_help(struct seq_file *m)
3392 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3393 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3394 "# Takes a snapshot of the main buffer.\n"
3395 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3396 "# (Doesn't have to be '2' works with any number that\n"
3397 "# is not a '0' or '1')\n");
3400 static void show_snapshot_percpu_help(struct seq_file *m)
3402 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3403 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3404 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3405 "# Takes a snapshot of the main buffer for this cpu.\n");
3407 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3408 "# Must use main snapshot file to allocate.\n");
3410 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3411 "# (Doesn't have to be '2' works with any number that\n"
3412 "# is not a '0' or '1')\n");
3415 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3417 if (iter->tr->allocated_snapshot)
3418 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3420 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3422 seq_puts(m, "# Snapshot commands:\n");
3423 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3424 show_snapshot_main_help(m);
3426 show_snapshot_percpu_help(m);
3429 /* Should never be called */
3430 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3433 static int s_show(struct seq_file *m, void *v)
3435 struct trace_iterator *iter = v;
3438 if (iter->ent == NULL) {
3440 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3442 test_ftrace_alive(m);
3444 if (iter->snapshot && trace_empty(iter))
3445 print_snapshot_help(m, iter);
3446 else if (iter->trace && iter->trace->print_header)
3447 iter->trace->print_header(m);
3449 trace_default_header(m);
3451 } else if (iter->leftover) {
3453 * If we filled the seq_file buffer earlier, we
3454 * want to just show it now.
3456 ret = trace_print_seq(m, &iter->seq);
3458 /* ret should this time be zero, but you never know */
3459 iter->leftover = ret;
3462 print_trace_line(iter);
3463 ret = trace_print_seq(m, &iter->seq);
3465 * If we overflow the seq_file buffer, then it will
3466 * ask us for this data again at start up.
3468 * ret is 0 if seq_file write succeeded.
3471 iter->leftover = ret;
3478 * Should be used after trace_array_get(), trace_types_lock
3479 * ensures that i_cdev was already initialized.
3481 static inline int tracing_get_cpu(struct inode *inode)
3483 if (inode->i_cdev) /* See trace_create_cpu_file() */
3484 return (long)inode->i_cdev - 1;
3485 return RING_BUFFER_ALL_CPUS;
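/*
 * Added note (assumption, based on the decode above): trace_create_cpu_file()
 * is expected to stash "cpu + 1" in i_cdev, so a NULL i_cdev (0) means no
 * specific CPU and maps to RING_BUFFER_ALL_CPUS, while a stored value of N
 * decodes to CPU N - 1.
 */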
3488 static const struct seq_operations tracer_seq_ops = {
3495 static struct trace_iterator *
3496 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3498 struct trace_array *tr = inode->i_private;
3499 struct trace_iterator *iter;
3502 if (tracing_disabled)
3503 return ERR_PTR(-ENODEV);
3505 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3507 return ERR_PTR(-ENOMEM);
3509 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3511 if (!iter->buffer_iter)
3515 * We make a copy of the current tracer to avoid concurrent
3516 * changes on it while we are reading.
3518 mutex_lock(&trace_types_lock);
3519 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3523 *iter->trace = *tr->current_trace;
3525 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3530 #ifdef CONFIG_TRACER_MAX_TRACE
3531 /* Currently only the top directory has a snapshot */
3532 if (tr->current_trace->print_max || snapshot)
3533 iter->trace_buffer = &tr->max_buffer;
3536 iter->trace_buffer = &tr->trace_buffer;
3537 iter->snapshot = snapshot;
3539 iter->cpu_file = tracing_get_cpu(inode);
3540 mutex_init(&iter->mutex);
3542 /* Notify the tracer early; before we stop tracing. */
3543 if (iter->trace && iter->trace->open)
3544 iter->trace->open(iter);
3546 /* Annotate start of buffers if we had overruns */
3547 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3548 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3550 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3551 if (trace_clocks[tr->clock_id].in_ns)
3552 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3554 /* stop the trace while dumping if we are not opening "snapshot" */
3555 if (!iter->snapshot)
3556 tracing_stop_tr(tr);
3558 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3559 for_each_tracing_cpu(cpu) {
3560 iter->buffer_iter[cpu] =
3561 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3563 ring_buffer_read_prepare_sync();
3564 for_each_tracing_cpu(cpu) {
3565 ring_buffer_read_start(iter->buffer_iter[cpu]);
3566 tracing_iter_reset(iter, cpu);
3569 cpu = iter->cpu_file;
3570 iter->buffer_iter[cpu] =
3571 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3572 ring_buffer_read_prepare_sync();
3573 ring_buffer_read_start(iter->buffer_iter[cpu]);
3574 tracing_iter_reset(iter, cpu);
3577 mutex_unlock(&trace_types_lock);
3582 mutex_unlock(&trace_types_lock);
3584 kfree(iter->buffer_iter);
3586 seq_release_private(inode, file);
3587 return ERR_PTR(-ENOMEM);
3590 int tracing_open_generic(struct inode *inode, struct file *filp)
3592 if (tracing_disabled)
3595 filp->private_data = inode->i_private;
3599 bool tracing_is_disabled(void)
3601 return tracing_disabled ? true : false;
3605 * Open and update trace_array ref count.
3606 * Must have the current trace_array passed to it.
3608 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3610 struct trace_array *tr = inode->i_private;
3612 if (tracing_disabled)
3615 if (trace_array_get(tr) < 0)
3618 filp->private_data = inode->i_private;
3623 static int tracing_release(struct inode *inode, struct file *file)
3625 struct trace_array *tr = inode->i_private;
3626 struct seq_file *m = file->private_data;
3627 struct trace_iterator *iter;
3630 if (!(file->f_mode & FMODE_READ)) {
3631 trace_array_put(tr);
3635 /* Writes do not use seq_file */
3637 mutex_lock(&trace_types_lock);
3639 for_each_tracing_cpu(cpu) {
3640 if (iter->buffer_iter[cpu])
3641 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3644 if (iter->trace && iter->trace->close)
3645 iter->trace->close(iter);
3647 if (!iter->snapshot)
3648 /* reenable tracing if it was previously enabled */
3649 tracing_start_tr(tr);
3651 __trace_array_put(tr);
3653 mutex_unlock(&trace_types_lock);
3655 mutex_destroy(&iter->mutex);
3656 free_cpumask_var(iter->started);
3658 kfree(iter->buffer_iter);
3659 seq_release_private(inode, file);
3664 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3666 struct trace_array *tr = inode->i_private;
3668 trace_array_put(tr);
3672 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3674 struct trace_array *tr = inode->i_private;
3676 trace_array_put(tr);
3678 return single_release(inode, file);
3681 static int tracing_open(struct inode *inode, struct file *file)
3683 struct trace_array *tr = inode->i_private;
3684 struct trace_iterator *iter;
3687 if (trace_array_get(tr) < 0)
3690 /* If this file was open for write, then erase contents */
3691 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3692 int cpu = tracing_get_cpu(inode);
3694 if (cpu == RING_BUFFER_ALL_CPUS)
3695 tracing_reset_online_cpus(&tr->trace_buffer);
3697 tracing_reset(&tr->trace_buffer, cpu);
3700 if (file->f_mode & FMODE_READ) {
3701 iter = __tracing_open(inode, file, false);
3703 ret = PTR_ERR(iter);
3704 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3705 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3709 trace_array_put(tr);
3715 * Some tracers are not suitable for instance buffers.
3716 * A tracer is always available for the global array (toplevel)
3717 * or if it explicitly states that it is.
3720 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3722 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
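/*
 * Hedged sketch: a tracer that is safe to run inside instance buffers
 * advertises that by setting ->allow_instances in its struct tracer.
 * "my_tracer" and its callbacks are hypothetical.
 */
#if 0
static struct tracer my_tracer __read_mostly = {
	.name			= "my_tracer",
	.init			= my_tracer_init,
	.reset			= my_tracer_reset,
	.allow_instances	= true,
};
#endif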
3725 /* Find the next tracer that this trace array may use */
3726 static struct tracer *
3727 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3729 while (t && !trace_ok_for_array(t, tr))
3736 t_next(struct seq_file *m, void *v, loff_t *pos)
3738 struct trace_array *tr = m->private;
3739 struct tracer *t = v;
3744 t = get_tracer_for_array(tr, t->next);
3749 static void *t_start(struct seq_file *m, loff_t *pos)
3751 struct trace_array *tr = m->private;
3755 mutex_lock(&trace_types_lock);
3757 t = get_tracer_for_array(tr, trace_types);
3758 for (; t && l < *pos; t = t_next(m, t, &l))
3764 static void t_stop(struct seq_file *m, void *p)
3766 mutex_unlock(&trace_types_lock);
3769 static int t_show(struct seq_file *m, void *v)
3771 struct tracer *t = v;
3776 seq_puts(m, t->name);
3785 static const struct seq_operations show_traces_seq_ops = {
3792 static int show_traces_open(struct inode *inode, struct file *file)
3794 struct trace_array *tr = inode->i_private;
3798 if (tracing_disabled)
3801 ret = seq_open(file, &show_traces_seq_ops);
3805 m = file->private_data;
3812 tracing_write_stub(struct file *filp, const char __user *ubuf,
3813 size_t count, loff_t *ppos)
3818 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3822 if (file->f_mode & FMODE_READ)
3823 ret = seq_lseek(file, offset, whence);
3825 file->f_pos = ret = 0;
3830 static const struct file_operations tracing_fops = {
3831 .open = tracing_open,
3833 .write = tracing_write_stub,
3834 .llseek = tracing_lseek,
3835 .release = tracing_release,
3838 static const struct file_operations show_traces_fops = {
3839 .open = show_traces_open,
3841 .release = seq_release,
3842 .llseek = seq_lseek,
3846 * The tracer itself will not take this lock, but still we want
3847 * to provide a consistent cpumask to user-space:
3849 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3852 * Temporary storage for the character representation of the
3853 * CPU bitmask (and one more byte for the newline):
3855 static char mask_str[NR_CPUS + 1];
3858 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3859 size_t count, loff_t *ppos)
3861 struct trace_array *tr = file_inode(filp)->i_private;
3864 mutex_lock(&tracing_cpumask_update_lock);
3866 len = snprintf(mask_str, count, "%*pb\n",
3867 cpumask_pr_args(tr->tracing_cpumask));
3872 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3875 mutex_unlock(&tracing_cpumask_update_lock);
3881 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3882 size_t count, loff_t *ppos)
3884 struct trace_array *tr = file_inode(filp)->i_private;
3885 cpumask_var_t tracing_cpumask_new;
3888 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3891 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3895 mutex_lock(&tracing_cpumask_update_lock);
3897 local_irq_disable();
3898 arch_spin_lock(&tr->max_lock);
3899 for_each_tracing_cpu(cpu) {
3901 * Increase/decrease the disabled counter if we are
3902 * about to flip a bit in the cpumask:
3904 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3905 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3906 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3907 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3909 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3910 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3911 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3912 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3915 arch_spin_unlock(&tr->max_lock);
3918 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3920 mutex_unlock(&tracing_cpumask_update_lock);
3921 free_cpumask_var(tracing_cpumask_new);
3926 free_cpumask_var(tracing_cpumask_new);
3931 static const struct file_operations tracing_cpumask_fops = {
3932 .open = tracing_open_generic_tr,
3933 .read = tracing_cpumask_read,
3934 .write = tracing_cpumask_write,
3935 .release = tracing_release_generic_tr,
3936 .llseek = generic_file_llseek,
3939 static int tracing_trace_options_show(struct seq_file *m, void *v)
3941 struct tracer_opt *trace_opts;
3942 struct trace_array *tr = m->private;
3946 mutex_lock(&trace_types_lock);
3947 tracer_flags = tr->current_trace->flags->val;
3948 trace_opts = tr->current_trace->flags->opts;
3950 for (i = 0; trace_options[i]; i++) {
3951 if (tr->trace_flags & (1 << i))
3952 seq_printf(m, "%s\n", trace_options[i]);
3954 seq_printf(m, "no%s\n", trace_options[i]);
3957 for (i = 0; trace_opts[i].name; i++) {
3958 if (tracer_flags & trace_opts[i].bit)
3959 seq_printf(m, "%s\n", trace_opts[i].name);
3961 seq_printf(m, "no%s\n", trace_opts[i].name);
3963 mutex_unlock(&trace_types_lock);
3968 static int __set_tracer_option(struct trace_array *tr,
3969 struct tracer_flags *tracer_flags,
3970 struct tracer_opt *opts, int neg)
3972 struct tracer *trace = tracer_flags->trace;
3975 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3980 tracer_flags->val &= ~opts->bit;
3982 tracer_flags->val |= opts->bit;
3986 /* Try to assign a tracer specific option */
3987 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3989 struct tracer *trace = tr->current_trace;
3990 struct tracer_flags *tracer_flags = trace->flags;
3991 struct tracer_opt *opts = NULL;
3994 for (i = 0; tracer_flags->opts[i].name; i++) {
3995 opts = &tracer_flags->opts[i];
3997 if (strcmp(cmp, opts->name) == 0)
3998 return __set_tracer_option(tr, trace->flags, opts, neg);
4004 /* Some tracers require overwrite to stay enabled */
4005 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4007 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4013 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4015 /* do nothing if flag is already set */
4016 if (!!(tr->trace_flags & mask) == !!enabled)
4019 /* Give the tracer a chance to approve the change */
4020 if (tr->current_trace->flag_changed)
4021 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4025 tr->trace_flags |= mask;
4027 tr->trace_flags &= ~mask;
4029 if (mask == TRACE_ITER_RECORD_CMD)
4030 trace_event_enable_cmd_record(enabled);
4032 if (mask == TRACE_ITER_EVENT_FORK)
4033 trace_event_follow_fork(tr, enabled);
4035 if (mask == TRACE_ITER_OVERWRITE) {
4036 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4037 #ifdef CONFIG_TRACER_MAX_TRACE
4038 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4042 if (mask == TRACE_ITER_PRINTK) {
4043 trace_printk_start_stop_comm(enabled);
4044 trace_printk_control(enabled);
4050 static int trace_set_options(struct trace_array *tr, char *option)
4056 size_t orig_len = strlen(option);
4058 cmp = strstrip(option);
4060 if (strncmp(cmp, "no", 2) == 0) {
4065 mutex_lock(&trace_types_lock);
4067 for (i = 0; trace_options[i]; i++) {
4068 if (strcmp(cmp, trace_options[i]) == 0) {
4069 ret = set_tracer_flag(tr, 1 << i, !neg);
4074 /* If no option could be set, test the specific tracer options */
4075 if (!trace_options[i])
4076 ret = set_tracer_option(tr, cmp, neg);
4078 mutex_unlock(&trace_types_lock);
4081 * If the first trailing whitespace is replaced with '\0' by strstrip,
4082 * turn it back into a space.
4084 if (orig_len > strlen(option))
4085 option[strlen(option)] = ' ';
4090 static void __init apply_trace_boot_options(void)
4092 char *buf = trace_boot_options_buf;
4096 option = strsep(&buf, ",");
4102 trace_set_options(&global_trace, option);
4104 /* Put back the comma to allow this to be called again */
4111 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4112 size_t cnt, loff_t *ppos)
4114 struct seq_file *m = filp->private_data;
4115 struct trace_array *tr = m->private;
4119 if (cnt >= sizeof(buf))
4122 if (copy_from_user(buf, ubuf, cnt))
4127 ret = trace_set_options(tr, buf);
4136 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4138 struct trace_array *tr = inode->i_private;
4141 if (tracing_disabled)
4144 if (trace_array_get(tr) < 0)
4147 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4149 trace_array_put(tr);
4154 static const struct file_operations tracing_iter_fops = {
4155 .open = tracing_trace_options_open,
4157 .llseek = seq_lseek,
4158 .release = tracing_single_release_tr,
4159 .write = tracing_trace_options_write,
4162 static const char readme_msg[] =
4163 "tracing mini-HOWTO:\n\n"
4164 "# echo 0 > tracing_on : quick way to disable tracing\n"
4165 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4166 " Important files:\n"
4167 " trace\t\t\t- The static contents of the buffer\n"
4168 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4169 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4170 " current_tracer\t- function and latency tracers\n"
4171 " available_tracers\t- list of configured tracers for current_tracer\n"
4172 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4173 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4174 " trace_clock\t\t-change the clock used to order events\n"
4175 " local: Per cpu clock but may not be synced across CPUs\n"
4176 " global: Synced across CPUs but slows tracing down.\n"
4177 " counter: Not a clock, but just an increment\n"
4178 " uptime: Jiffy counter from time of boot\n"
4179 " perf: Same clock that perf events use\n"
4180 #ifdef CONFIG_X86_64
4181 " x86-tsc: TSC cycle counter\n"
4183 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4184 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4185 " tracing_cpumask\t- Limit which CPUs to trace\n"
4186 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4187 "\t\t\t Remove sub-buffer with rmdir\n"
4188 " trace_options\t\t- Set format or modify how tracing happens\n"
4189 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4190 "\t\t\t option name\n"
4191 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4192 #ifdef CONFIG_DYNAMIC_FTRACE
4193 "\n available_filter_functions - list of functions that can be filtered on\n"
4194 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4195 "\t\t\t functions\n"
4196 "\t accepts: func_full_name or glob-matching-pattern\n"
4197 "\t modules: Can select a group via module\n"
4198 "\t Format: :mod:<module-name>\n"
4199 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4200 "\t triggers: a command to perform when function is hit\n"
4201 "\t Format: <function>:<trigger>[:count]\n"
4202 "\t trigger: traceon, traceoff\n"
4203 "\t\t enable_event:<system>:<event>\n"
4204 "\t\t disable_event:<system>:<event>\n"
4205 #ifdef CONFIG_STACKTRACE
4208 #ifdef CONFIG_TRACER_SNAPSHOT
4213 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4214 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4215 "\t The first one will disable tracing every time do_fault is hit\n"
4216 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4217 "\t The first time do trap is hit and it disables tracing, the\n"
4218 "\t counter will decrement to 2. If tracing is already disabled,\n"
4219 "\t the counter will not decrement. It only decrements when the\n"
4220 "\t trigger did work\n"
4221 "\t To remove trigger without count:\n"
4222 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4223 "\t To remove trigger with a count:\n"
4224 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4225 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4226 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4227 "\t modules: Can select a group via module command :mod:\n"
4228 "\t Does not accept triggers\n"
4229 #endif /* CONFIG_DYNAMIC_FTRACE */
4230 #ifdef CONFIG_FUNCTION_TRACER
4231 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4234 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4235 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4236 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4237 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4239 #ifdef CONFIG_TRACER_SNAPSHOT
4240 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4241 "\t\t\t snapshot buffer. Read the contents for more\n"
4242 "\t\t\t information\n"
4244 #ifdef CONFIG_STACK_TRACER
4245 " stack_trace\t\t- Shows the max stack trace when active\n"
4246 " stack_max_size\t- Shows current max stack size that was traced\n"
4247 "\t\t\t Write into this file to reset the max size (trigger a\n"
4248 "\t\t\t new trace)\n"
4249 #ifdef CONFIG_DYNAMIC_FTRACE
4250 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4253 #endif /* CONFIG_STACK_TRACER */
4254 #ifdef CONFIG_KPROBE_EVENT
4255 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4256 "\t\t\t Write into this file to define/undefine new trace events.\n"
4258 #ifdef CONFIG_UPROBE_EVENT
4259 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4260 "\t\t\t Write into this file to define/undefine new trace events.\n"
4262 #if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
4263 "\t accepts: event-definitions (one definition per line)\n"
4264 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4265 "\t -:[<group>/]<event>\n"
4266 #ifdef CONFIG_KPROBE_EVENT
4267 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4269 #ifdef CONFIG_UPROBE_EVENT
4270 "\t place: <path>:<offset>\n"
4272 "\t args: <name>=fetcharg[:type]\n"
4273 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4274 "\t $stack<index>, $stack, $retval, $comm\n"
4275 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4276 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4278 " events/\t\t- Directory containing all trace event subsystems:\n"
4279 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4280 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4281 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4283 " filter\t\t- If set, only events passing filter are traced\n"
4284 " events/<system>/<event>/\t- Directory containing control files for\n"
4286 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4287 " filter\t\t- If set, only events passing filter are traced\n"
4288 " trigger\t\t- If set, a command to perform when event is hit\n"
4289 "\t Format: <trigger>[:count][if <filter>]\n"
4290 "\t trigger: traceon, traceoff\n"
4291 "\t enable_event:<system>:<event>\n"
4292 "\t disable_event:<system>:<event>\n"
4293 #ifdef CONFIG_HIST_TRIGGERS
4294 "\t enable_hist:<system>:<event>\n"
4295 "\t disable_hist:<system>:<event>\n"
4297 #ifdef CONFIG_STACKTRACE
4300 #ifdef CONFIG_TRACER_SNAPSHOT
4303 #ifdef CONFIG_HIST_TRIGGERS
4304 "\t\t hist (see below)\n"
4306 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4307 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4308 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4309 "\t events/block/block_unplug/trigger\n"
4310 "\t The first disables tracing every time block_unplug is hit.\n"
4311 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4312 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4313 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4314 "\t Like function triggers, the counter is only decremented if it\n"
4315 "\t enabled or disabled tracing.\n"
4316 "\t To remove a trigger without a count:\n"
4317 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4318 "\t To remove a trigger with a count:\n"
4319 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4320 "\t Filters can be ignored when removing a trigger.\n"
4321 #ifdef CONFIG_HIST_TRIGGERS
4322 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4323 "\t Format: hist:keys=<field1[,field2,...]>\n"
4324 "\t [:values=<field1[,field2,...]>]\n"
4325 "\t [:sort=<field1[,field2,...]>]\n"
4326 "\t [:size=#entries]\n"
4327 "\t [:pause][:continue][:clear]\n"
4328 "\t [:name=histname1]\n"
4329 "\t [if <filter>]\n\n"
4330 "\t When a matching event is hit, an entry is added to a hash\n"
4331 "\t table using the key(s) and value(s) named, and the value of a\n"
4332 "\t sum called 'hitcount' is incremented. Keys and values\n"
4333 "\t correspond to fields in the event's format description. Keys\n"
4334 "\t can be any field, or the special string 'stacktrace'.\n"
4335 "\t Compound keys consisting of up to two fields can be specified\n"
4336 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4337 "\t fields. Sort keys consisting of up to two fields can be\n"
4338 "\t specified using the 'sort' keyword. The sort direction can\n"
4339 "\t be modified by appending '.descending' or '.ascending' to a\n"
4340 "\t sort field. The 'size' parameter can be used to specify more\n"
4341 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4342 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4343 "\t its histogram data will be shared with other triggers of the\n"
4344 "\t same name, and trigger hits will update this common data.\n\n"
4345 "\t Reading the 'hist' file for the event will dump the hash\n"
4346 "\t table in its entirety to stdout. If there are multiple hist\n"
4347 "\t triggers attached to an event, there will be a table for each\n"
4348 "\t trigger in the output. The table displayed for a named\n"
4349 "\t trigger will be the same as any other instance having the\n"
4350 "\t same name. The default format used to display a given field\n"
4351 "\t can be modified by appending any of the following modifiers\n"
4352 "\t to the field name, as applicable:\n\n"
4353 "\t .hex display a number as a hex value\n"
4354 "\t .sym display an address as a symbol\n"
4355 "\t .sym-offset display an address as a symbol and offset\n"
4356 "\t .execname display a common_pid as a program name\n"
4357 "\t .syscall display a syscall id as a syscall name\n\n"
4358 "\t .log2 display log2 value rather than raw number\n\n"
4359 "\t The 'pause' parameter can be used to pause an existing hist\n"
4360 "\t trigger or to start a hist trigger but not log any events\n"
4361 "\t until told to do so. 'continue' can be used to start or\n"
4362 "\t restart a paused hist trigger.\n\n"
4363 "\t The 'clear' parameter will clear the contents of a running\n"
4364 "\t hist trigger and leave its current paused/active state\n"
4366 "\t The enable_hist and disable_hist triggers can be used to\n"
4367 "\t have one event conditionally start and stop another event's\n"
4368 "\t already-attached hist trigger. The syntax is analagous to\n"
4369 "\t the enable_event and disable_event triggers.\n"
4374 tracing_readme_read(struct file *filp, char __user *ubuf,
4375 size_t cnt, loff_t *ppos)
4377 return simple_read_from_buffer(ubuf, cnt, ppos,
4378 readme_msg, strlen(readme_msg));
4381 static const struct file_operations tracing_readme_fops = {
4382 .open = tracing_open_generic,
4383 .read = tracing_readme_read,
4384 .llseek = generic_file_llseek,
4387 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4389 unsigned int *ptr = v;
4391 if (*pos || m->count)
4396 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4398 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4407 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4413 arch_spin_lock(&trace_cmdline_lock);
4415 v = &savedcmd->map_cmdline_to_pid[0];
4417 v = saved_cmdlines_next(m, v, &l);
4425 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4427 arch_spin_unlock(&trace_cmdline_lock);
4431 static int saved_cmdlines_show(struct seq_file *m, void *v)
4433 char buf[TASK_COMM_LEN];
4434 unsigned int *pid = v;
4436 __trace_find_cmdline(*pid, buf);
4437 seq_printf(m, "%d %s\n", *pid, buf);
4441 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4442 .start = saved_cmdlines_start,
4443 .next = saved_cmdlines_next,
4444 .stop = saved_cmdlines_stop,
4445 .show = saved_cmdlines_show,
4448 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4450 if (tracing_disabled)
4453 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4456 static const struct file_operations tracing_saved_cmdlines_fops = {
4457 .open = tracing_saved_cmdlines_open,
4459 .llseek = seq_lseek,
4460 .release = seq_release,
4464 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4465 size_t cnt, loff_t *ppos)
4470 arch_spin_lock(&trace_cmdline_lock);
4471 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4472 arch_spin_unlock(&trace_cmdline_lock);
4474 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4477 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4479 kfree(s->saved_cmdlines);
4480 kfree(s->map_cmdline_to_pid);
4484 static int tracing_resize_saved_cmdlines(unsigned int val)
4486 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4488 s = kmalloc(sizeof(*s), GFP_KERNEL);
4492 if (allocate_cmdlines_buffer(val, s) < 0) {
4497 arch_spin_lock(&trace_cmdline_lock);
4498 savedcmd_temp = savedcmd;
4500 arch_spin_unlock(&trace_cmdline_lock);
4501 free_saved_cmdlines_buffer(savedcmd_temp);
4507 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4508 size_t cnt, loff_t *ppos)
4513 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4517 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4518 if (!val || val > PID_MAX_DEFAULT)
4521 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4530 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4531 .open = tracing_open_generic,
4532 .read = tracing_saved_cmdlines_size_read,
4533 .write = tracing_saved_cmdlines_size_write,
4536 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
4537 static union trace_enum_map_item *
4538 update_enum_map(union trace_enum_map_item *ptr)
4540 if (!ptr->map.enum_string) {
4541 if (ptr->tail.next) {
4542 ptr = ptr->tail.next;
4543 /* Set ptr to the next real item (skip head) */
4551 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4553 union trace_enum_map_item *ptr = v;
4556 * Paranoid! If ptr points to end, we don't want to increment past it.
4557 * This really should never happen.
4559 ptr = update_enum_map(ptr);
4560 if (WARN_ON_ONCE(!ptr))
4567 ptr = update_enum_map(ptr);
4572 static void *enum_map_start(struct seq_file *m, loff_t *pos)
4574 union trace_enum_map_item *v;
4577 mutex_lock(&trace_enum_mutex);
4579 v = trace_enum_maps;
4583 while (v && l < *pos) {
4584 v = enum_map_next(m, v, &l);
4590 static void enum_map_stop(struct seq_file *m, void *v)
4592 mutex_unlock(&trace_enum_mutex);
4595 static int enum_map_show(struct seq_file *m, void *v)
4597 union trace_enum_map_item *ptr = v;
4599 seq_printf(m, "%s %ld (%s)\n",
4600 ptr->map.enum_string, ptr->map.enum_value,
4606 static const struct seq_operations tracing_enum_map_seq_ops = {
4607 .start = enum_map_start,
4608 .next = enum_map_next,
4609 .stop = enum_map_stop,
4610 .show = enum_map_show,
4613 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4615 if (tracing_disabled)
4618 return seq_open(filp, &tracing_enum_map_seq_ops);
4621 static const struct file_operations tracing_enum_map_fops = {
4622 .open = tracing_enum_map_open,
4624 .llseek = seq_lseek,
4625 .release = seq_release,
4628 static inline union trace_enum_map_item *
4629 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4631 /* Return tail of array given the head */
4632 return ptr + ptr->head.length + 1;
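/*
 * Added illustration: each saved block of enum maps is laid out as
 *
 *   [ head: mod, length ][ map 0 ][ map 1 ] ... [ map len-1 ][ tail: next ]
 *
 * so stepping "length + 1" items past the head lands on the tail, whose
 * ->next pointer chains to the block saved for the next module.
 */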
4636 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4639 struct trace_enum_map **stop;
4640 struct trace_enum_map **map;
4641 union trace_enum_map_item *map_array;
4642 union trace_enum_map_item *ptr;
4647 * The trace_enum_maps contains the map plus a head and tail item,
4648 * where the head holds the module and length of array, and the
4649 * tail holds a pointer to the next list.
4651 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4653 pr_warn("Unable to allocate trace enum mapping\n");
4657 mutex_lock(&trace_enum_mutex);
4659 if (!trace_enum_maps)
4660 trace_enum_maps = map_array;
4662 ptr = trace_enum_maps;
4664 ptr = trace_enum_jmp_to_tail(ptr);
4665 if (!ptr->tail.next)
4667 ptr = ptr->tail.next;
4670 ptr->tail.next = map_array;
4672 map_array->head.mod = mod;
4673 map_array->head.length = len;
4676 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4677 map_array->map = **map;
4680 memset(map_array, 0, sizeof(*map_array));
4682 mutex_unlock(&trace_enum_mutex);
4685 static void trace_create_enum_file(struct dentry *d_tracer)
4687 trace_create_file("enum_map", 0444, d_tracer,
4688 NULL, &tracing_enum_map_fops);
4691 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4692 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4693 static inline void trace_insert_enum_map_file(struct module *mod,
4694 struct trace_enum_map **start, int len) { }
4695 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4697 static void trace_insert_enum_map(struct module *mod,
4698 struct trace_enum_map **start, int len)
4700 struct trace_enum_map **map;
4707 trace_event_enum_update(map, len);
4709 trace_insert_enum_map_file(mod, start, len);
4713 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4714 size_t cnt, loff_t *ppos)
4716 struct trace_array *tr = filp->private_data;
4717 char buf[MAX_TRACER_SIZE+2];
4720 mutex_lock(&trace_types_lock);
4721 r = sprintf(buf, "%s\n", tr->current_trace->name);
4722 mutex_unlock(&trace_types_lock);
4724 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4727 int tracer_init(struct tracer *t, struct trace_array *tr)
4729 tracing_reset_online_cpus(&tr->trace_buffer);
4733 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4737 for_each_tracing_cpu(cpu)
4738 per_cpu_ptr(buf->data, cpu)->entries = val;
4741 #ifdef CONFIG_TRACER_MAX_TRACE
4742 /* resize @trace_buf's per-cpu entries to match @size_buf's entries */
4743 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4744 struct trace_buffer *size_buf, int cpu_id)
4748 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4749 for_each_tracing_cpu(cpu) {
4750 ret = ring_buffer_resize(trace_buf->buffer,
4751 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4754 per_cpu_ptr(trace_buf->data, cpu)->entries =
4755 per_cpu_ptr(size_buf->data, cpu)->entries;
4758 ret = ring_buffer_resize(trace_buf->buffer,
4759 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4761 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4762 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4767 #endif /* CONFIG_TRACER_MAX_TRACE */
4769 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4770 unsigned long size, int cpu)
4775	 * If the kernel or the user changes the size of the ring buffer,
4776	 * we use the size that was given, and we can forget about
4777	 * expanding it later.
4779 ring_buffer_expanded = true;
4781 /* May be called before buffers are initialized */
4782 if (!tr->trace_buffer.buffer)
4785 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4789 #ifdef CONFIG_TRACER_MAX_TRACE
4790 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4791 !tr->current_trace->use_max_tr)
4794 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4796 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4797 &tr->trace_buffer, cpu);
4800			 * AARGH! We are left with a different
4801			 * sized max buffer!
4802 * The max buffer is our "snapshot" buffer.
4803 * When a tracer needs a snapshot (one of the
4804 * latency tracers), it swaps the max buffer
4805			 * with the saved snapshot. We succeeded in
4806			 * updating the size of the main buffer, but failed to
4807 * update the size of the max buffer. But when we tried
4808 * to reset the main buffer to the original size, we
4809 * failed there too. This is very unlikely to
4810 * happen, but if it does, warn and kill all
4814 tracing_disabled = 1;
4819 if (cpu == RING_BUFFER_ALL_CPUS)
4820 set_buffer_entries(&tr->max_buffer, size);
4822 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4825 #endif /* CONFIG_TRACER_MAX_TRACE */
4827 if (cpu == RING_BUFFER_ALL_CPUS)
4828 set_buffer_entries(&tr->trace_buffer, size);
4830 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4835 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4836 unsigned long size, int cpu_id)
4840 mutex_lock(&trace_types_lock);
4842 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4843		/* make sure this cpu is enabled in the mask */
4844 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4850 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4855 mutex_unlock(&trace_types_lock);
4862 * tracing_update_buffers - used by tracing facility to expand ring buffers
4864 * To save memory on systems where tracing is configured in but never
4865 * used, the ring buffers are initially set to a minimum size. Once a
4866 * user starts to use the tracing facility, they need to grow to their
4867 * default size.
4869 * This function is to be called when a tracer is about to be used.
4871 int tracing_update_buffers(void)
4875 mutex_lock(&trace_types_lock);
4876 if (!ring_buffer_expanded)
4877 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4878 RING_BUFFER_ALL_CPUS);
4879 mutex_unlock(&trace_types_lock);
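/*
 * Hedged usage sketch (hypothetical caller): any path that is about to
 * start generating trace data is expected to expand the buffers first,
 * e.g.:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... enable the tracer or event ...
 *
 * The error handling shown is an assumption, not taken verbatim from a
 * specific caller.
 */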
4884 struct trace_option_dentry;
4887 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4890 * Used to clear out the tracer before deletion of an instance.
4891 * Must have trace_types_lock held.
4893 static void tracing_set_nop(struct trace_array *tr)
4895 if (tr->current_trace == &nop_trace)
4898 tr->current_trace->enabled--;
4900 if (tr->current_trace->reset)
4901 tr->current_trace->reset(tr);
4903 tr->current_trace = &nop_trace;
4906 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4908 /* Only enable if the directory has been created already. */
4912 create_trace_option_files(tr, t);
4915 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4918 #ifdef CONFIG_TRACER_MAX_TRACE
4923 mutex_lock(&trace_types_lock);
4925 if (!ring_buffer_expanded) {
4926 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4927 RING_BUFFER_ALL_CPUS);
4933 for (t = trace_types; t; t = t->next) {
4934 if (strcmp(t->name, buf) == 0)
4941 if (t == tr->current_trace)
4944 /* Some tracers are only allowed for the top level buffer */
4945 if (!trace_ok_for_array(t, tr)) {
4950 /* If trace pipe files are being read, we can't change the tracer */
4951 if (tr->current_trace->ref) {
4956 trace_branch_disable();
4958 tr->current_trace->enabled--;
4960 if (tr->current_trace->reset)
4961 tr->current_trace->reset(tr);
4963 /* Current trace needs to be nop_trace before synchronize_sched */
4964 tr->current_trace = &nop_trace;
4966 #ifdef CONFIG_TRACER_MAX_TRACE
4967 had_max_tr = tr->allocated_snapshot;
4969 if (had_max_tr && !t->use_max_tr) {
4971		 * We need to make sure that update_max_tr() sees that
4972		 * current_trace changed to nop_trace to keep it from
4973		 * swapping the buffers after we resize it.
4974		 * update_max_tr() is called with interrupts disabled,
4975		 * so a synchronize_sched() is sufficient.
4977 synchronize_sched();
4982 #ifdef CONFIG_TRACER_MAX_TRACE
4983 if (t->use_max_tr && !had_max_tr) {
4984 ret = alloc_snapshot(tr);
4991 ret = tracer_init(t, tr);
4996 tr->current_trace = t;
4997 tr->current_trace->enabled++;
4998 trace_branch_enable(tr);
5000 mutex_unlock(&trace_types_lock);
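/*
 * Interface note: tracing_set_tracer() is what writing to the
 * "current_tracer" tracefs file ends up calling, e.g. (the mount point
 * /sys/kernel/tracing is an assumption):
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *	echo nop > /sys/kernel/tracing/current_tracer
 */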
5006 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5007 size_t cnt, loff_t *ppos)
5009 struct trace_array *tr = filp->private_data;
5010 char buf[MAX_TRACER_SIZE+1];
5017 if (cnt > MAX_TRACER_SIZE)
5018 cnt = MAX_TRACER_SIZE;
5020 if (copy_from_user(buf, ubuf, cnt))
5025	/* strip trailing whitespace. */
5026 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5029 err = tracing_set_tracer(tr, buf);
5039 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5040 size_t cnt, loff_t *ppos)
5045 r = snprintf(buf, sizeof(buf), "%ld\n",
5046 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5047 if (r > sizeof(buf))
5049 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5053 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5054 size_t cnt, loff_t *ppos)
5059 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5069 tracing_thresh_read(struct file *filp, char __user *ubuf,
5070 size_t cnt, loff_t *ppos)
5072 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5076 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5077 size_t cnt, loff_t *ppos)
5079 struct trace_array *tr = filp->private_data;
5082 mutex_lock(&trace_types_lock);
5083 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5087 if (tr->current_trace->update_thresh) {
5088 ret = tr->current_trace->update_thresh(tr);
5095 mutex_unlock(&trace_types_lock);
5100 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5103 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5104 size_t cnt, loff_t *ppos)
5106 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5110 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5111 size_t cnt, loff_t *ppos)
5113 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5118 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5120 struct trace_array *tr = inode->i_private;
5121 struct trace_iterator *iter;
5124 if (tracing_disabled)
5127 if (trace_array_get(tr) < 0)
5130 mutex_lock(&trace_types_lock);
5132 /* create a buffer to store the information to pass to userspace */
5133 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5136 __trace_array_put(tr);
5140 trace_seq_init(&iter->seq);
5141 iter->trace = tr->current_trace;
5143 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5148 /* trace pipe does not show start of buffer */
5149 cpumask_setall(iter->started);
5151 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5152 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5154 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5155 if (trace_clocks[tr->clock_id].in_ns)
5156 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5159 iter->trace_buffer = &tr->trace_buffer;
5160 iter->cpu_file = tracing_get_cpu(inode);
5161 mutex_init(&iter->mutex);
5162 filp->private_data = iter;
5164 if (iter->trace->pipe_open)
5165 iter->trace->pipe_open(iter);
5167 nonseekable_open(inode, filp);
5169 tr->current_trace->ref++;
5171 mutex_unlock(&trace_types_lock);
5177 __trace_array_put(tr);
5178 mutex_unlock(&trace_types_lock);
5182 static int tracing_release_pipe(struct inode *inode, struct file *file)
5184 struct trace_iterator *iter = file->private_data;
5185 struct trace_array *tr = inode->i_private;
5187 mutex_lock(&trace_types_lock);
5189 tr->current_trace->ref--;
5191 if (iter->trace->pipe_close)
5192 iter->trace->pipe_close(iter);
5194 mutex_unlock(&trace_types_lock);
5196 free_cpumask_var(iter->started);
5197 mutex_destroy(&iter->mutex);
5200 trace_array_put(tr);
5206 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5208 struct trace_array *tr = iter->tr;
5210 /* Iterators are static, they should be filled or empty */
5211 if (trace_buffer_iter(iter, iter->cpu_file))
5212 return POLLIN | POLLRDNORM;
5214 if (tr->trace_flags & TRACE_ITER_BLOCK)
5216 * Always select as readable when in blocking mode
5218 return POLLIN | POLLRDNORM;
5220 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5225 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5227 struct trace_iterator *iter = filp->private_data;
5229 return trace_poll(iter, filp, poll_table);
5232 /* Must be called with iter->mutex held. */
5233 static int tracing_wait_pipe(struct file *filp)
5235 struct trace_iterator *iter = filp->private_data;
5238 while (trace_empty(iter)) {
5240 if ((filp->f_flags & O_NONBLOCK)) {
5245		 * We block until we have read something and tracing is disabled.
5246		 * If tracing is disabled but we have not read anything yet, we
5247		 * keep blocking. This allows a user to cat this file, and then
5248		 * enable tracing. But after we have read something, we give an
5249		 * EOF when tracing is disabled again.
5251 * iter->pos will be 0 if we haven't read anything.
5253 if (!tracing_is_on() && iter->pos)
5256 mutex_unlock(&iter->mutex);
5258 ret = wait_on_pipe(iter, false);
5260 mutex_lock(&iter->mutex);
5273 tracing_read_pipe(struct file *filp, char __user *ubuf,
5274 size_t cnt, loff_t *ppos)
5276 struct trace_iterator *iter = filp->private_data;
5280	 * Avoid more than one consumer on a single file descriptor.
5281	 * This is just a matter of trace coherency; the ring buffer itself is protected.
5284 mutex_lock(&iter->mutex);
5286 /* return any leftover data */
5287 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5291 trace_seq_init(&iter->seq);
5293 if (iter->trace->read) {
5294 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5300 sret = tracing_wait_pipe(filp);
5304 /* stop when tracing is finished */
5305 if (trace_empty(iter)) {
5310 if (cnt >= PAGE_SIZE)
5311 cnt = PAGE_SIZE - 1;
5313 /* reset all but tr, trace, and overruns */
5314 memset(&iter->seq, 0,
5315 sizeof(struct trace_iterator) -
5316 offsetof(struct trace_iterator, seq));
5317 cpumask_clear(iter->started);
5320 trace_event_read_lock();
5321 trace_access_lock(iter->cpu_file);
5322 while (trace_find_next_entry_inc(iter) != NULL) {
5323 enum print_line_t ret;
5324 int save_len = iter->seq.seq.len;
5326 ret = print_trace_line(iter);
5327 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5328 /* don't print partial lines */
5329 iter->seq.seq.len = save_len;
5332 if (ret != TRACE_TYPE_NO_CONSUME)
5333 trace_consume(iter);
5335 if (trace_seq_used(&iter->seq) >= cnt)
5339		 * Setting the full flag means we reached the trace_seq buffer
5340		 * size and we should have left via the partial-output condition
5341		 * above. One of the trace_seq_* functions is not being used properly.
5343 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5346 trace_access_unlock(iter->cpu_file);
5347 trace_event_read_unlock();
5349 /* Now copy what we have to the user */
5350 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5351 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5352 trace_seq_init(&iter->seq);
5355	 * If there was nothing to send to the user, in spite of consuming trace
5356	 * entries, go back to wait for more entries.
5362 mutex_unlock(&iter->mutex);
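/*
 * Hedged userspace sketch of a trace_pipe consumer. Reads block until
 * data is available (unless O_NONBLOCK is set), and each read consumes
 * the entries it returns. The path assumes tracefs is mounted at
 * /sys/kernel/tracing.
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 *	close(fd);
 */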
5367 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5370 __free_page(spd->pages[idx]);
5373 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5375 .confirm = generic_pipe_buf_confirm,
5376 .release = generic_pipe_buf_release,
5377 .steal = generic_pipe_buf_steal,
5378 .get = generic_pipe_buf_get,
5382 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5388 /* Seq buffer is page-sized, exactly what we need. */
5390 save_len = iter->seq.seq.len;
5391 ret = print_trace_line(iter);
5393 if (trace_seq_has_overflowed(&iter->seq)) {
5394 iter->seq.seq.len = save_len;
5399 * This should not be hit, because it should only
5400 * be set if the iter->seq overflowed. But check it
5401 * anyway to be safe.
5403 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5404 iter->seq.seq.len = save_len;
5408 count = trace_seq_used(&iter->seq) - save_len;
5411 iter->seq.seq.len = save_len;
5415 if (ret != TRACE_TYPE_NO_CONSUME)
5416 trace_consume(iter);
5418 if (!trace_find_next_entry_inc(iter)) {
5428 static ssize_t tracing_splice_read_pipe(struct file *filp,
5430 struct pipe_inode_info *pipe,
5434 struct page *pages_def[PIPE_DEF_BUFFERS];
5435 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5436 struct trace_iterator *iter = filp->private_data;
5437 struct splice_pipe_desc spd = {
5439 .partial = partial_def,
5440 .nr_pages = 0, /* This gets updated below. */
5441 .nr_pages_max = PIPE_DEF_BUFFERS,
5443 .ops = &tracing_pipe_buf_ops,
5444 .spd_release = tracing_spd_release_pipe,
5450 if (splice_grow_spd(pipe, &spd))
5453 mutex_lock(&iter->mutex);
5455 if (iter->trace->splice_read) {
5456 ret = iter->trace->splice_read(iter, filp,
5457 ppos, pipe, len, flags);
5462 ret = tracing_wait_pipe(filp);
5466 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5471 trace_event_read_lock();
5472 trace_access_lock(iter->cpu_file);
5474 /* Fill as many pages as possible. */
5475 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5476 spd.pages[i] = alloc_page(GFP_KERNEL);
5480 rem = tracing_fill_pipe_page(rem, iter);
5482 /* Copy the data into the page, so we can start over. */
5483 ret = trace_seq_to_buffer(&iter->seq,
5484 page_address(spd.pages[i]),
5485 trace_seq_used(&iter->seq));
5487 __free_page(spd.pages[i]);
5490 spd.partial[i].offset = 0;
5491 spd.partial[i].len = trace_seq_used(&iter->seq);
5493 trace_seq_init(&iter->seq);
5496 trace_access_unlock(iter->cpu_file);
5497 trace_event_read_unlock();
5498 mutex_unlock(&iter->mutex);
5503 ret = splice_to_pipe(pipe, &spd);
5507 splice_shrink_spd(&spd);
5511 mutex_unlock(&iter->mutex);
5516 tracing_entries_read(struct file *filp, char __user *ubuf,
5517 size_t cnt, loff_t *ppos)
5519 struct inode *inode = file_inode(filp);
5520 struct trace_array *tr = inode->i_private;
5521 int cpu = tracing_get_cpu(inode);
5526 mutex_lock(&trace_types_lock);
5528 if (cpu == RING_BUFFER_ALL_CPUS) {
5529 int cpu, buf_size_same;
5534 /* check if all cpu sizes are same */
5535 for_each_tracing_cpu(cpu) {
5536 /* fill in the size from first enabled cpu */
5538 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5539 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5545 if (buf_size_same) {
5546 if (!ring_buffer_expanded)
5547 r = sprintf(buf, "%lu (expanded: %lu)\n",
5549 trace_buf_size >> 10);
5551 r = sprintf(buf, "%lu\n", size >> 10);
5553 r = sprintf(buf, "X\n");
5555 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5557 mutex_unlock(&trace_types_lock);
5559 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5564 tracing_entries_write(struct file *filp, const char __user *ubuf,
5565 size_t cnt, loff_t *ppos)
5567 struct inode *inode = file_inode(filp);
5568 struct trace_array *tr = inode->i_private;
5572 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5576 /* must have at least 1 entry */
5580 /* value is in KB */
5582 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5592 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5593 size_t cnt, loff_t *ppos)
5595 struct trace_array *tr = filp->private_data;
5598 unsigned long size = 0, expanded_size = 0;
5600 mutex_lock(&trace_types_lock);
5601 for_each_tracing_cpu(cpu) {
5602 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5603 if (!ring_buffer_expanded)
5604 expanded_size += trace_buf_size >> 10;
5606 if (ring_buffer_expanded)
5607 r = sprintf(buf, "%lu\n", size);
5609 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5610 mutex_unlock(&trace_types_lock);
5612 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
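/*
 * Interface note (hedged): these handlers back the "buffer_size_kb" and
 * "buffer_total_size_kb" tracefs files. Sizes are per CPU and in KB, e.g.:
 *
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb   # 4 MB per CPU
 *	cat /sys/kernel/tracing/buffer_total_size_kb
 *
 * The mount point is an assumption; a value shown with "(expanded: ...)"
 * means the buffers are still at their boot-time minimum size.
 */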
5616 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5617 size_t cnt, loff_t *ppos)
5620	 * There is no need to read what the user has written; this function
5621	 * only exists so that using "echo" on this file does not report an error.
5630 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5632 struct trace_array *tr = inode->i_private;
5634 /* disable tracing ? */
5635 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5636 tracer_tracing_off(tr);
5637 /* resize the ring buffer to 0 */
5638 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5640 trace_array_put(tr);
5645 static inline int lock_user_pages(const char __user *ubuf, size_t cnt,
5646 struct page **pages, void **map_page,
5649 unsigned long addr = (unsigned long)ubuf;
5655	 * Userspace is injecting traces into the kernel trace buffer.
5656	 * We want to be as non-intrusive as possible.
5657	 * To do so, we do not want to allocate any special buffers
5658	 * or take any locks, but instead write the userspace data
5659	 * straight into the ring buffer.
5661	 * First we need to pin the userspace buffer into memory. It most
5662	 * likely already is, because the task just referenced it, but
5663	 * there is no guarantee that it is. By using get_user_pages_fast()
5664	 * and kmap_atomic()/kunmap_atomic() we can get access to the pages
5665	 * directly and write the data straight into the ring buffer.
5669 /* check if we cross pages */
5670 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5673 *offset = addr & (PAGE_SIZE - 1);
5676 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5677 if (ret < nr_pages) {
5679 put_page(pages[ret]);
5683 for (i = 0; i < nr_pages; i++)
5684 map_page[i] = kmap_atomic(pages[i]);
5689 static inline void unlock_user_pages(struct page **pages,
5690 void **map_page, int nr_pages)
5694 for (i = nr_pages - 1; i >= 0; i--) {
5695 kunmap_atomic(map_page[i]);
5701 tracing_mark_write(struct file *filp, const char __user *ubuf,
5702 size_t cnt, loff_t *fpos)
5704 struct trace_array *tr = filp->private_data;
5705 struct ring_buffer_event *event;
5706 struct ring_buffer *buffer;
5707 struct print_entry *entry;
5708 unsigned long irq_flags;
5709 struct page *pages[2];
5717 if (tracing_disabled)
5720 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5723 if (cnt > TRACE_BUF_SIZE)
5724 cnt = TRACE_BUF_SIZE;
5726 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5728 nr_pages = lock_user_pages(ubuf, cnt, pages, map_page, &offset);
5732 local_save_flags(irq_flags);
5733 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5734 buffer = tr->trace_buffer.buffer;
5735 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5736 irq_flags, preempt_count());
5738 /* Ring buffer disabled, return as if not open for write */
5743 entry = ring_buffer_event_data(event);
5744 entry->ip = _THIS_IP_;
5746 if (nr_pages == 2) {
5747 len = PAGE_SIZE - offset;
5748 memcpy(&entry->buf, map_page[0] + offset, len);
5749 memcpy(&entry->buf[len], map_page[1], cnt - len);
5751 memcpy(&entry->buf, map_page[0] + offset, cnt);
5753 if (entry->buf[cnt - 1] != '\n') {
5754 entry->buf[cnt] = '\n';
5755 entry->buf[cnt + 1] = '\0';
5757 entry->buf[cnt] = '\0';
5759 __buffer_unlock_commit(buffer, event);
5766 unlock_user_pages(pages, map_page, nr_pages);
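/*
 * Interface note (hedged): this backs the "trace_marker" tracefs file.
 * A write injects the string into the trace as a print event, e.g.:
 *
 *	echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 *
 * or, from C (sketch, error handling omitted):
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello from userspace", 20);
 *
 * The mount point is an assumption; the string length is capped at
 * TRACE_BUF_SIZE and a newline is appended if missing.
 */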
5771 /* Limit it for now to 3K (including tag) */
5772 #define RAW_DATA_MAX_SIZE (1024*3)
5775 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
5776 size_t cnt, loff_t *fpos)
5778 struct trace_array *tr = filp->private_data;
5779 struct ring_buffer_event *event;
5780 struct ring_buffer *buffer;
5781 struct raw_data_entry *entry;
5782 unsigned long irq_flags;
5783 struct page *pages[2];
5791 if (tracing_disabled)
5794 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5797 /* The marker must at least have a tag id */
5798 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
5801 if (cnt > TRACE_BUF_SIZE)
5802 cnt = TRACE_BUF_SIZE;
5804 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5806 nr_pages = lock_user_pages(ubuf, cnt, pages, map_page, &offset);
5810 local_save_flags(irq_flags);
5811 size = sizeof(*entry) + cnt;
5812 buffer = tr->trace_buffer.buffer;
5813 event = trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
5814 irq_flags, preempt_count());
5816 /* Ring buffer disabled, return as if not open for write */
5821 entry = ring_buffer_event_data(event);
5823 if (nr_pages == 2) {
5824 len = PAGE_SIZE - offset;
5825 memcpy(&entry->id, map_page[0] + offset, len);
5826 memcpy(((char *)&entry->id) + len, map_page[1], cnt - len);
5828 memcpy(&entry->id, map_page[0] + offset, cnt);
5830 __buffer_unlock_commit(buffer, event);
5837 unlock_user_pages(pages, map_page, nr_pages);
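/*
 * Hedged userspace sketch for "trace_marker_raw": the payload must start
 * with an unsigned int tag id, followed by raw data, written in a single
 * write(). The record layout below mirrors how entry->id is filled in
 * above; the mount point is an assumption.
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *	write(fd, &rec, sizeof(rec));
 */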
5842 static int tracing_clock_show(struct seq_file *m, void *v)
5844 struct trace_array *tr = m->private;
5847 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5849 "%s%s%s%s", i ? " " : "",
5850 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5851 i == tr->clock_id ? "]" : "");
5857 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5861 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5862 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5865 if (i == ARRAY_SIZE(trace_clocks))
5868 mutex_lock(&trace_types_lock);
5872 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5875 * New clock may not be consistent with the previous clock.
5876 * Reset the buffer so that it doesn't have incomparable timestamps.
5878 tracing_reset_online_cpus(&tr->trace_buffer);
5880 #ifdef CONFIG_TRACER_MAX_TRACE
5881 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5882 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5883 tracing_reset_online_cpus(&tr->max_buffer);
5886 mutex_unlock(&trace_types_lock);
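/*
 * Interface note (hedged): this backs the "trace_clock" tracefs file.
 * Reading it lists the available clocks with the current one in brackets;
 * writing a name switches clocks and resets the buffers (timestamps from
 * different clocks are not comparable), e.g.:
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo global > /sys/kernel/tracing/trace_clock
 *
 * The mount point is an assumption; "global" is one of the clocks listed
 * in trace_clocks[].
 */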
5891 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5892 size_t cnt, loff_t *fpos)
5894 struct seq_file *m = filp->private_data;
5895 struct trace_array *tr = m->private;
5897 const char *clockstr;
5900 if (cnt >= sizeof(buf))
5903 if (copy_from_user(buf, ubuf, cnt))
5908 clockstr = strstrip(buf);
5910 ret = tracing_set_clock(tr, clockstr);
5919 static int tracing_clock_open(struct inode *inode, struct file *file)
5921 struct trace_array *tr = inode->i_private;
5924 if (tracing_disabled)
5927 if (trace_array_get(tr))
5930 ret = single_open(file, tracing_clock_show, inode->i_private);
5932 trace_array_put(tr);
5937 struct ftrace_buffer_info {
5938 struct trace_iterator iter;
5943 #ifdef CONFIG_TRACER_SNAPSHOT
5944 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5946 struct trace_array *tr = inode->i_private;
5947 struct trace_iterator *iter;
5951 if (trace_array_get(tr) < 0)
5954 if (file->f_mode & FMODE_READ) {
5955 iter = __tracing_open(inode, file, true);
5957 ret = PTR_ERR(iter);
5959 /* Writes still need the seq_file to hold the private data */
5961 m = kzalloc(sizeof(*m), GFP_KERNEL);
5964 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5972 iter->trace_buffer = &tr->max_buffer;
5973 iter->cpu_file = tracing_get_cpu(inode);
5975 file->private_data = m;
5979 trace_array_put(tr);
5985 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5988 struct seq_file *m = filp->private_data;
5989 struct trace_iterator *iter = m->private;
5990 struct trace_array *tr = iter->tr;
5994 ret = tracing_update_buffers();
5998 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6002 mutex_lock(&trace_types_lock);
6004 if (tr->current_trace->use_max_tr) {
6011 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6015 if (tr->allocated_snapshot)
6019 /* Only allow per-cpu swap if the ring buffer supports it */
6020 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6021 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6026 if (!tr->allocated_snapshot) {
6027 ret = alloc_snapshot(tr);
6031 local_irq_disable();
6032 /* Now, we're going to swap */
6033 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6034 update_max_tr(tr, current, smp_processor_id());
6036 update_max_tr_single(tr, current, iter->cpu_file);
6040 if (tr->allocated_snapshot) {
6041 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6042 tracing_reset_online_cpus(&tr->max_buffer);
6044 tracing_reset(&tr->max_buffer, iter->cpu_file);
6054 mutex_unlock(&trace_types_lock);
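/*
 * Interface note (hedged, per the ftrace documentation): the "snapshot"
 * file is driven by the value written to it, handled by the switch on
 * 'val' in tracing_snapshot_write():
 *
 *	echo 1 > /sys/kernel/tracing/snapshot   # allocate (if needed) and swap
 *	echo 0 > /sys/kernel/tracing/snapshot   # free the snapshot buffer
 *	echo 2 > /sys/kernel/tracing/snapshot   # clear it without freeing
 *	cat /sys/kernel/tracing/snapshot        # read the snapshotted trace
 *
 * The mount point is an assumption.
 */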
6058 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6060 struct seq_file *m = file->private_data;
6063 ret = tracing_release(inode, file);
6065 if (file->f_mode & FMODE_READ)
6068 /* If write only, the seq_file is just a stub */
6076 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6077 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6078 size_t count, loff_t *ppos);
6079 static int tracing_buffers_release(struct inode *inode, struct file *file);
6080 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6081 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6083 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6085 struct ftrace_buffer_info *info;
6088 ret = tracing_buffers_open(inode, filp);
6092 info = filp->private_data;
6094 if (info->iter.trace->use_max_tr) {
6095 tracing_buffers_release(inode, filp);
6099 info->iter.snapshot = true;
6100 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6105 #endif /* CONFIG_TRACER_SNAPSHOT */
6108 static const struct file_operations tracing_thresh_fops = {
6109 .open = tracing_open_generic,
6110 .read = tracing_thresh_read,
6111 .write = tracing_thresh_write,
6112 .llseek = generic_file_llseek,
6115 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6116 static const struct file_operations tracing_max_lat_fops = {
6117 .open = tracing_open_generic,
6118 .read = tracing_max_lat_read,
6119 .write = tracing_max_lat_write,
6120 .llseek = generic_file_llseek,
6124 static const struct file_operations set_tracer_fops = {
6125 .open = tracing_open_generic,
6126 .read = tracing_set_trace_read,
6127 .write = tracing_set_trace_write,
6128 .llseek = generic_file_llseek,
6131 static const struct file_operations tracing_pipe_fops = {
6132 .open = tracing_open_pipe,
6133 .poll = tracing_poll_pipe,
6134 .read = tracing_read_pipe,
6135 .splice_read = tracing_splice_read_pipe,
6136 .release = tracing_release_pipe,
6137 .llseek = no_llseek,
6140 static const struct file_operations tracing_entries_fops = {
6141 .open = tracing_open_generic_tr,
6142 .read = tracing_entries_read,
6143 .write = tracing_entries_write,
6144 .llseek = generic_file_llseek,
6145 .release = tracing_release_generic_tr,
6148 static const struct file_operations tracing_total_entries_fops = {
6149 .open = tracing_open_generic_tr,
6150 .read = tracing_total_entries_read,
6151 .llseek = generic_file_llseek,
6152 .release = tracing_release_generic_tr,
6155 static const struct file_operations tracing_free_buffer_fops = {
6156 .open = tracing_open_generic_tr,
6157 .write = tracing_free_buffer_write,
6158 .release = tracing_free_buffer_release,
6161 static const struct file_operations tracing_mark_fops = {
6162 .open = tracing_open_generic_tr,
6163 .write = tracing_mark_write,
6164 .llseek = generic_file_llseek,
6165 .release = tracing_release_generic_tr,
6168 static const struct file_operations tracing_mark_raw_fops = {
6169 .open = tracing_open_generic_tr,
6170 .write = tracing_mark_raw_write,
6171 .llseek = generic_file_llseek,
6172 .release = tracing_release_generic_tr,
6175 static const struct file_operations trace_clock_fops = {
6176 .open = tracing_clock_open,
6178 .llseek = seq_lseek,
6179 .release = tracing_single_release_tr,
6180 .write = tracing_clock_write,
6183 #ifdef CONFIG_TRACER_SNAPSHOT
6184 static const struct file_operations snapshot_fops = {
6185 .open = tracing_snapshot_open,
6187 .write = tracing_snapshot_write,
6188 .llseek = tracing_lseek,
6189 .release = tracing_snapshot_release,
6192 static const struct file_operations snapshot_raw_fops = {
6193 .open = snapshot_raw_open,
6194 .read = tracing_buffers_read,
6195 .release = tracing_buffers_release,
6196 .splice_read = tracing_buffers_splice_read,
6197 .llseek = no_llseek,
6200 #endif /* CONFIG_TRACER_SNAPSHOT */
6202 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6204 struct trace_array *tr = inode->i_private;
6205 struct ftrace_buffer_info *info;
6208 if (tracing_disabled)
6211 if (trace_array_get(tr) < 0)
6214 info = kzalloc(sizeof(*info), GFP_KERNEL);
6216 trace_array_put(tr);
6220 mutex_lock(&trace_types_lock);
6223 info->iter.cpu_file = tracing_get_cpu(inode);
6224 info->iter.trace = tr->current_trace;
6225 info->iter.trace_buffer = &tr->trace_buffer;
6227 /* Force reading ring buffer for first read */
6228 info->read = (unsigned int)-1;
6230 filp->private_data = info;
6232 tr->current_trace->ref++;
6234 mutex_unlock(&trace_types_lock);
6236 ret = nonseekable_open(inode, filp);
6238 trace_array_put(tr);
6244 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6246 struct ftrace_buffer_info *info = filp->private_data;
6247 struct trace_iterator *iter = &info->iter;
6249 return trace_poll(iter, filp, poll_table);
6253 tracing_buffers_read(struct file *filp, char __user *ubuf,
6254 size_t count, loff_t *ppos)
6256 struct ftrace_buffer_info *info = filp->private_data;
6257 struct trace_iterator *iter = &info->iter;
6264 #ifdef CONFIG_TRACER_MAX_TRACE
6265 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6270 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6275 /* Do we have previous read data to read? */
6276 if (info->read < PAGE_SIZE)
6280 trace_access_lock(iter->cpu_file);
6281 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6285 trace_access_unlock(iter->cpu_file);
6288 if (trace_empty(iter)) {
6289 if ((filp->f_flags & O_NONBLOCK))
6292 ret = wait_on_pipe(iter, false);
6303 size = PAGE_SIZE - info->read;
6307 ret = copy_to_user(ubuf, info->spare + info->read, size);
6319 static int tracing_buffers_release(struct inode *inode, struct file *file)
6321 struct ftrace_buffer_info *info = file->private_data;
6322 struct trace_iterator *iter = &info->iter;
6324 mutex_lock(&trace_types_lock);
6326 iter->tr->current_trace->ref--;
6328 __trace_array_put(iter->tr);
6331 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
6334 mutex_unlock(&trace_types_lock);
6340 struct ring_buffer *buffer;
6345 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6346 struct pipe_buffer *buf)
6348 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6353 ring_buffer_free_read_page(ref->buffer, ref->page);
6358 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6359 struct pipe_buffer *buf)
6361 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6366 /* Pipe buffer operations for a buffer. */
6367 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6369 .confirm = generic_pipe_buf_confirm,
6370 .release = buffer_pipe_buf_release,
6371 .steal = generic_pipe_buf_steal,
6372 .get = buffer_pipe_buf_get,
6376 * Callback from splice_to_pipe(), if we need to release some pages
6377 * at the end of the spd in case we errored out while filling the pipe.
6379 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6381 struct buffer_ref *ref =
6382 (struct buffer_ref *)spd->partial[i].private;
6387 ring_buffer_free_read_page(ref->buffer, ref->page);
6389 spd->partial[i].private = 0;
6393 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6394 struct pipe_inode_info *pipe, size_t len,
6397 struct ftrace_buffer_info *info = file->private_data;
6398 struct trace_iterator *iter = &info->iter;
6399 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6400 struct page *pages_def[PIPE_DEF_BUFFERS];
6401 struct splice_pipe_desc spd = {
6403 .partial = partial_def,
6404 .nr_pages_max = PIPE_DEF_BUFFERS,
6406 .ops = &buffer_pipe_buf_ops,
6407 .spd_release = buffer_spd_release,
6409 struct buffer_ref *ref;
6410 int entries, size, i;
6413 #ifdef CONFIG_TRACER_MAX_TRACE
6414 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6418 if (*ppos & (PAGE_SIZE - 1))
6421 if (len & (PAGE_SIZE - 1)) {
6422 if (len < PAGE_SIZE)
6427 if (splice_grow_spd(pipe, &spd))
6431 trace_access_lock(iter->cpu_file);
6432 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6434 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6438 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6445 ref->buffer = iter->trace_buffer->buffer;
6446 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6453 r = ring_buffer_read_page(ref->buffer, &ref->page,
6454 len, iter->cpu_file, 1);
6456 ring_buffer_free_read_page(ref->buffer, ref->page);
6462		/* Zero out any leftover data; this page is going to user space. */
6465 size = ring_buffer_page_len(ref->page);
6466 if (size < PAGE_SIZE)
6467 memset(ref->page + size, 0, PAGE_SIZE - size);
6469 page = virt_to_page(ref->page);
6471 spd.pages[i] = page;
6472 spd.partial[i].len = PAGE_SIZE;
6473 spd.partial[i].offset = 0;
6474 spd.partial[i].private = (unsigned long)ref;
6478 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6481 trace_access_unlock(iter->cpu_file);
6484 /* did we read anything? */
6485 if (!spd.nr_pages) {
6490 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6493 ret = wait_on_pipe(iter, true);
6500 ret = splice_to_pipe(pipe, &spd);
6502 splice_shrink_spd(&spd);
6507 static const struct file_operations tracing_buffers_fops = {
6508 .open = tracing_buffers_open,
6509 .read = tracing_buffers_read,
6510 .poll = tracing_buffers_poll,
6511 .release = tracing_buffers_release,
6512 .splice_read = tracing_buffers_splice_read,
6513 .llseek = no_llseek,
6517 tracing_stats_read(struct file *filp, char __user *ubuf,
6518 size_t count, loff_t *ppos)
6520 struct inode *inode = file_inode(filp);
6521 struct trace_array *tr = inode->i_private;
6522 struct trace_buffer *trace_buf = &tr->trace_buffer;
6523 int cpu = tracing_get_cpu(inode);
6524 struct trace_seq *s;
6526 unsigned long long t;
6527 unsigned long usec_rem;
6529 s = kmalloc(sizeof(*s), GFP_KERNEL);
6535 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
6536 trace_seq_printf(s, "entries: %ld\n", cnt);
6538 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
6539 trace_seq_printf(s, "overrun: %ld\n", cnt);
6541 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
6542 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6544 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
6545 trace_seq_printf(s, "bytes: %ld\n", cnt);
6547 if (trace_clocks[tr->clock_id].in_ns) {
6548 /* local or global for trace_clock */
6549 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6550 usec_rem = do_div(t, USEC_PER_SEC);
6551 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6554 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
6555 usec_rem = do_div(t, USEC_PER_SEC);
6556 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6558 /* counter or tsc mode for trace_clock */
6559 trace_seq_printf(s, "oldest event ts: %llu\n",
6560 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6562 trace_seq_printf(s, "now ts: %llu\n",
6563 ring_buffer_time_stamp(trace_buf->buffer, cpu));
6566 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
6567 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6569 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
6570 trace_seq_printf(s, "read events: %ld\n", cnt);
6572 count = simple_read_from_buffer(ubuf, count, ppos,
6573 s->buffer, trace_seq_used(s));
6580 static const struct file_operations tracing_stats_fops = {
6581 .open = tracing_open_generic_tr,
6582 .read = tracing_stats_read,
6583 .llseek = generic_file_llseek,
6584 .release = tracing_release_generic_tr,
6587 #ifdef CONFIG_DYNAMIC_FTRACE
6589 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
6595 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
6596 size_t cnt, loff_t *ppos)
6598 static char ftrace_dyn_info_buffer[1024];
6599 static DEFINE_MUTEX(dyn_info_mutex);
6600 unsigned long *p = filp->private_data;
6601 char *buf = ftrace_dyn_info_buffer;
6602 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
6605 mutex_lock(&dyn_info_mutex);
6606 r = sprintf(buf, "%ld ", *p);
6608 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
6611 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6613 mutex_unlock(&dyn_info_mutex);
6618 static const struct file_operations tracing_dyn_info_fops = {
6619 .open = tracing_open_generic,
6620 .read = tracing_read_dyn_info,
6621 .llseek = generic_file_llseek,
6623 #endif /* CONFIG_DYNAMIC_FTRACE */
6625 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6627 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6633 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6635 unsigned long *count = (long *)data;
6647 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6648 struct ftrace_probe_ops *ops, void *data)
6650 long count = (long)data;
6652 seq_printf(m, "%ps:", (void *)ip);
6654 seq_puts(m, "snapshot");
6657 seq_puts(m, ":unlimited\n");
6659 seq_printf(m, ":count=%ld\n", count);
6664 static struct ftrace_probe_ops snapshot_probe_ops = {
6665 .func = ftrace_snapshot,
6666 .print = ftrace_snapshot_print,
6669 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6670 .func = ftrace_count_snapshot,
6671 .print = ftrace_snapshot_print,
6675 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6676 char *glob, char *cmd, char *param, int enable)
6678 struct ftrace_probe_ops *ops;
6679 void *count = (void *)-1;
6683 /* hash funcs only work with set_ftrace_filter */
6687 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6689 if (glob[0] == '!') {
6690 unregister_ftrace_function_probe_func(glob+1, ops);
6697	number = strsep(&param, ":");
6699 if (!strlen(number))
6703 * We use the callback data field (which is a pointer)
6706 ret = kstrtoul(number, 0, (unsigned long *)&count);
6711 ret = register_ftrace_function_probe(glob, ops, count);
6714 alloc_snapshot(&global_trace);
6716 return ret < 0 ? ret : 0;
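/*
 * Interface note (hedged): this implements the "snapshot" command of
 * set_ftrace_filter, which takes a snapshot when the given function is
 * hit, optionally limited to a count, e.g.:
 *
 *	echo 'schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *	echo 'schedule:snapshot:5' > /sys/kernel/tracing/set_ftrace_filter
 *	echo '!schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *
 * "schedule" is just an example function; the mount point is an assumption.
 */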
6719 static struct ftrace_func_command ftrace_snapshot_cmd = {
6721 .func = ftrace_trace_snapshot_callback,
6724 static __init int register_snapshot_cmd(void)
6726 return register_ftrace_command(&ftrace_snapshot_cmd);
6729 static inline __init int register_snapshot_cmd(void) { return 0; }
6730 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6732 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6734 if (WARN_ON(!tr->dir))
6735 return ERR_PTR(-ENODEV);
6737 /* Top directory uses NULL as the parent */
6738 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6741 /* All sub buffers have a descriptor */
6745 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6747 struct dentry *d_tracer;
6750 return tr->percpu_dir;
6752 d_tracer = tracing_get_dentry(tr);
6753 if (IS_ERR(d_tracer))
6756 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6758 WARN_ONCE(!tr->percpu_dir,
6759 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6761 return tr->percpu_dir;
6764 static struct dentry *
6765 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6766 void *data, long cpu, const struct file_operations *fops)
6768 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6770 if (ret) /* See tracing_get_cpu() */
6771 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6776 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6778 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6779 struct dentry *d_cpu;
6780 char cpu_dir[30]; /* 30 characters should be more than enough */
6785 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6786 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6788 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
6792 /* per cpu trace_pipe */
6793 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6794 tr, cpu, &tracing_pipe_fops);
6797 trace_create_cpu_file("trace", 0644, d_cpu,
6798 tr, cpu, &tracing_fops);
6800 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6801 tr, cpu, &tracing_buffers_fops);
6803 trace_create_cpu_file("stats", 0444, d_cpu,
6804 tr, cpu, &tracing_stats_fops);
6806 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6807 tr, cpu, &tracing_entries_fops);
6809 #ifdef CONFIG_TRACER_SNAPSHOT
6810 trace_create_cpu_file("snapshot", 0644, d_cpu,
6811 tr, cpu, &snapshot_fops);
6813 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6814 tr, cpu, &snapshot_raw_fops);
6818 #ifdef CONFIG_FTRACE_SELFTEST
6819 /* Let selftest have access to static functions in this file */
6820 #include "trace_selftest.c"
6824 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6827 struct trace_option_dentry *topt = filp->private_data;
6830 if (topt->flags->val & topt->opt->bit)
6835 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6839 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6842 struct trace_option_dentry *topt = filp->private_data;
6846 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6850 if (val != 0 && val != 1)
6853 if (!!(topt->flags->val & topt->opt->bit) != val) {
6854 mutex_lock(&trace_types_lock);
6855 ret = __set_tracer_option(topt->tr, topt->flags,
6857 mutex_unlock(&trace_types_lock);
6868 static const struct file_operations trace_options_fops = {
6869 .open = tracing_open_generic,
6870 .read = trace_options_read,
6871 .write = trace_options_write,
6872 .llseek = generic_file_llseek,
6876 * In order to pass in both the trace_array descriptor as well as the index
6877 * to the flag that the trace option file represents, the trace_array
6878 * has a character array of trace_flags_index[], which holds the index
6879 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6880 * The address of this character array is passed to the flag option file
6881 * read/write callbacks.
6883 * In order to extract both the index and the trace_array descriptor,
6884 * get_tr_index() uses the following algorithm.
6888 * As the pointer itself contains the address of the index (remember
6889 * index[n] == n), dereferencing it gives the index. Then, to get the
6891 * trace_array descriptor, we subtract that index from the pointer,
6892 * which takes us to the start of the index array itself:
6894 *   ptr - idx == &index[0]
6896 * Then a simple container_of() from that pointer gets us to the
6897 * trace_array descriptor.
6899 static void get_tr_index(void *data, struct trace_array **ptr,
6900 unsigned int *pindex)
6902 *pindex = *(unsigned char *)data;
6904 *ptr = container_of(data - *pindex, struct trace_array,
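/*
 * Worked example (sketch): if the flag file was created for index 3,
 * its private data is &tr->trace_flags_index[3], whose stored value is 3.
 * Subtracting 3 from that pointer yields &tr->trace_flags_index[0], and
 * container_of() on that address recovers the enclosing trace_array:
 *
 *	unsigned char *data = &tr->trace_flags_index[3];
 *	unsigned int idx = *data;                        // == 3
 *	struct trace_array *found =
 *		container_of((void *)(data - idx), struct trace_array,
 *			     trace_flags_index);         // found == tr
 */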
6909 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6912 void *tr_index = filp->private_data;
6913 struct trace_array *tr;
6917 get_tr_index(tr_index, &tr, &index);
6919 if (tr->trace_flags & (1 << index))
6924 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6928 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6931 void *tr_index = filp->private_data;
6932 struct trace_array *tr;
6937 get_tr_index(tr_index, &tr, &index);
6939 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6943 if (val != 0 && val != 1)
6946 mutex_lock(&trace_types_lock);
6947 ret = set_tracer_flag(tr, 1 << index, val);
6948 mutex_unlock(&trace_types_lock);
6958 static const struct file_operations trace_options_core_fops = {
6959 .open = tracing_open_generic,
6960 .read = trace_options_core_read,
6961 .write = trace_options_core_write,
6962 .llseek = generic_file_llseek,
6965 struct dentry *trace_create_file(const char *name,
6967 struct dentry *parent,
6969 const struct file_operations *fops)
6973 ret = tracefs_create_file(name, mode, parent, data, fops);
6975 pr_warn("Could not create tracefs '%s' entry\n", name);
6981 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6983 struct dentry *d_tracer;
6988 d_tracer = tracing_get_dentry(tr);
6989 if (IS_ERR(d_tracer))
6992 tr->options = tracefs_create_dir("options", d_tracer);
6994 pr_warn("Could not create tracefs directory 'options'\n");
7002 create_trace_option_file(struct trace_array *tr,
7003 struct trace_option_dentry *topt,
7004 struct tracer_flags *flags,
7005 struct tracer_opt *opt)
7007 struct dentry *t_options;
7009 t_options = trace_options_init_dentry(tr);
7013 topt->flags = flags;
7017 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7018 &trace_options_fops);
7023 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7025 struct trace_option_dentry *topts;
7026 struct trace_options *tr_topts;
7027 struct tracer_flags *flags;
7028 struct tracer_opt *opts;
7035 flags = tracer->flags;
7037 if (!flags || !flags->opts)
7041 * If this is an instance, only create flags for tracers
7042 * the instance may have.
7044 if (!trace_ok_for_array(tracer, tr))
7047 for (i = 0; i < tr->nr_topts; i++) {
7048		/* Make sure there are no duplicate flags. */
7049 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7055 for (cnt = 0; opts[cnt].name; cnt++)
7058 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7062 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7069 tr->topts = tr_topts;
7070 tr->topts[tr->nr_topts].tracer = tracer;
7071 tr->topts[tr->nr_topts].topts = topts;
7074 for (cnt = 0; opts[cnt].name; cnt++) {
7075 create_trace_option_file(tr, &topts[cnt], flags,
7077 WARN_ONCE(topts[cnt].entry == NULL,
7078 "Failed to create trace option: %s",
7083 static struct dentry *
7084 create_trace_option_core_file(struct trace_array *tr,
7085 const char *option, long index)
7087 struct dentry *t_options;
7089 t_options = trace_options_init_dentry(tr);
7093 return trace_create_file(option, 0644, t_options,
7094 (void *)&tr->trace_flags_index[index],
7095 &trace_options_core_fops);
7098 static void create_trace_options_dir(struct trace_array *tr)
7100 struct dentry *t_options;
7101 bool top_level = tr == &global_trace;
7104 t_options = trace_options_init_dentry(tr);
7108 for (i = 0; trace_options[i]; i++) {
7110 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7111 create_trace_option_core_file(tr, trace_options[i], i);
7116 rb_simple_read(struct file *filp, char __user *ubuf,
7117 size_t cnt, loff_t *ppos)
7119 struct trace_array *tr = filp->private_data;
7123 r = tracer_tracing_is_on(tr);
7124 r = sprintf(buf, "%d\n", r);
7126 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7130 rb_simple_write(struct file *filp, const char __user *ubuf,
7131 size_t cnt, loff_t *ppos)
7133 struct trace_array *tr = filp->private_data;
7134 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7138 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7143 mutex_lock(&trace_types_lock);
7145 tracer_tracing_on(tr);
7146 if (tr->current_trace->start)
7147 tr->current_trace->start(tr);
7149 tracer_tracing_off(tr);
7150 if (tr->current_trace->stop)
7151 tr->current_trace->stop(tr);
7153 mutex_unlock(&trace_types_lock);
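/*
 * Interface note (hedged): these handlers back the "tracing_on" file,
 * which gates writes to the ring buffer without tearing anything down:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on   # pause recording
 *	echo 1 > /sys/kernel/tracing/tracing_on   # resume recording
 *
 * The mount point is an assumption.
 */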
7161 static const struct file_operations rb_simple_fops = {
7162 .open = tracing_open_generic_tr,
7163 .read = rb_simple_read,
7164 .write = rb_simple_write,
7165 .release = tracing_release_generic_tr,
7166 .llseek = default_llseek,
7169 struct dentry *trace_instance_dir;
7172 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7175 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7177 enum ring_buffer_flags rb_flags;
7179 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7183 buf->buffer = ring_buffer_alloc(size, rb_flags);
7187 buf->data = alloc_percpu(struct trace_array_cpu);
7189 ring_buffer_free(buf->buffer);
7193 /* Allocate the first page for all buffers */
7194 set_buffer_entries(&tr->trace_buffer,
7195 ring_buffer_size(tr->trace_buffer.buffer, 0));
7200 static int allocate_trace_buffers(struct trace_array *tr, int size)
7204 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7208 #ifdef CONFIG_TRACER_MAX_TRACE
7209 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7210 allocate_snapshot ? size : 1);
7212 ring_buffer_free(tr->trace_buffer.buffer);
7213 free_percpu(tr->trace_buffer.data);
7216 tr->allocated_snapshot = allocate_snapshot;
7219 * Only the top level trace array gets its snapshot allocated
7220 * from the kernel command line.
7222 allocate_snapshot = false;
7227 static void free_trace_buffer(struct trace_buffer *buf)
7230 ring_buffer_free(buf->buffer);
7232 free_percpu(buf->data);
7237 static void free_trace_buffers(struct trace_array *tr)
7242 free_trace_buffer(&tr->trace_buffer);
7244 #ifdef CONFIG_TRACER_MAX_TRACE
7245 free_trace_buffer(&tr->max_buffer);
7249 static void init_trace_flags_index(struct trace_array *tr)
7253 /* Used by the trace options files */
7254 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7255 tr->trace_flags_index[i] = i;
7258 static void __update_tracer_options(struct trace_array *tr)
7262 for (t = trace_types; t; t = t->next)
7263 add_tracer_options(tr, t);
7266 static void update_tracer_options(struct trace_array *tr)
7268 mutex_lock(&trace_types_lock);
7269 __update_tracer_options(tr);
7270 mutex_unlock(&trace_types_lock);
7273 static int instance_mkdir(const char *name)
7275 struct trace_array *tr;
7278 mutex_lock(&trace_types_lock);
7281 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7282 if (tr->name && strcmp(tr->name, name) == 0)
7287 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7291 tr->name = kstrdup(name, GFP_KERNEL);
7295 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7298 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7300 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7302 raw_spin_lock_init(&tr->start_lock);
7304 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7306 tr->current_trace = &nop_trace;
7308 INIT_LIST_HEAD(&tr->systems);
7309 INIT_LIST_HEAD(&tr->events);
7311 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7314 tr->dir = tracefs_create_dir(name, trace_instance_dir);
7318 ret = event_trace_add_tracer(tr->dir, tr);
7320 tracefs_remove_recursive(tr->dir);
7324 init_tracer_tracefs(tr, tr->dir);
7325 init_trace_flags_index(tr);
7326 __update_tracer_options(tr);
7328 list_add(&tr->list, &ftrace_trace_arrays);
7330 mutex_unlock(&trace_types_lock);
7335 free_trace_buffers(tr);
7336 free_cpumask_var(tr->tracing_cpumask);
7341 mutex_unlock(&trace_types_lock);
7347 static int instance_rmdir(const char *name)
7349 struct trace_array *tr;
7354 mutex_lock(&trace_types_lock);
7357 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7358 if (tr->name && strcmp(tr->name, name) == 0) {
7367 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7370 list_del(&tr->list);
7372 /* Disable all the flags that were enabled coming in */
7373 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7374 if ((1 << i) & ZEROED_TRACE_FLAGS)
7375 set_tracer_flag(tr, 1 << i, 0);
7378 tracing_set_nop(tr);
7379 event_trace_del_tracer(tr);
7380 ftrace_destroy_function_files(tr);
7381 tracefs_remove_recursive(tr->dir);
7382 free_trace_buffers(tr);
7384 for (i = 0; i < tr->nr_topts; i++) {
7385 kfree(tr->topts[i].topts);
7395 mutex_unlock(&trace_types_lock);
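/*
 * Interface note (hedged): instance_mkdir()/instance_rmdir() back the
 * "instances" directory, where creating or removing a subdirectory
 * creates or tears down an independent trace array, e.g.:
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *	rmdir /sys/kernel/tracing/instances/foo
 *
 * The mount point is an assumption; rmdir fails with -EBUSY while the
 * instance is still referenced.
 */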
7400 static __init void create_trace_instances(struct dentry *d_tracer)
7402 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7405 if (WARN_ON(!trace_instance_dir))
7410 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7414 trace_create_file("available_tracers", 0444, d_tracer,
7415 tr, &show_traces_fops);
7417 trace_create_file("current_tracer", 0644, d_tracer,
7418 tr, &set_tracer_fops);
7420 trace_create_file("tracing_cpumask", 0644, d_tracer,
7421 tr, &tracing_cpumask_fops);
7423 trace_create_file("trace_options", 0644, d_tracer,
7424 tr, &tracing_iter_fops);
7426 trace_create_file("trace", 0644, d_tracer,
7429 trace_create_file("trace_pipe", 0444, d_tracer,
7430 tr, &tracing_pipe_fops);
7432 trace_create_file("buffer_size_kb", 0644, d_tracer,
7433 tr, &tracing_entries_fops);
7435 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7436 tr, &tracing_total_entries_fops);
7438 trace_create_file("free_buffer", 0200, d_tracer,
7439 tr, &tracing_free_buffer_fops);
7441 trace_create_file("trace_marker", 0220, d_tracer,
7442 tr, &tracing_mark_fops);
7444 trace_create_file("trace_marker_raw", 0220, d_tracer,
7445 tr, &tracing_mark_raw_fops);
7447 trace_create_file("trace_clock", 0644, d_tracer, tr,
7450 trace_create_file("tracing_on", 0644, d_tracer,
7451 tr, &rb_simple_fops);
7453 create_trace_options_dir(tr);
7455 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7456 trace_create_file("tracing_max_latency", 0644, d_tracer,
7457 &tr->max_latency, &tracing_max_lat_fops);
7460 if (ftrace_create_function_files(tr, d_tracer))
7461 WARN(1, "Could not allocate function filter files");
7463 #ifdef CONFIG_TRACER_SNAPSHOT
7464 trace_create_file("snapshot", 0644, d_tracer,
7465 tr, &snapshot_fops);
7468 for_each_tracing_cpu(cpu)
7469 tracing_init_tracefs_percpu(tr, cpu);
7471 ftrace_init_tracefs(tr, d_tracer);
7474 static struct vfsmount *trace_automount(void *ignore)
7476 struct vfsmount *mnt;
7477 struct file_system_type *type;
7480 * To maintain backward compatibility for tools that mount
7481 * debugfs to get to the tracing facility, tracefs is automatically
7482 * mounted to the debugfs/tracing directory.
7484 type = get_fs_type("tracefs");
7487 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
7488 put_filesystem(type);
7497 * tracing_init_dentry - initialize top level trace array
7499 * This is called when creating files or directories in the tracing
7500 * directory. It is called via fs_initcall() by any of the boot up code
7501 * and expects to return the dentry of the top level tracing directory.
7503 struct dentry *tracing_init_dentry(void)
7505 struct trace_array *tr = &global_trace;
7507 /* The top level trace array uses NULL as parent */
7511 if (WARN_ON(!tracefs_initialized()) ||
7512 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7513 WARN_ON(!debugfs_initialized())))
7514 return ERR_PTR(-ENODEV);
7517 * As there may still be users that expect the tracing
7518 * files to exist in debugfs/tracing, we must automount
7519 * the tracefs file system there, so older tools still
7520	 * work with the newer kernel.
7522 tr->dir = debugfs_create_automount("tracing", NULL,
7523 trace_automount, NULL);
7525 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7526 return ERR_PTR(-ENOMEM);
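/*
 * Usage note (hedged): tracefs is normally mounted at /sys/kernel/tracing:
 *
 *	mount -t tracefs nodev /sys/kernel/tracing
 *
 * and, thanks to the automount above, older tools that look under
 * /sys/kernel/debug/tracing keep working when debugfs is mounted.
 */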
7532 extern struct trace_enum_map *__start_ftrace_enum_maps[];
7533 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
7535 static void __init trace_enum_init(void)
7539 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
7540 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
7543 #ifdef CONFIG_MODULES
7544 static void trace_module_add_enums(struct module *mod)
7546 if (!mod->num_trace_enums)
7550 * Modules with bad taint do not have events created, do
7551 * not bother with enums either.
7553 if (trace_module_has_bad_taint(mod))
7556 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
7559 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
7560 static void trace_module_remove_enums(struct module *mod)
7562 union trace_enum_map_item *map;
7563 union trace_enum_map_item **last = &trace_enum_maps;
7565 if (!mod->num_trace_enums)
7568 mutex_lock(&trace_enum_mutex);
7570 map = trace_enum_maps;
7573 if (map->head.mod == mod)
7575 map = trace_enum_jmp_to_tail(map);
7576 last = &map->tail.next;
7577 map = map->tail.next;
7582 *last = trace_enum_jmp_to_tail(map)->tail.next;
7585 mutex_unlock(&trace_enum_mutex);
7588 static inline void trace_module_remove_enums(struct module *mod) { }
7589 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7591 static int trace_module_notify(struct notifier_block *self,
7592 unsigned long val, void *data)
7594 struct module *mod = data;
7597 case MODULE_STATE_COMING:
7598 trace_module_add_enums(mod);
7600 case MODULE_STATE_GOING:
7601 trace_module_remove_enums(mod);
7608 static struct notifier_block trace_module_nb = {
7609 .notifier_call = trace_module_notify,
7612 #endif /* CONFIG_MODULES */
7614 static __init int tracer_init_tracefs(void)
7616 struct dentry *d_tracer;
7618 trace_access_lock_init();
7620 d_tracer = tracing_init_dentry();
7621 if (IS_ERR(d_tracer))
7624 init_tracer_tracefs(&global_trace, d_tracer);
7625 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
7627 trace_create_file("tracing_thresh", 0644, d_tracer,
7628 &global_trace, &tracing_thresh_fops);
7630 trace_create_file("README", 0444, d_tracer,
7631 NULL, &tracing_readme_fops);
7633 trace_create_file("saved_cmdlines", 0444, d_tracer,
7634 NULL, &tracing_saved_cmdlines_fops);
7636 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7637 NULL, &tracing_saved_cmdlines_size_fops);
7641 trace_create_enum_file(d_tracer);
7643 #ifdef CONFIG_MODULES
7644 register_module_notifier(&trace_module_nb);
7647 #ifdef CONFIG_DYNAMIC_FTRACE
7648 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7649 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
7652 create_trace_instances(d_tracer);
7654 update_tracer_options(&global_trace);
7659 static int trace_panic_handler(struct notifier_block *this,
7660 unsigned long event, void *unused)
7662 if (ftrace_dump_on_oops)
7663 ftrace_dump(ftrace_dump_on_oops);
7667 static struct notifier_block trace_panic_notifier = {
7668 .notifier_call = trace_panic_handler,
7670 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7673 static int trace_die_handler(struct notifier_block *self,
7679 if (ftrace_dump_on_oops)
7680 ftrace_dump(ftrace_dump_on_oops);
7688 static struct notifier_block trace_die_notifier = {
7689 .notifier_call = trace_die_handler,
7694 * printk is limited to a max of 1024 bytes; we really don't need it that big.
7695 * Nothing should be printing 1000 characters anyway.
7697 #define TRACE_MAX_PRINT 1000
7700 * Define here KERN_TRACE so that we have one place to modify
7701 * it if we decide to change what log level the ftrace dump should be printed at.
7704 #define KERN_TRACE KERN_EMERG
7707 trace_printk_seq(struct trace_seq *s)
7709 /* Probably should print a warning here. */
7710 if (s->seq.len >= TRACE_MAX_PRINT)
7711 s->seq.len = TRACE_MAX_PRINT;
7714 * More paranoid code. Although the buffer size is set to
7715 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7716 * an extra layer of protection.
7718 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7719 s->seq.len = s->seq.size - 1;
7721 /* should already be NUL-terminated, but we are paranoid. */
7722 s->buffer[s->seq.len] = 0;
7724 printk(KERN_TRACE "%s", s->buffer);
7729 void trace_init_global_iter(struct trace_iterator *iter)
7731 iter->tr = &global_trace;
7732 iter->trace = iter->tr->current_trace;
7733 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7734 iter->trace_buffer = &global_trace.trace_buffer;
7736 if (iter->trace && iter->trace->open)
7737 iter->trace->open(iter);
7739 /* Annotate start of buffers if we had overruns */
7740 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7741 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7743 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7744 if (trace_clocks[iter->tr->clock_id].in_ns)
7745 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7748 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7750 /* use static because iter can be a bit big for the stack */
7751 static struct trace_iterator iter;
7752 static atomic_t dump_running;
7753 struct trace_array *tr = &global_trace;
7754 unsigned int old_userobj;
7755 unsigned long flags;
7758 /* Only allow one dump user at a time. */
7759 if (atomic_inc_return(&dump_running) != 1) {
7760 atomic_dec(&dump_running);
7765 * Always turn off tracing when we dump.
7766 * We don't need to show trace output of what happens
7767 * between multiple crashes.
7769 * If the user does a sysrq-z, then they can re-enable
7770 * tracing with echo 1 > tracing_on.
7774 local_irq_save(flags);
7776 /* Simulate the iterator */
7777 trace_init_global_iter(&iter);
7779 for_each_tracing_cpu(cpu) {
7780 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7783 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7785 /* don't look at user memory in panic mode */
7786 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7788 switch (oops_dump_mode) {
7790 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7793 iter.cpu_file = raw_smp_processor_id();
7798 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7799 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7802 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7804 /* Did function tracer already get disabled? */
7805 if (ftrace_is_dead()) {
7806 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7807 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7811 * We need to stop all tracing on all CPUs to read
7812 * the next buffer. This is a bit expensive, but is
7813 * not done often. We read everything we can,
7814 * and then release the locks again.
7817 while (!trace_empty(&iter)) {
7820 printk(KERN_TRACE "---------------------------------\n");
7824 /* reset all but tr, trace, and overruns */
7825 memset(&iter.seq, 0,
7826 sizeof(struct trace_iterator) -
7827 offsetof(struct trace_iterator, seq));
7828 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7831 if (trace_find_next_entry_inc(&iter) != NULL) {
7834 ret = print_trace_line(&iter);
7835 if (ret != TRACE_TYPE_NO_CONSUME)
7836 trace_consume(&iter);
7838 touch_nmi_watchdog();
7840 trace_printk_seq(&iter.seq);
7844 printk(KERN_TRACE " (ftrace buffer empty)\n");
7846 printk(KERN_TRACE "---------------------------------\n");
7849 tr->trace_flags |= old_userobj;
7851 for_each_tracing_cpu(cpu) {
7852 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7854 atomic_dec(&dump_running);
7855 local_irq_restore(flags);
7857 EXPORT_SYMBOL_GPL(ftrace_dump);
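
/*
 * Illustrative sketch only, not part of trace.c: the "one dump user at a
 * time" check in ftrace_dump() is the common atomic_inc_return() gate.
 * The first caller sees the counter become 1 and proceeds; a concurrent
 * caller sees a larger value, undoes its increment and backs off.
 */
#if 0	/* example, not compiled */
static atomic_t example_dump_gate;

static bool example_dump_try_enter(void)
{
	if (atomic_inc_return(&example_dump_gate) != 1) {
		atomic_dec(&example_dump_gate);
		return false;		/* another dump is already running */
	}
	return true;
}

static void example_dump_exit(void)
{
	atomic_dec(&example_dump_gate);
}
#endif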
7859 __init static int tracer_alloc_buffers(void)
7865 * Make sure we don't accidentally add more trace options
7866 * than we have bits for.
7868 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7870 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7873 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7874 goto out_free_buffer_mask;
7876 /* Only allocate trace_printk buffers if a trace_printk exists */
7877 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7878 /* Must be called before global_trace.buffer is allocated */
7879 trace_printk_init_buffers();
7881 /* To save memory, keep the ring buffer size to its minimum */
7882 if (ring_buffer_expanded)
7883 ring_buf_size = trace_buf_size;
7887 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7888 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7890 raw_spin_lock_init(&global_trace.start_lock);
7892 /* Used for event triggers */
7893 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7895 goto out_free_cpumask;
7897 if (trace_create_savedcmd() < 0)
7898 goto out_free_temp_buffer;
7900 /* TODO: make the number of buffers hot pluggable with CPUs */
7901 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7902 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7904 goto out_free_savedcmd;
7907 if (global_trace.buffer_disabled)
7910 if (trace_boot_clock) {
7911 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7913 pr_warn("Trace clock %s not defined, going back to default\n",
7918 * register_tracer() might reference current_trace, so it
7919 * needs to be set before we register anything. This is
7920 * just a bootstrap of current_trace anyway.
7922 global_trace.current_trace = &nop_trace;
7924 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7926 ftrace_init_global_array_ops(&global_trace);
7928 init_trace_flags_index(&global_trace);
7930 register_tracer(&nop_trace);
7932 /* All seems OK, enable tracing */
7933 tracing_disabled = 0;
7935 atomic_notifier_chain_register(&panic_notifier_list,
7936 &trace_panic_notifier);
7938 register_die_notifier(&trace_die_notifier);
7940 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7942 INIT_LIST_HEAD(&global_trace.systems);
7943 INIT_LIST_HEAD(&global_trace.events);
7944 list_add(&global_trace.list, &ftrace_trace_arrays);
7946 apply_trace_boot_options();
7948 register_snapshot_cmd();
7953 free_saved_cmdlines_buffer(savedcmd);
7954 out_free_temp_buffer:
7955 ring_buffer_free(temp_buffer);
7957 free_cpumask_var(global_trace.tracing_cpumask);
7958 out_free_buffer_mask:
7959 free_cpumask_var(tracing_buffer_mask);
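
/*
 * Illustrative sketch only, not part of trace.c: the error labels in
 * tracer_alloc_buffers() follow the usual kernel "goto unwind" idiom --
 * each label frees what was allocated before the failing step, in
 * reverse order of allocation.  Sizes and names are hypothetical.
 */
#if 0	/* example, not compiled */
static __init int example_alloc(void)
{
	void *first, *second;

	first = kmalloc(64, GFP_KERNEL);
	if (!first)
		return -ENOMEM;

	second = kmalloc(64, GFP_KERNEL);
	if (!second)
		goto out_free_first;

	/* ... use the buffers, hand them off, etc. ... */
	return 0;

out_free_first:
	kfree(first);
	return -ENOMEM;
}
#endif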
7964 void __init trace_init(void)
7966 if (tracepoint_printk) {
7967 tracepoint_print_iter =
7968 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7969 if (WARN_ON(!tracepoint_print_iter))
7970 tracepoint_printk = 0;
7972 tracer_alloc_buffers();
7976 __init static int clear_boot_tracer(void)
7979 * The buffer holding the default bootup tracer name is in an init section.
7980 * This function is called at late_initcall time. If we did not
7981 * find the boot tracer, then clear it out, to prevent
7982 * later registration from accessing the buffer that is
7983 * about to be freed.
7985 if (!default_bootup_tracer)
7988 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7989 default_bootup_tracer);
7990 default_bootup_tracer = NULL;
7995 fs_initcall(tracer_init_tracefs);
7996 late_initcall(clear_boot_tracer);