1 // SPDX-License-Identifier: GPL-2.0
4 * Function graph tracer.
5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
6 * Mostly borrowed from function tracer which
7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
17 #include "trace_output.h"
19 /* When set, irq functions will be ignored */
20 static int ftrace_graph_skip_irqs;
22 struct fgraph_cpu_data {
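/*
 * Function entered at each depth; print_graph_return() compares this
 * against the returning function to detect lost entries.
 */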
27 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
31 struct fgraph_cpu_data __percpu *cpu_data;
33 /* Place to preserve last processed entry. */
34 struct ftrace_graph_ent_entry ent;
35 struct ftrace_graph_ret_entry ret;
40 #define TRACE_GRAPH_INDENT 2
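/* Maximum call depth to trace; 0 means no limit (set via the max_graph_depth file) */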
42 unsigned int fgraph_max_depth;
44 static struct tracer_opt trace_opts[] = {
45 /* Display overruns? (for self-debugging purposes) */
46 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
48 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
49 /* Display overhead? */
50 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
51 /* Display proc name/pid */
52 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
53 /* Display duration of execution */
54 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
55 /* Display absolute time of an entry */
56 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
57 /* Display interrupts */
58 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
59 /* Display function name after trailing } */
60 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
61 /* Include sleep time (scheduled out) between entry and return */
62 { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
64 #ifdef CONFIG_FUNCTION_PROFILER
65 /* Include time within nested functions */
66 { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
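/*
 * The options above are toggled at runtime through the tracefs
 * trace_options file (or the per-option files under options/), e.g.
 * "echo funcgraph-proc > trace_options" while this tracer is active.
 */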
72 static struct tracer_flags tracer_flags = {
73 /* Don't display overruns, proc, or tail by default */
74 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
75 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
76 TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
80 static struct trace_array *graph_array;
83 * The DURATION column is also used to display IRQ markers; the
84 * following values are used by print_graph_irq() and others to fill
85 * in space in the DURATION column.
88 FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
89 FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
90 FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
94 print_graph_duration(struct trace_array *tr, unsigned long long duration,
95 struct trace_seq *s, u32 flags);
97 int __trace_graph_entry(struct trace_array *tr,
98 struct ftrace_graph_ent *trace,
99 unsigned int trace_ctx)
101 struct trace_event_call *call = &event_funcgraph_entry;
102 struct ring_buffer_event *event;
103 struct trace_buffer *buffer = tr->array_buffer.buffer;
104 struct ftrace_graph_ent_entry *entry;
106 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
107 sizeof(*entry), trace_ctx);
110 entry = ring_buffer_event_data(event);
111 entry->graph_ent = *trace;
112 if (!call_filter_check_discard(call, entry, buffer, event))
113 trace_buffer_unlock_commit_nostack(buffer, event);
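/*
 * Nonzero when the funcgraph-irqs option is off and the current function
 * is executing in hard interrupt context.
 */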
118 static inline int ftrace_graph_ignore_irqs(void)
120 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
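/*
 * Entry callback hooked up to the function graph infrastructure: records a
 * TRACE_GRAPH_ENT event. Returning 0 tells the core not to trace the
 * matching return of this function.
 */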
126 int trace_graph_entry(struct ftrace_graph_ent *trace)
128 struct trace_array *tr = graph_array;
129 struct trace_array_cpu *data;
131 unsigned int trace_ctx;
136 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
140 * Do not trace a function if it's filtered by set_graph_notrace.
141 * Make the index of ret stack negative to indicate that it should
142 * ignore further functions. But it needs its own ret stack entry
143 * to recover the original index in order to continue tracing after
144 * returning from the function.
146 if (ftrace_graph_notrace_addr(trace->func)) {
147 trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
149 * Need to return 1 so that the return handler is called,
150 * which will clear the NOTRACE bit.
155 if (!ftrace_trace_task(tr))
158 if (ftrace_graph_ignore_func(trace))
161 if (ftrace_graph_ignore_irqs())
165 * Stop here if tracing_thresh is set. We only write function return
166 * events to the ring buffer.
171 local_irq_save(flags);
172 cpu = raw_smp_processor_id();
173 data = per_cpu_ptr(tr->array_buffer.data, cpu);
174 disabled = atomic_inc_return(&data->disabled);
175 if (likely(disabled == 1)) {
176 trace_ctx = tracing_gen_ctx_flags(flags);
177 ret = __trace_graph_entry(tr, trace, trace_ctx);
182 atomic_dec(&data->disabled);
183 local_irq_restore(flags);
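/*
 * Record a synthetic, zero-duration entry/return pair for a single address.
 * Used via trace_graph_function() by tracers that reuse the graph output
 * format (e.g. irqsoff/wakeup with the display-graph option).
 */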
189 __trace_graph_function(struct trace_array *tr,
190 unsigned long ip, unsigned int trace_ctx)
192 u64 time = trace_clock_local();
193 struct ftrace_graph_ent ent = {
197 struct ftrace_graph_ret ret = {
204 __trace_graph_entry(tr, &ent, trace_ctx);
205 __trace_graph_return(tr, &ret, trace_ctx);
209 trace_graph_function(struct trace_array *tr,
210 unsigned long ip, unsigned long parent_ip,
211 unsigned int trace_ctx)
213 __trace_graph_function(tr, ip, trace_ctx);
216 void __trace_graph_return(struct trace_array *tr,
217 struct ftrace_graph_ret *trace,
218 unsigned int trace_ctx)
220 struct trace_event_call *call = &event_funcgraph_exit;
221 struct ring_buffer_event *event;
222 struct trace_buffer *buffer = tr->array_buffer.buffer;
223 struct ftrace_graph_ret_entry *entry;
225 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
226 sizeof(*entry), trace_ctx);
229 entry = ring_buffer_event_data(event);
231 if (!call_filter_check_discard(call, entry, buffer, event))
232 trace_buffer_unlock_commit_nostack(buffer, event);
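/*
 * Return callback hooked up to the function graph infrastructure: records a
 * TRACE_GRAPH_RET event with the call/return timestamps.
 */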
235 void trace_graph_return(struct ftrace_graph_ret *trace)
237 struct trace_array *tr = graph_array;
238 struct trace_array_cpu *data;
240 unsigned int trace_ctx;
244 ftrace_graph_addr_finish(trace);
246 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
247 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
251 local_irq_save(flags);
252 cpu = raw_smp_processor_id();
253 data = per_cpu_ptr(tr->array_buffer.data, cpu);
254 disabled = atomic_inc_return(&data->disabled);
255 if (likely(disabled == 1)) {
256 trace_ctx = tracing_gen_ctx_flags(flags);
257 __trace_graph_return(tr, trace, trace_ctx);
259 atomic_dec(&data->disabled);
260 local_irq_restore(flags);
263 void set_graph_array(struct trace_array *tr)
267 /* Make graph_array visible before we start tracing */
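/*
 * Like trace_graph_return(), but used when tracing_thresh is set: returns
 * whose duration is below the threshold are dropped.
 */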
272 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
274 ftrace_graph_addr_finish(trace);
276 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
277 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
281 if (tracing_thresh &&
282 (trace->rettime - trace->calltime < tracing_thresh))
285 trace_graph_return(trace);
288 static struct fgraph_ops funcgraph_thresh_ops = {
289 .entryfunc = &trace_graph_entry,
290 .retfunc = &trace_graph_thresh_return,
293 static struct fgraph_ops funcgraph_ops = {
294 .entryfunc = &trace_graph_entry,
295 .retfunc = &trace_graph_return,
298 static int graph_trace_init(struct trace_array *tr)
304 ret = register_ftrace_graph(&funcgraph_thresh_ops);
306 ret = register_ftrace_graph(&funcgraph_ops);
309 tracing_start_cmdline_record();
314 static void graph_trace_reset(struct trace_array *tr)
316 tracing_stop_cmdline_record();
318 unregister_ftrace_graph(&funcgraph_thresh_ops);
320 unregister_ftrace_graph(&funcgraph_ops);
323 static int graph_trace_update_thresh(struct trace_array *tr)
325 graph_trace_reset(tr);
326 return graph_trace_init(tr);
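/* Width needed to print the largest possible CPU number, computed at init time */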
329 static int max_bytes_for_cpu;
331 static void print_graph_cpu(struct trace_seq *s, int cpu)
334 * Start with a space character - to make it stand out
335 * to the right a bit when trace output is pasted into email.
338 trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
341 #define TRACE_GRAPH_PROCINFO_LENGTH 14
343 static void print_graph_proc(struct trace_seq *s, pid_t pid)
345 char comm[TASK_COMM_LEN];
346 /* sign + log10(MAX_INT) + '\0' */
352 trace_find_cmdline(pid, comm);
354 sprintf(pid_str, "%d", pid);
356 /* 1 stands for the "-" character */
357 len = strlen(comm) + strlen(pid_str) + 1;
359 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
360 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
362 /* First spaces to align center */
363 for (i = 0; i < spaces / 2; i++)
364 trace_seq_putc(s, ' ');
366 trace_seq_printf(s, "%s-%s", comm, pid_str);
368 /* Last spaces to align center */
369 for (i = 0; i < spaces - (spaces / 2); i++)
370 trace_seq_putc(s, ' ');
374 static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
376 trace_seq_putc(s, ' ');
377 trace_print_lat_fmt(s, entry);
378 trace_seq_puts(s, " | ");
381 /* If the pid changed since the last trace, output this event */
383 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
391 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
393 if (*last_pid == pid)
396 prev_pid = *last_pid;
402 * Context-switch trace line:
404 ------------------------------------------
405 | 1) migration/0--1 => sshd-1755
406 ------------------------------------------
409 trace_seq_puts(s, " ------------------------------------------\n");
410 print_graph_cpu(s, cpu);
411 print_graph_proc(s, prev_pid);
412 trace_seq_puts(s, " => ");
413 print_graph_proc(s, pid);
414 trace_seq_puts(s, "\n ------------------------------------------\n\n");
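/*
 * Peek at the next event on this CPU: if it is the TRACE_GRAPH_RET matching
 * @curr (same pid and function), the call is a leaf and both events can be
 * folded into a single "func();" line.
 */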
417 static struct ftrace_graph_ret_entry *
418 get_return_for_leaf(struct trace_iterator *iter,
419 struct ftrace_graph_ent_entry *curr)
421 struct fgraph_data *data = iter->private;
422 struct ring_buffer_iter *ring_iter = NULL;
423 struct ring_buffer_event *event;
424 struct ftrace_graph_ret_entry *next;
427 * If the previous output failed to write to the seq buffer,
428 * then we just reuse the data from before.
430 if (data && data->failed) {
435 ring_iter = trace_buffer_iter(iter, iter->cpu);
437 /* First peek to compare current entry and the next one */
439 event = ring_buffer_iter_peek(ring_iter, NULL);
442 * We need to consume the current entry to see the next one.
445 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
447 event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
454 next = ring_buffer_event_data(event);
458 * Save current and next entries for later reference
459 * if the output fails.
463 * If the next event is not a return type, then
464 * we only care about what type it is. Otherwise we can
465 * safely copy the entire event.
467 if (next->ent.type == TRACE_GRAPH_RET)
470 data->ret.ent.type = next->ent.type;
474 if (next->ent.type != TRACE_GRAPH_RET)
477 if (curr->ent.pid != next->ent.pid ||
478 curr->graph_ent.func != next->ret.func)
481 /* this is a leaf, now advance the iterator */
483 ring_buffer_iter_advance(ring_iter);
488 static void print_graph_abs_time(u64 t, struct trace_seq *s)
490 unsigned long usecs_rem;
492 usecs_rem = do_div(t, NSEC_PER_SEC);
495 trace_seq_printf(s, "%5lu.%06lu | ",
496 (unsigned long)t, usecs_rem);
500 print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
502 unsigned long long usecs;
504 usecs = iter->ts - iter->array_buffer->time_start;
505 do_div(usecs, NSEC_PER_USEC);
507 trace_seq_printf(s, "%9llu us | ", usecs);
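/*
 * When a function in the irq entry text is hit, annotate the output with
 * "==========>" on entry and "<==========" on return so interrupt handlers
 * stand out in the trace.
 */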
511 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
512 enum trace_type type, int cpu, pid_t pid, u32 flags)
514 struct trace_array *tr = iter->tr;
515 struct trace_seq *s = &iter->seq;
516 struct trace_entry *ent = iter->ent;
518 if (addr < (unsigned long)__irqentry_text_start ||
519 addr >= (unsigned long)__irqentry_text_end)
522 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
524 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
525 print_graph_abs_time(iter->ts, s);
528 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
529 print_graph_rel_time(iter, s);
532 if (flags & TRACE_GRAPH_PRINT_CPU)
533 print_graph_cpu(s, cpu);
536 if (flags & TRACE_GRAPH_PRINT_PROC) {
537 print_graph_proc(s, pid);
538 trace_seq_puts(s, " | ");
542 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
543 print_graph_lat_fmt(s, ent);
547 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
549 if (type == TRACE_GRAPH_ENT)
550 trace_seq_puts(s, "==========>");
552 trace_seq_puts(s, "<==========");
554 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
555 trace_seq_putc(s, '\n');
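/*
 * Format @duration (in nanoseconds) as "<usecs>.<nsecs> us" padded to the
 * fixed width of the DURATION column, e.g. "3.125 us".
 */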
559 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
561 unsigned long nsecs_rem = do_div(duration, 1000);
562 /* log10(ULONG_MAX) + '\0' */
568 sprintf(usecs_str, "%lu", (unsigned long) duration);
571 trace_seq_printf(s, "%s", usecs_str);
573 len = strlen(usecs_str);
575 /* Print nsecs (we don't want to exceed 7 digits) */
577 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
579 snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
580 trace_seq_printf(s, ".%s", nsecs_str);
581 len += strlen(nsecs_str) + 1;
584 trace_seq_puts(s, " us ");
586 /* Print remaining spaces to fit the row's width */
587 for (i = len; i < 8; i++)
588 trace_seq_putc(s, ' ');
592 print_graph_duration(struct trace_array *tr, unsigned long long duration,
593 struct trace_seq *s, u32 flags)
595 if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
596 !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
599 /* No real data, just filling the column with spaces */
600 switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
601 case FLAGS_FILL_FULL:
602 trace_seq_puts(s, " | ");
604 case FLAGS_FILL_START:
605 trace_seq_puts(s, " ");
608 trace_seq_puts(s, " |");
612 /* Signal an overhead of execution time to the output */
613 if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
614 trace_seq_printf(s, "%c ", trace_find_mark(duration));
616 trace_seq_puts(s, " ");
618 trace_print_graph_duration(duration, s);
619 trace_seq_puts(s, "| ");
622 /* Case of a leaf function on its call entry */
623 static enum print_line_t
624 print_graph_entry_leaf(struct trace_iterator *iter,
625 struct ftrace_graph_ent_entry *entry,
626 struct ftrace_graph_ret_entry *ret_entry,
627 struct trace_seq *s, u32 flags)
629 struct fgraph_data *data = iter->private;
630 struct trace_array *tr = iter->tr;
631 struct ftrace_graph_ret *graph_ret;
632 struct ftrace_graph_ent *call;
633 unsigned long long duration;
637 graph_ret = &ret_entry->ret;
638 call = &entry->graph_ent;
639 duration = graph_ret->rettime - graph_ret->calltime;
642 struct fgraph_cpu_data *cpu_data;
644 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
647 * Comments are displayed at depth + 1. Since
648 * this is a leaf function, keep the comments
649 * equal to this depth.
651 cpu_data->depth = call->depth - 1;
653 /* No need to keep this function around for this depth */
654 if (call->depth < FTRACE_RETFUNC_DEPTH &&
655 !WARN_ON_ONCE(call->depth < 0))
656 cpu_data->enter_funcs[call->depth] = 0;
659 /* Overhead and duration */
660 print_graph_duration(tr, duration, s, flags);
663 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
664 trace_seq_putc(s, ' ');
666 trace_seq_printf(s, "%ps();\n", (void *)call->func);
668 print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
669 cpu, iter->ent->pid, flags);
671 return trace_handle_return(s);
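/*
 * A nested (non-leaf) entry prints "func() {" with an empty duration column;
 * the duration is printed later on the matching "}" line. Roughly:
 *
 *  1)               |  kmem_cache_alloc() {
 *  1)   0.342 us    |    __slab_alloc();
 *  1)   1.234 us    |  }
 */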
674 static enum print_line_t
675 print_graph_entry_nested(struct trace_iterator *iter,
676 struct ftrace_graph_ent_entry *entry,
677 struct trace_seq *s, int cpu, u32 flags)
679 struct ftrace_graph_ent *call = &entry->graph_ent;
680 struct fgraph_data *data = iter->private;
681 struct trace_array *tr = iter->tr;
685 struct fgraph_cpu_data *cpu_data;
688 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
689 cpu_data->depth = call->depth;
691 /* Save this function pointer to see if the exit matches */
692 if (call->depth < FTRACE_RETFUNC_DEPTH &&
693 !WARN_ON_ONCE(call->depth < 0))
694 cpu_data->enter_funcs[call->depth] = call->func;
698 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
701 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
702 trace_seq_putc(s, ' ');
704 trace_seq_printf(s, "%ps() {\n", (void *)call->func);
706 if (trace_seq_has_overflowed(s))
707 return TRACE_TYPE_PARTIAL_LINE;
710 * we already consumed the current entry to check the next one
711 * and see if this is a leaf.
713 return TRACE_TYPE_NO_CONSUME;
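/*
 * Common line prefix: pid-change check, irq markers, then the optional
 * absolute/relative time, CPU, TASK/PID and latency columns selected by
 * @flags and the trace_array flags.
 */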
717 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
718 int type, unsigned long addr, u32 flags)
720 struct fgraph_data *data = iter->private;
721 struct trace_entry *ent = iter->ent;
722 struct trace_array *tr = iter->tr;
726 verif_pid(s, ent->pid, cpu, data);
730 print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
732 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
736 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
737 print_graph_abs_time(iter->ts, s);
740 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
741 print_graph_rel_time(iter, s);
744 if (flags & TRACE_GRAPH_PRINT_CPU)
745 print_graph_cpu(s, cpu);
748 if (flags & TRACE_GRAPH_PRINT_PROC) {
749 print_graph_proc(s, ent->pid);
750 trace_seq_puts(s, " | ");
754 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
755 print_graph_lat_fmt(s, ent);
761 * Entry check for irq code
762 *
763 * returns 1 if
764 * - we are inside irq code
765 * - we just entered irq code
766 *
767 * returns 0 if
768 * - funcgraph-interrupts option is set
769 * - we are not inside irq code
772 check_irq_entry(struct trace_iterator *iter, u32 flags,
773 unsigned long addr, int depth)
777 struct fgraph_data *data = iter->private;
780 * If we are either displaying irqs, or we got called as
781 * a graph event and private data does not exist,
782 * then we bypass the irq check.
784 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
788 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
791 * We are inside the irq code
796 if ((addr < (unsigned long)__irqentry_text_start) ||
797 (addr >= (unsigned long)__irqentry_text_end))
801 * We are entering irq code.
808 * Return check for irq code
809 *
810 * returns 1 if
811 * - we are inside irq code
812 * - we just left irq code
813 *
814 * returns 0 if
815 * - funcgraph-interrupts option is set
816 * - we are not inside irq code
819 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
823 struct fgraph_data *data = iter->private;
826 * If we are either displaying irqs, or we got called as
827 * a graph event and private data does not exist,
828 * then we bypass the irq check.
830 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
834 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
837 * We are not inside the irq code.
839 if (*depth_irq == -1)
843 * We are inside the irq code, and this is the returning entry.
844 * Don't trace it and clear the entry depth, since
845 * we are now out of irq code.
847 * This condition ensures that we 'leave the irq code' once
848 * we are out of the entry depth, thus protecting us from
849 * losing the RETURN entry.
851 if (*depth_irq >= depth) {
857 * We are inside the irq code, and this is not the entry.
862 static enum print_line_t
863 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
864 struct trace_iterator *iter, u32 flags)
866 struct fgraph_data *data = iter->private;
867 struct ftrace_graph_ent *call = &field->graph_ent;
868 struct ftrace_graph_ret_entry *leaf_ret;
869 static enum print_line_t ret;
872 if (check_irq_entry(iter, flags, call->func, call->depth))
873 return TRACE_TYPE_HANDLED;
875 print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
877 leaf_ret = get_return_for_leaf(iter, field);
879 ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
881 ret = print_graph_entry_nested(iter, field, s, cpu, flags);
885 * If we failed to write our output, then we need to make
886 * note of it, because we already consumed our entry.
898 static enum print_line_t
899 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
900 struct trace_entry *ent, struct trace_iterator *iter,
903 unsigned long long duration = trace->rettime - trace->calltime;
904 struct fgraph_data *data = iter->private;
905 struct trace_array *tr = iter->tr;
906 pid_t pid = ent->pid;
911 if (check_irq_return(iter, flags, trace->depth))
912 return TRACE_TYPE_HANDLED;
915 struct fgraph_cpu_data *cpu_data;
918 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
921 * Comments are displayed at depth + 1. This is the
922 * return from a function, so we now want the comments
923 * to display at the same level as the bracket.
925 cpu_data->depth = trace->depth - 1;
927 if (trace->depth < FTRACE_RETFUNC_DEPTH &&
928 !WARN_ON_ONCE(trace->depth < 0)) {
929 if (cpu_data->enter_funcs[trace->depth] != trace->func)
931 cpu_data->enter_funcs[trace->depth] = 0;
935 print_graph_prologue(iter, s, 0, 0, flags);
937 /* Overhead and duration */
938 print_graph_duration(tr, duration, s, flags);
941 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
942 trace_seq_putc(s, ' ');
945 * If the return function does not have a matching entry,
946 * then the entry was lost. Instead of just printing
947 * the '}' and letting the user guess what function this
948 * belongs to, write out the function name. Always do
949 * that if the funcgraph-tail option is enabled.
951 if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
952 trace_seq_puts(s, "}\n");
954 trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
957 if (flags & TRACE_GRAPH_PRINT_OVERRUN)
958 trace_seq_printf(s, " (Overruns: %u)\n",
961 print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
964 return trace_handle_return(s);
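/*
 * Non-graph events (trace_printk() and friends) are rendered as C-style
 * comments indented to the current call depth.
 */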
967 static enum print_line_t
968 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
969 struct trace_iterator *iter, u32 flags)
971 struct trace_array *tr = iter->tr;
972 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
973 struct fgraph_data *data = iter->private;
974 struct trace_event *event;
980 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
982 print_graph_prologue(iter, s, 0, 0, flags);
985 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
989 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
990 trace_seq_putc(s, ' ');
993 trace_seq_puts(s, "/* ");
995 switch (iter->ent->type) {
997 ret = trace_print_bputs_msg_only(iter);
998 if (ret != TRACE_TYPE_HANDLED)
1002 ret = trace_print_bprintk_msg_only(iter);
1003 if (ret != TRACE_TYPE_HANDLED)
1007 ret = trace_print_printk_msg_only(iter);
1008 if (ret != TRACE_TYPE_HANDLED)
1012 event = ftrace_find_event(ent->type);
1014 return TRACE_TYPE_UNHANDLED;
1016 ret = event->funcs->trace(iter, sym_flags, event);
1017 if (ret != TRACE_TYPE_HANDLED)
1021 if (trace_seq_has_overflowed(s))
1024 /* Strip ending newline */
1025 if (s->buffer[s->seq.len - 1] == '\n') {
1026 s->buffer[s->seq.len - 1] = '\0';
1030 trace_seq_puts(s, " */\n");
1032 return trace_handle_return(s);
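/*
 * Top-level output dispatcher: replays an entry whose previous output
 * failed, then routes TRACE_GRAPH_ENT, TRACE_GRAPH_RET and everything else
 * to the appropriate printer above.
 */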
1037 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1039 struct ftrace_graph_ent_entry *field;
1040 struct fgraph_data *data = iter->private;
1041 struct trace_entry *entry = iter->ent;
1042 struct trace_seq *s = &iter->seq;
1043 int cpu = iter->cpu;
1046 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1047 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1048 return TRACE_TYPE_HANDLED;
1052 * If the last output failed, there's a possibility we need
1053 * to print out the missing entry, which would otherwise never be printed.
1055 if (data && data->failed) {
1057 iter->cpu = data->cpu;
1058 ret = print_graph_entry(field, s, iter, flags);
1059 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1060 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1061 ret = TRACE_TYPE_NO_CONSUME;
1067 switch (entry->type) {
1068 case TRACE_GRAPH_ENT: {
1070 * print_graph_entry() may consume the current event,
1071 * thus @field may become invalid, so we need to save it.
1072 * sizeof(struct ftrace_graph_ent_entry) is very small,
1073 * so it can safely be saved on the stack.
1075 struct ftrace_graph_ent_entry saved;
1076 trace_assign_type(field, entry);
1078 return print_graph_entry(&saved, s, iter, flags);
1080 case TRACE_GRAPH_RET: {
1081 struct ftrace_graph_ret_entry *field;
1082 trace_assign_type(field, entry);
1083 return print_graph_return(&field->ret, s, entry, iter, flags);
1087 /* don't trace stack and functions as comments */
1088 return TRACE_TYPE_UNHANDLED;
1091 return print_graph_comment(s, entry, iter, flags);
1094 return TRACE_TYPE_HANDLED;
1097 static enum print_line_t
1098 print_graph_function(struct trace_iterator *iter)
1100 return print_graph_function_flags(iter, tracer_flags.val);
1103 static enum print_line_t
1104 print_graph_function_event(struct trace_iterator *iter, int flags,
1105 struct trace_event *event)
1107 return print_graph_function(iter);
1110 static void print_lat_header(struct seq_file *s, u32 flags)
1112 static const char spaces[] = "                " /* 16 spaces */
1114 "                 "; /* 17 spaces */
1117 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1119 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1121 if (flags & TRACE_GRAPH_PRINT_CPU)
1123 if (flags & TRACE_GRAPH_PRINT_PROC)
1126 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
1127 seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
1128 seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1129 seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
1130 seq_printf(s, "#%.*s||| / \n", size, spaces);
1133 static void __print_graph_headers_flags(struct trace_array *tr,
1134 struct seq_file *s, u32 flags)
1136 int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1139 print_lat_header(s, flags);
1143 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1144 seq_puts(s, " TIME ");
1145 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1146 seq_puts(s, " REL TIME ");
1147 if (flags & TRACE_GRAPH_PRINT_CPU)
1148 seq_puts(s, " CPU");
1149 if (flags & TRACE_GRAPH_PRINT_PROC)
1150 seq_puts(s, " TASK/PID ");
1152 seq_puts(s, "|||| ");
1153 if (flags & TRACE_GRAPH_PRINT_DURATION)
1154 seq_puts(s, " DURATION ");
1155 seq_puts(s, " FUNCTION CALLS\n");
1159 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1161 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1163 if (flags & TRACE_GRAPH_PRINT_CPU)
1165 if (flags & TRACE_GRAPH_PRINT_PROC)
1166 seq_puts(s, " | | ");
1168 seq_puts(s, "|||| ");
1169 if (flags & TRACE_GRAPH_PRINT_DURATION)
1170 seq_puts(s, " | | ");
1171 seq_puts(s, " | | | |\n");
1174 static void print_graph_headers(struct seq_file *s)
1176 print_graph_headers_flags(s, tracer_flags.val);
1179 void print_graph_headers_flags(struct seq_file *s, u32 flags)
1181 struct trace_iterator *iter = s->private;
1182 struct trace_array *tr = iter->tr;
1184 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1187 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1188 /* print nothing if the buffers are empty */
1189 if (trace_empty(iter))
1192 print_trace_header(s, iter);
1195 __print_graph_headers_flags(tr, s, flags);
1198 void graph_trace_open(struct trace_iterator *iter)
1200 /* pid and depth on the last trace processed */
1201 struct fgraph_data *data;
1205 iter->private = NULL;
1207 /* We can be called in atomic context via ftrace_dump() */
1208 gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1210 data = kzalloc(sizeof(*data), gfpflags);
1214 data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1215 if (!data->cpu_data)
1218 for_each_possible_cpu(cpu) {
1219 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1220 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1221 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1222 int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1230 iter->private = data;
1237 pr_warn("function graph tracer: not enough memory\n");
1240 void graph_trace_close(struct trace_iterator *iter)
1242 struct fgraph_data *data = iter->private;
1245 free_percpu(data->cpu_data);
1251 func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1253 if (bit == TRACE_GRAPH_PRINT_IRQS)
1254 ftrace_graph_skip_irqs = !set;
1256 if (bit == TRACE_GRAPH_SLEEP_TIME)
1257 ftrace_graph_sleep_time_control(set);
1259 if (bit == TRACE_GRAPH_GRAPH_TIME)
1260 ftrace_graph_graph_time_control(set);
1265 static struct trace_event_functions graph_functions = {
1266 .trace = print_graph_function_event,
1269 static struct trace_event graph_trace_entry_event = {
1270 .type = TRACE_GRAPH_ENT,
1271 .funcs = &graph_functions,
1274 static struct trace_event graph_trace_ret_event = {
1275 .type = TRACE_GRAPH_RET,
1276 .funcs = &graph_functions
1279 static struct tracer graph_trace __tracer_data = {
1280 .name = "function_graph",
1281 .update_thresh = graph_trace_update_thresh,
1282 .open = graph_trace_open,
1283 .pipe_open = graph_trace_open,
1284 .close = graph_trace_close,
1285 .pipe_close = graph_trace_close,
1286 .init = graph_trace_init,
1287 .reset = graph_trace_reset,
1288 .print_line = print_graph_function,
1289 .print_header = print_graph_headers,
1290 .flags = &tracer_flags,
1291 .set_flag = func_graph_set_flag,
1292 #ifdef CONFIG_FTRACE_SELFTEST
1293 .selftest = trace_selftest_startup_function_graph,
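/*
 * The max_graph_depth file limits how deep the tracer follows calls, e.g.
 * (assuming tracefs is mounted in the usual place):
 *   echo 3 > /sys/kernel/tracing/max_graph_depth
 */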
1299 graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1305 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1309 fgraph_max_depth = val;
1317 graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1320 char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1323 n = sprintf(buf, "%d\n", fgraph_max_depth);
1325 return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1328 static const struct file_operations graph_depth_fops = {
1329 .open = tracing_open_generic,
1330 .write = graph_depth_write,
1331 .read = graph_depth_read,
1332 .llseek = generic_file_llseek,
1335 static __init int init_graph_tracefs(void)
1339 ret = tracing_init_dentry();
1343 trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
1344 NULL, &graph_depth_fops);
1348 fs_initcall(init_graph_tracefs);
1350 static __init int init_graph_trace(void)
1352 max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1354 if (!register_trace_event(&graph_trace_entry_event)) {
1355 pr_warn("Warning: could not register graph trace events\n");
1359 if (!register_trace_event(&graph_trace_ret_event)) {
1360 pr_warn("Warning: could not register graph trace events\n");
1364 return register_tracer(&graph_trace);
1367 core_initcall(init_graph_trace);