kernel/trace/fgraph.c [platform/kernel/linux-starfive.git]
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Infrastructure to hook into function calls and returns.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  * Highly modified by Steven Rostedt (VMware).
9  */
10 #include <linux/jump_label.h>
11 #include <linux/suspend.h>
12 #include <linux/ftrace.h>
13 #include <linux/slab.h>
14
15 #include <trace/events/sched.h>
16
17 #include "ftrace_internal.h"
18
19 #ifdef CONFIG_DYNAMIC_FTRACE
20 #define ASSIGN_OPS_HASH(opsname, val) \
21         .func_hash              = val, \
22         .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
23 #else
24 #define ASSIGN_OPS_HASH(opsname, val)
25 #endif
26
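/* When set, function graph tracing is permanently disabled; see ftrace_graph_stop() */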
27 DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
28 int ftrace_graph_active;
29
30 /* Enabled by default (can be cleared by function_graph tracer flags) */
31 static bool fgraph_sleep_time = true;
32
33 #ifdef CONFIG_DYNAMIC_FTRACE
34 /*
35  * archs can override this function if they must do something
36  * to enable the hook for the graph tracer.
37  */
38 int __weak ftrace_enable_ftrace_graph_caller(void)
39 {
40         return 0;
41 }
42
43 /*
44  * archs can override this function if they must do something
45  * to disable the hook for the graph tracer.
46  */
47 int __weak ftrace_disable_ftrace_graph_caller(void)
48 {
49         return 0;
50 }
51 #endif
52
53 /**
54  * ftrace_graph_stop - set to permanently disable function graph tracing
55  *
56  * In case of an error in function graph tracing, this is called
57  * to try to keep function graph tracing from causing any more harm.
58  * Usually this is pretty severe and this is called to try to at least
59  * get a warning out to the user.
60  */
61 void ftrace_graph_stop(void)
62 {
63         static_branch_enable(&kill_ftrace_graph);
64 }
65
66 /* Add a function return address to the current task's return trace stack. */
67 static int
68 ftrace_push_return_trace(unsigned long ret, unsigned long func,
69                          unsigned long frame_pointer, unsigned long *retp)
70 {
71         unsigned long long calltime;
72         int index;
73
74         if (unlikely(ftrace_graph_is_dead()))
75                 return -EBUSY;
76
77         if (!current->ret_stack)
78                 return -EBUSY;
79
80         /*
81          * We must make sure the ret_stack is tested before we read
82          * anything else.
83          */
84         smp_rmb();
85
86         /* The return trace stack is full */
87         if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
88                 atomic_inc(&current->trace_overrun);
89                 return -EBUSY;
90         }
91
92         calltime = trace_clock_local();
93
94         index = ++current->curr_ret_stack;
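        /*
         * Commit the new index before filling in the entry, so that anything
         * which interrupts us and pushes onto the ret_stack uses the next
         * slot instead of overwriting this one.
         */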
95         barrier();
96         current->ret_stack[index].ret = ret;
97         current->ret_stack[index].func = func;
98         current->ret_stack[index].calltime = calltime;
99 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
100         current->ret_stack[index].fp = frame_pointer;
101 #endif
102 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
103         current->ret_stack[index].retp = retp;
104 #endif
105         return 0;
106 }
107
108 /*
109  * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
110  * functions. But those archs currently don't support direct functions
111  * anyway, and ftrace_find_rec_direct() is just a stub for them.
112  * Define MCOUNT_INSN_SIZE to keep those archs compiling.
113  */
114 #ifndef MCOUNT_INSN_SIZE
115 /* Make sure this only works without direct calls */
116 # ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
117 #  error MCOUNT_INSN_SIZE not defined with direct calls enabled
118 # endif
119 # define MCOUNT_INSN_SIZE 0
120 #endif
121
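/*
 * Entry-side hook of the graph tracer, normally reached from the
 * architecture's function entry code (e.g. prepare_ftrace_return()).
 * It records the call site and the original return address on the
 * shadow ret_stack.  Returns 0 if the call is being traced, -EBUSY
 * otherwise.
 */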
122 int function_graph_enter(unsigned long ret, unsigned long func,
123                          unsigned long frame_pointer, unsigned long *retp)
124 {
125         struct ftrace_graph_ent trace;
126
127 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
128         /*
129          * Skip graph tracing if the return location is served by a direct trampoline,
130          * since call sequence and return addresses are unpredictable anyway.
131          * Ex: BPF trampoline may call original function and may skip frame
132          * depending on type of BPF programs attached.
133          */
134         if (ftrace_direct_func_count &&
135             ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
136                 return -EBUSY;
137 #endif
138         trace.func = func;
139         trace.depth = ++current->curr_ret_depth;
140
141         if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
142                 goto out;
143
144         /* Only trace if the calling function expects to */
145         if (!ftrace_graph_entry(&trace))
146                 goto out_ret;
147
148         return 0;
149  out_ret:
150         current->curr_ret_stack--;
151  out:
152         current->curr_ret_depth--;
153         return -EBUSY;
154 }
155
156 /* Retrieve a function return address from the current task's return trace stack. */
157 static void
158 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
159                         unsigned long frame_pointer)
160 {
161         int index;
162
163         index = current->curr_ret_stack;
164
165         if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
166                 ftrace_graph_stop();
167                 WARN_ON(1);
168                 /* Might as well panic, otherwise we have nowhere to go */
169                 *ret = (unsigned long)panic;
170                 return;
171         }
172
173 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
174         /*
175          * The arch may choose to record the frame pointer used
176          * and check it here to make sure that it is what we expect it
177          * to be. If gcc does not set the place holder of the return
178          * address in the frame pointer, and does a copy instead, then
179          * the function graph trace will fail. This test detects this
180          * case.
181          *
182          * Currently, x86_32 with optimization for size (-Os) makes the latest
183          * gcc do the above.
184          *
185          * Note, -mfentry does not use frame pointers, and this test
186          *  is not needed if CC_USING_FENTRY is set.
187          */
188         if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
189                 ftrace_graph_stop();
190                 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
191                      "  from func %ps return to %lx\n",
192                      current->ret_stack[index].fp,
193                      frame_pointer,
194                      (void *)current->ret_stack[index].func,
195                      current->ret_stack[index].ret);
196                 *ret = (unsigned long)panic;
197                 return;
198         }
199 #endif
200
201         *ret = current->ret_stack[index].ret;
202         trace->func = current->ret_stack[index].func;
203         trace->calltime = current->ret_stack[index].calltime;
204         trace->overrun = atomic_read(&current->trace_overrun);
205         trace->depth = current->curr_ret_depth--;
206         /*
207          * We still want to trace interrupts coming in if
208          * max_depth is set to 1. Make sure the decrement is
209          * seen before ftrace_graph_return.
210          */
211         barrier();
212 }
213
214 /*
215  * Hibernation protection.
216  * The state of the current task is too unstable during
217  * suspend/restore to disk. We want to protect against that.
218  */
219 static int
220 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
221                                                         void *unused)
222 {
223         switch (state) {
224         case PM_HIBERNATION_PREPARE:
225                 pause_graph_tracing();
226                 break;
227
228         case PM_POST_HIBERNATION:
229                 unpause_graph_tracing();
230                 break;
231         }
232         return NOTIFY_DONE;
233 }
234
235 static struct notifier_block ftrace_suspend_notifier = {
236         .notifier_call = ftrace_suspend_notifier_call,
237 };
238
239 /* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
240 struct fgraph_ret_regs;
241
242 /*
243  * Send the trace to the ring-buffer.
244  * @return the original return address.
245  */
246 static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
247                                                 unsigned long frame_pointer)
248 {
249         struct ftrace_graph_ret trace;
250         unsigned long ret;
251
252         ftrace_pop_return_trace(&trace, &ret, frame_pointer);
253 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
254         trace.retval = fgraph_ret_regs_return_value(ret_regs);
255 #endif
256         trace.rettime = trace_clock_local();
257         ftrace_graph_return(&trace);
258         /*
259          * The ftrace_graph_return() may still access the current
260          * ret_stack structure, we need to make sure the update of
261          * curr_ret_stack is after that.
262          */
263         barrier();
264         current->curr_ret_stack--;
265
266         if (unlikely(!ret)) {
267                 ftrace_graph_stop();
268                 WARN_ON(1);
269                 /* Might as well panic. What else to do? */
270                 ret = (unsigned long)panic;
271         }
272
273         return ret;
274 }
275
276 /*
277  * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
278  * leave only ftrace_return_to_handler(ret_regs).
279  */
280 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
281 unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
282 {
283         return __ftrace_return_to_handler(ret_regs,
284                                 fgraph_ret_regs_frame_pointer(ret_regs));
285 }
286 #else
287 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
288 {
289         return __ftrace_return_to_handler(NULL, frame_pointer);
290 }
291 #endif
292
293 /**
294  * ftrace_graph_get_ret_stack - return the entry of the shadow stack
295  * @task: The task to read the shadow stack from
296  * @idx: Index down the shadow stack
297  *
298  * Return the ret_stack entry on the shadow stack of @task at
299  * call graph depth @idx, starting with zero. If @idx is zero, it
300  * will return the last saved ret_stack entry. If it is greater than
301  * zero, it will return the corresponding ret_stack for the depth
302  * of saved return addresses.
303  */
304 struct ftrace_ret_stack *
305 ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
306 {
307         idx = task->curr_ret_stack - idx;
308
309         if (idx >= 0 && idx <= task->curr_ret_stack)
310                 return &task->ret_stack[idx];
311
312         return NULL;
313 }
314
315 /**
316  * ftrace_graph_ret_addr - convert a potentially modified stack return address
317  *                         to its original value
318  *
319  * This function can be called by stack unwinding code to convert a found stack
320  * return address ('ret') to its original value, in case the function graph
321  * tracer has modified it to be 'return_to_handler'.  If the address hasn't
322  * been modified, the unchanged value of 'ret' is returned.
323  *
324  * 'idx' is a state variable which should be initialized by the caller to zero
325  * before the first call.
326  *
327  * 'retp' is a pointer to the return address on the stack.  It's ignored if
328  * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
329  */
330 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
331 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
332                                     unsigned long ret, unsigned long *retp)
333 {
334         int index = task->curr_ret_stack;
335         int i;
336
337         if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
338                 return ret;
339
340         if (index < 0)
341                 return ret;
342
343         for (i = 0; i <= index; i++)
344                 if (task->ret_stack[i].retp == retp)
345                         return task->ret_stack[i].ret;
346
347         return ret;
348 }
349 #else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
350 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
351                                     unsigned long ret, unsigned long *retp)
352 {
353         int task_idx;
354
355         if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
356                 return ret;
357
358         task_idx = task->curr_ret_stack;
359
360         if (!task->ret_stack || task_idx < *idx)
361                 return ret;
362
363         task_idx -= *idx;
364         (*idx)++;
365
366         return task->ret_stack[task_idx].ret;
367 }
368 #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
369
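/*
 * The ftrace_ops used to hook functions for the graph tracer.  With
 * CONFIG_DYNAMIC_FTRACE it shares its filter hash with global_ops, so
 * set_ftrace_filter and set_ftrace_notrace apply to the graph tracer too.
 */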
370 static struct ftrace_ops graph_ops = {
371         .func                   = ftrace_graph_func,
372         .flags                  = FTRACE_OPS_FL_INITIALIZED |
373                                    FTRACE_OPS_FL_PID |
374                                    FTRACE_OPS_GRAPH_STUB,
375 #ifdef FTRACE_GRAPH_TRAMP_ADDR
376         .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
377         /* trampoline_size is only needed for dynamically allocated tramps */
378 #endif
379         ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
380 };
381
382 void ftrace_graph_sleep_time_control(bool enable)
383 {
384         fgraph_sleep_time = enable;
385 }
386
387 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
388 {
389         return 0;
390 }
391
392 /*
393  * Simply points to ftrace_stub, but with the proper protocol.
394  * Defined by the linker script in linux/vmlinux.lds.h
395  */
396 extern void ftrace_stub_graph(struct ftrace_graph_ret *);
397
398 /* The callbacks that hook a function */
399 trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
400 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
401 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
402
403 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
404 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
405 {
406         int i;
407         int ret = 0;
408         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
409         struct task_struct *g, *t;
410
411         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
412                 ret_stack_list[i] =
413                         kmalloc_array(FTRACE_RETFUNC_DEPTH,
414                                       sizeof(struct ftrace_ret_stack),
415                                       GFP_KERNEL);
416                 if (!ret_stack_list[i]) {
417                         start = 0;
418                         end = i;
419                         ret = -ENOMEM;
420                         goto free;
421                 }
422         }
423
424         rcu_read_lock();
425         for_each_process_thread(g, t) {
426                 if (start == end) {
427                         ret = -EAGAIN;
428                         goto unlock;
429                 }
430
431                 if (t->ret_stack == NULL) {
432                         atomic_set(&t->trace_overrun, 0);
433                         t->curr_ret_stack = -1;
434                         t->curr_ret_depth = -1;
435                         /* Make sure the tasks see the -1 first: */
436                         smp_wmb();
437                         t->ret_stack = ret_stack_list[start++];
438                 }
439         }
440
441 unlock:
442         rcu_read_unlock();
443 free:
444         for (i = start; i < end; i++)
445                 kfree(ret_stack_list[i]);
446         return ret;
447 }
448
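/*
 * sched_switch probe: when sleep time is not being counted, add the time
 * that @next spent switched out to the calltime of its pending ret_stack
 * entries, so that the sleep does not inflate the reported durations.
 */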
449 static void
450 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
451                                 struct task_struct *prev,
452                                 struct task_struct *next,
453                                 unsigned int prev_state)
454 {
455         unsigned long long timestamp;
456         int index;
457
458         /*
459          * Does the user want to count the time a function was asleep?
460          * If so, do not update the time stamps.
461          */
462         if (fgraph_sleep_time)
463                 return;
464
465         timestamp = trace_clock_local();
466
467         prev->ftrace_timestamp = timestamp;
468
469         /* only process tasks that we timestamped */
470         if (!next->ftrace_timestamp)
471                 return;
472
473         /*
474          * Update all the counters in next to make up for the
475          * time next was sleeping.
476          */
477         timestamp -= next->ftrace_timestamp;
478
479         for (index = next->curr_ret_stack; index >= 0; index--)
480                 next->ret_stack[index].calltime += timestamp;
481 }
482
483 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
484 {
485         if (!ftrace_ops_test(&global_ops, trace->func, NULL))
486                 return 0;
487         return __ftrace_graph_entry(trace);
488 }
489
490 /*
491  * The function graph tracer should only trace the functions defined
492  * by set_ftrace_filter and set_ftrace_notrace. If another function
493  * tracer ops is registered, the graph tracer requires testing the
494  * function against the global ops, and not just trace any function
495  * that any ftrace_ops registered.
496  */
497 void update_function_graph_func(void)
498 {
499         struct ftrace_ops *op;
500         bool do_test = false;
501
502         /*
503          * The graph and global ops share the same set of functions
504          * to test. If any other ops is on the list, then
505          * the graph tracing needs to test if it's the function
506          * it should call.
507          */
508         do_for_each_ftrace_op(op, ftrace_ops_list) {
509                 if (op != &global_ops && op != &graph_ops &&
510                     op != &ftrace_list_end) {
511                         do_test = true;
512                         /* in double loop, break out with goto */
513                         goto out;
514                 }
515         } while_for_each_ftrace_op(op);
516  out:
517         if (do_test)
518                 ftrace_graph_entry = ftrace_graph_entry_test;
519         else
520                 ftrace_graph_entry = __ftrace_graph_entry;
521 }
522
523 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
524
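/* Attach a freshly allocated ret_stack to @t and reset its graph tracing state */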
525 static void
526 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
527 {
528         atomic_set(&t->trace_overrun, 0);
529         t->ftrace_timestamp = 0;
530         /* make curr_ret_stack visible before we add the ret_stack */
531         smp_wmb();
532         t->ret_stack = ret_stack;
533 }
534
535 /*
536  * Allocate a return stack for the idle task. May be the first
537  * time through, or it may be called again when a CPU comes online via hotplug.
538  */
539 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
540 {
541         t->curr_ret_stack = -1;
542         t->curr_ret_depth = -1;
543         /*
544          * The idle task has no parent, it either has its own
545          * stack or no stack at all.
546          */
547         if (t->ret_stack)
548                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
549
550         if (ftrace_graph_active) {
551                 struct ftrace_ret_stack *ret_stack;
552
553                 ret_stack = per_cpu(idle_ret_stack, cpu);
554                 if (!ret_stack) {
555                         ret_stack =
556                                 kmalloc_array(FTRACE_RETFUNC_DEPTH,
557                                               sizeof(struct ftrace_ret_stack),
558                                               GFP_KERNEL);
559                         if (!ret_stack)
560                                 return;
561                         per_cpu(idle_ret_stack, cpu) = ret_stack;
562                 }
563                 graph_init_task(t, ret_stack);
564         }
565 }
566
567 /* Allocate a return stack for newly created task */
568 void ftrace_graph_init_task(struct task_struct *t)
569 {
570         /* Make sure we do not use the parent ret_stack */
571         t->ret_stack = NULL;
572         t->curr_ret_stack = -1;
573         t->curr_ret_depth = -1;
574
575         if (ftrace_graph_active) {
576                 struct ftrace_ret_stack *ret_stack;
577
578                 ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
579                                           sizeof(struct ftrace_ret_stack),
580                                           GFP_KERNEL);
581                 if (!ret_stack)
582                         return;
583                 graph_init_task(t, ret_stack);
584         }
585 }
586
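/* Called when a task is freed: detach its return stack and free it */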
587 void ftrace_graph_exit_task(struct task_struct *t)
588 {
589         struct ftrace_ret_stack *ret_stack = t->ret_stack;
590
591         t->ret_stack = NULL;
592         /* NULL must become visible to IRQs before we free it: */
593         barrier();
594
595         kfree(ret_stack);
596 }
597
598 /* Allocate a return stack for each task */
599 static int start_graph_tracing(void)
600 {
601         struct ftrace_ret_stack **ret_stack_list;
602         int ret, cpu;
603
604         ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
605                                        sizeof(struct ftrace_ret_stack *),
606                                        GFP_KERNEL);
607
608         if (!ret_stack_list)
609                 return -ENOMEM;
610
611         /* The cpu_boot init_task->ret_stack will never be freed */
612         for_each_online_cpu(cpu) {
613                 if (!idle_task(cpu)->ret_stack)
614                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
615         }
616
617         do {
618                 ret = alloc_retstack_tasklist(ret_stack_list);
619         } while (ret == -EAGAIN);
620
621         if (!ret) {
622                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
623                 if (ret)
624                         pr_info("ftrace_graph: Couldn't activate tracepoint"
625                                 " probe to kernel_sched_switch\n");
626         }
627
628         kfree(ret_stack_list);
629         return ret;
630 }
631
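/*
 * Register the function graph tracer described by @gops.  Only one graph
 * tracer may be registered at a time.
 *
 * A minimal usage sketch (callback and variable names are illustrative):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	ret = register_ftrace_graph(&my_gops);
 *
 * The entry callback returning nonzero means "trace this function";
 * returning zero skips it.  Returns 0 on success, -EBUSY if a graph
 * tracer is already registered, or another negative errno on failure.
 */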
632 int register_ftrace_graph(struct fgraph_ops *gops)
633 {
634         int ret = 0;
635
636         mutex_lock(&ftrace_lock);
637
638         /* we currently allow only one tracer registered at a time */
639         if (ftrace_graph_active) {
640                 ret = -EBUSY;
641                 goto out;
642         }
643
644         register_pm_notifier(&ftrace_suspend_notifier);
645
646         ftrace_graph_active++;
647         ret = start_graph_tracing();
648         if (ret) {
649                 ftrace_graph_active--;
650                 goto out;
651         }
652
653         ftrace_graph_return = gops->retfunc;
654
655         /*
656          * Update the indirect function to the entryfunc, and the
657          * function that gets called to the entry_test first. Then
658          * call the update fgraph entry function to determine if
659          * the entryfunc should be called directly or not.
660          */
661         __ftrace_graph_entry = gops->entryfunc;
662         ftrace_graph_entry = ftrace_graph_entry_test;
663         update_function_graph_func();
664
665         ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
666 out:
667         mutex_unlock(&ftrace_lock);
668         return ret;
669 }
670
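/*
 * Undo register_ftrace_graph(): restore the stub callbacks, shut down the
 * function-return hook, and unregister the PM and sched_switch notifiers.
 */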
671 void unregister_ftrace_graph(struct fgraph_ops *gops)
672 {
673         mutex_lock(&ftrace_lock);
674
675         if (unlikely(!ftrace_graph_active))
676                 goto out;
677
678         ftrace_graph_active--;
679         ftrace_graph_return = ftrace_stub_graph;
680         ftrace_graph_entry = ftrace_graph_entry_stub;
681         __ftrace_graph_entry = ftrace_graph_entry_stub;
682         ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
683         unregister_pm_notifier(&ftrace_suspend_notifier);
684         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
685
686  out:
687         mutex_unlock(&ftrace_lock);
688 }