/* drivers/staging/lttng/instrumentation/events/lttng-module/sched.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

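/*
 * Helper used by sched_switch below (kernels >= 2.6.35): a task that is
 * being preempted is still runnable, so report it as TASK_RUNNING.  On
 * kernels >= 3.2.0 the extra TASK_STATE_MAX bit is also set, which the
 * sched_switch TP_printk() turns into a trailing "+" after the state
 * letter, so preemption can be told apart from a voluntary switch.
 */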
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
        long state = p->state;

#ifdef CONFIG_PREEMPT
        /*
         * For all intents and purposes a preempted task is a running task.
         */
        if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
                state = TASK_RUNNING | TASK_STATE_MAX;
#else
                state = TASK_RUNNING;
#endif
#endif

        return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */

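/*
 * Note on conventions: this is the LTTng out-of-tree instrumentation of the
 * scheduler events.  __array_text(), tp_memcpy(), tp_assign() and tp_strcpy()
 * are the LTTng probe macros playing the role of the mainline __array(),
 * __entry-> assignments and __assign_str() found in
 * include/trace/events/sched.h; they are expanded by the LTTng probe headers
 * pulled in through ../../../probes/define_trace.h at the end of this file.
 */
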
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

        TP_PROTO(struct task_struct *t),

        TP_ARGS(t),

        TP_STRUCT__entry(
                __array_text(   char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  tid                     )
        ),

        TP_fast_assign(
                tp_memcpy(comm, t->comm, TASK_COMM_LEN)
                tp_assign(tid, t->pid)
        ),

        TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

        TP_PROTO(int ret),

        TP_ARGS(ret),

        TP_STRUCT__entry(
                __field(        int,    ret     )
        ),

        TP_fast_assign(
                tp_assign(ret, ret)
        ),

        TP_printk("ret=%d", __entry->ret)
)

/*
 * Tracepoint for waking up a task:
 */
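/*
 * sched_wakeup and sched_wakeup_new below share this event class, so they
 * record the same fields and print the same way.  Before 2.6.35 the
 * tracepoint prototype also carried the runqueue pointer, hence the two
 * TP_PROTO/TP_ARGS variants.  On kernels >= 3.6.0 the class additionally
 * emits a TP_perf_assign(__perf_task(p)) block so that perf attributes the
 * event to the woken task rather than to the waker.
 */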
DECLARE_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
        TP_PROTO(struct task_struct *p, int success),

        TP_ARGS(p, success),
#else
        TP_PROTO(struct rq *rq, struct task_struct *p, int success),

        TP_ARGS(rq, p, success),
#endif

        TP_STRUCT__entry(
                __array_text(   char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  tid                     )
                __field(        int,    prio                    )
                __field(        int,    success                 )
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
                __field(        int,    target_cpu              )
#endif
        ),

        TP_fast_assign(
                tp_memcpy(comm, p->comm, TASK_COMM_LEN)
                tp_assign(tid, p->pid)
                tp_assign(prio, p->prio)
                tp_assign(success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
                tp_assign(target_cpu, task_cpu(p))
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
        )
        TP_perf_assign(
                __perf_task(p)
#endif
        ),

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
        TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
                  __entry->comm, __entry->tid, __entry->prio,
                  __entry->success, __entry->target_cpu)
#else
        TP_printk("comm=%s tid=%d prio=%d success=%d",
                  __entry->comm, __entry->tid, __entry->prio,
                  __entry->success)
#endif
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct rq *rq, struct task_struct *p, int success),
             TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct rq *rq, struct task_struct *p, int success),
             TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
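/*
 * prev_state is the raw task state at switch-out time, possibly rewritten by
 * __trace_sched_switch_state() above.  TP_printk() decodes it with the usual
 * one-letter state codes (R running, S interruptible sleep, D uninterruptible,
 * T stopped, t traced, Z zombie, X/x dead), plus a trailing "+" on kernels
 * >= 3.2.0 when the task was merely preempted.  prev_prio/next_prio are
 * rebased against MAX_RT_PRIO, so normal tasks show 0..39 (nice -20..19)
 * and real-time tasks show negative values.
 */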
TRACE_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
        TP_PROTO(struct task_struct *prev,
                 struct task_struct *next),

        TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
        TP_PROTO(struct rq *rq, struct task_struct *prev,
                 struct task_struct *next),

        TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

        TP_STRUCT__entry(
                __array_text(   char,   prev_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  prev_tid                        )
                __field(        int,    prev_prio                       )
                __field(        long,   prev_state                      )
                __array_text(   char,   next_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  next_tid                        )
                __field(        int,    next_prio                       )
        ),

        TP_fast_assign(
                tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
                tp_assign(prev_tid, prev->pid)
                tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
                tp_assign(prev_state, __trace_sched_switch_state(prev))
#else
                tp_assign(prev_state, prev->state)
#endif
                tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
                tp_assign(next_tid, next->pid)
                tp_assign(next_prio, next->prio - MAX_RT_PRIO)
        ),

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
        TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_tid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
                __entry->prev_state & (TASK_STATE_MAX-1) ?
                  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
                                { 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
                                { 128, "W" }) : "R",
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_tid, __entry->next_prio)
#else
        TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
                __entry->prev_state ?
                  __print_flags(__entry->prev_state, "|",
                                { 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
                                { 128, "W" }) : "R",
                __entry->next_comm, __entry->next_tid, __entry->next_prio)
#endif
)

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

        TP_PROTO(struct task_struct *p, int dest_cpu),

        TP_ARGS(p, dest_cpu),

        TP_STRUCT__entry(
                __array_text(   char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  tid                     )
                __field(        int,    prio                    )
                __field(        int,    orig_cpu                )
                __field(        int,    dest_cpu                )
        ),

        TP_fast_assign(
                tp_memcpy(comm, p->comm, TASK_COMM_LEN)
                tp_assign(tid, p->pid)
                tp_assign(prio, p->prio - MAX_RT_PRIO)
                tp_assign(orig_cpu, task_cpu(p))
                tp_assign(dest_cpu, dest_cpu)
        ),

        TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->tid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
)

DECLARE_EVENT_CLASS(sched_process_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array_text(   char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  tid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                tp_memcpy(comm, p->comm, TASK_COMM_LEN)
                tp_assign(tid, p->pid)
                tp_assign(prio, p->prio - MAX_RT_PRIO)
        ),

        TP_printk("comm=%s tid=%d prio=%d",
                  __entry->comm, __entry->tid, __entry->prio)
)

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
DEFINE_EVENT(sched_process_template, sched_wait_task,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
DEFINE_EVENT(sched_process_template, sched_wait_task,
        TP_PROTO(struct rq *rq, struct task_struct *p),
        TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

        TP_PROTO(struct pid *pid),

        TP_ARGS(pid),

        TP_STRUCT__entry(
                __array_text(   char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  tid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                tp_memcpy(comm, current->comm, TASK_COMM_LEN)
                tp_assign(tid, pid_nr(pid))
                tp_assign(prio, current->prio - MAX_RT_PRIO)
        ),

        TP_printk("comm=%s tid=%d prio=%d",
                  __entry->comm, __entry->tid, __entry->prio)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between the creation of a new process and
 * the creation of a new thread: a newly created process has child_tid ==
 * child_pid, whereas a new thread has child_tid != child_pid.
 */
TRACE_EVENT(sched_process_fork,

        TP_PROTO(struct task_struct *parent, struct task_struct *child),

        TP_ARGS(parent, child),

        TP_STRUCT__entry(
                __array_text(   char,   parent_comm,    TASK_COMM_LEN   )
                __field(        pid_t,  parent_tid                      )
                __field(        pid_t,  parent_pid                      )
                __array_text(   char,   child_comm,     TASK_COMM_LEN   )
                __field(        pid_t,  child_tid                       )
                __field(        pid_t,  child_pid                       )
        ),

        TP_fast_assign(
                tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
                tp_assign(parent_tid, parent->pid)
                tp_assign(parent_pid, parent->tgid)
                tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
                tp_assign(child_tid, child->pid)
                tp_assign(child_pid, child->tgid)
        ),

        TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
                __entry->parent_comm, __entry->parent_tid,
                __entry->child_comm, __entry->child_tid)
)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal:
 */
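/*
 * Note: upstream dropped the sched_signal_send tracepoint around 2.6.33 (the
 * signal_generate event in the signal subsystem took over that role), which
 * is why it is only instrumented for older kernels here.
 */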
373 TRACE_EVENT(sched_signal_send,
374
375         TP_PROTO(int sig, struct task_struct *p),
376
377         TP_ARGS(sig, p),
378
379         TP_STRUCT__entry(
380                 __field(        int,    sig                     )
381                 __array(        char,   comm,   TASK_COMM_LEN   )
382                 __field(        pid_t,  pid                     )
383         ),
384
385         TP_fast_assign(
386                 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
387                 tp_assign(pid, p->pid)
388                 tp_assign(sig, sig)
389         ),
390
391         TP_printk("sig=%d comm=%s pid=%d",
392                 __entry->sig, __entry->comm, __entry->pid)
393 )
394 #endif
395
396 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
397 /*
398  * Tracepoint for exec:
399  */
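/*
 * filename is recorded as a dynamically sized string: __string() sizes the
 * field from bprm->filename at event time and tp_strcpy() fills it in, so the
 * full path of the new executable is kept without a fixed-size buffer.
 * old_tid is the TID the task had before the exec (an exec performed by a
 * non-leader thread can change it).
 */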
TRACE_EVENT(sched_process_exec,

        TP_PROTO(struct task_struct *p, pid_t old_pid,
                 struct linux_binprm *bprm),

        TP_ARGS(p, old_pid, bprm),

        TP_STRUCT__entry(
                __string(       filename,       bprm->filename  )
                __field(        pid_t,          tid             )
                __field(        pid_t,          old_tid         )
        ),

        TP_fast_assign(
                tp_strcpy(filename, bprm->filename)
                tp_assign(tid, p->pid)
                tp_assign(old_tid, old_pid)
        ),

        TP_printk("filename=%s tid=%d old_tid=%d", __get_str(filename),
                  __entry->tid, __entry->old_tid)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE
 *     tasks; adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
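/*
 * The sched_stat_* events below all share sched_stat_template: one task plus
 * a delay measured in nanoseconds.  As in the mainline event, the
 * TP_perf_assign(__perf_count(delay)) block lets perf weight each sample by
 * the delay rather than counting every event as 1, so a session such as
 * "perf record -e sched:sched_stat_sleep" accumulates time, not event counts.
 */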
DECLARE_EVENT_CLASS(sched_stat_template,

        TP_PROTO(struct task_struct *tsk, u64 delay),

        TP_ARGS(tsk, delay),

        TP_STRUCT__entry(
                __array_text( char,     comm,   TASK_COMM_LEN   )
                __field( pid_t, tid                     )
                __field( u64,   delay                   )
        ),

        TP_fast_assign(
                tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
                tp_assign(tid,  tsk->pid)
                tp_assign(delay, delay)
        )
        TP_perf_assign(
                __perf_count(delay)
        ),

        TP_printk("comm=%s tid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->tid,
                        (unsigned long long)__entry->delay)
)


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay))
#endif

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

        TP_ARGS(tsk, runtime, vruntime),

        TP_STRUCT__entry(
                __array_text( char,     comm,   TASK_COMM_LEN   )
                __field( pid_t, tid                     )
                __field( u64,   runtime                 )
                __field( u64,   vruntime                )
        ),

        TP_fast_assign(
                tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
                tp_assign(tid, tsk->pid)
                tp_assign(runtime, runtime)
                tp_assign(vruntime, vruntime)
        )
        TP_perf_assign(
                __perf_count(runtime)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
                __perf_task(tsk)
#endif
        ),

        TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
                        __entry->comm, __entry->tid,
                        (unsigned long long)__entry->runtime,
                        (unsigned long long)__entry->vruntime)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
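/*
 * newprio is the priority the task is being boosted (or restored) to by the
 * rt_mutex priority-inheritance code; oldprio is read from tsk->prio before
 * the change.  Both are rebased against MAX_RT_PRIO, like the other prio
 * fields in this file.
 */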
TRACE_EVENT(sched_pi_setprio,

        TP_PROTO(struct task_struct *tsk, int newprio),

        TP_ARGS(tsk, newprio),

        TP_STRUCT__entry(
                __array_text( char,     comm,   TASK_COMM_LEN   )
                __field( pid_t, tid                     )
                __field( int,   oldprio                 )
                __field( int,   newprio                 )
        ),

        TP_fast_assign(
                tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
                tp_assign(tid, tsk->pid)
                tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
                tp_assign(newprio, newprio - MAX_RT_PRIO)
        ),

        TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
                        __entry->comm, __entry->tid,
                        __entry->oldprio, __entry->newprio)
)
#endif

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"
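/*
 * Like the in-tree trace headers, this file relies on TRACE_HEADER_MULTI_READ:
 * define_trace.h re-includes it several times with TRACE_EVENT and friends
 * redefined, generating the LTTng probe code from the declarations above.
 * The relative include deliberately picks up the LTTng probes'
 * define_trace.h rather than the kernel's <trace/define_trace.h>.
 */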