/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif

struct task_struct;

/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
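
/*
 * Native accounting: the architecture hooks charge cputime on every
 * kernel entry/exit, so vtime is unconditionally enabled on this CPU.
 * vtime_task_switch() is provided out of line by the accounting code
 * (or overridden by the architecture).
 */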
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);

#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)

/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_enabled();
}

static inline bool vtime_accounting_enabled_cpu(int cpu)
{
	return context_tracking_enabled_cpu(cpu);
}

static inline bool vtime_accounting_enabled_this_cpu(void)
{
	return context_tracking_enabled_this_cpu();
}
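
/*
 * Illustrative usage only (a sketch, not part of this header): a tickless
 * cputime reader is expected to check the per-CPU state before trusting
 * the raw counters, roughly:
 *
 *	if (vtime_accounting_enabled_cpu(cpu))
 *		delta = compute_tickless_delta();	// hypothetical helper
 *
 * The real readers live in kernel/sched/cputime.c.
 */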

extern void vtime_task_switch_generic(struct task_struct *prev);

static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_task_switch_generic(prev);
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static inline bool vtime_accounting_enabled_cpu(int cpu) { return false; }
static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }

#endif
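
/*
 * Common vtime APIs
 */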
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline void vtime_account_kernel(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
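
/*
 * Hooks for generic, context tracking based accounting
 * (CONFIG_VIRT_CPU_ACCOUNTING_GEN): user/guest transitions are accounted
 * without relying on the timer tick.
 */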
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
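
/*
 * Native accounting of irq time: the entry hook flushes time accrued in
 * the interrupted context, the exit hook below charges the handler's time,
 * and vtime_flush() gives the architecture a point to fold any remaining
 * accumulated cputime into the task (typically at task switch).
 */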
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_kernel(tsk);
}
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
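
/*
 * CONFIG_IRQ_TIME_ACCOUNTING accounts time spent in hard and soft irqs
 * using sched_clock(), independently of the vtime machinery above.
 */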
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif
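
/*
 * Called from the hardirq/softirq entry and exit paths (__irq_enter() and
 * friends); both the vtime hooks and the sched_clock() based irqtime hook
 * get a chance to account the transition.
 */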
static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}

static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}

#endif /* _LINUX_KERNEL_VTIME_H */