extern void __vtime_account_system(struct task_struct *tsk);
extern void vtime_account_system(struct task_struct *tsk);
extern void __vtime_account_idle(struct task_struct *tsk);
+extern void vtime_account(struct task_struct *tsk);
#else
static inline void vtime_task_switch(struct task_struct *prev) { }
+static inline void __vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
+static inline void vtime_account(struct task_struct *tsk) { }
#endif
-#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
-static inline void vtime_account(struct task_struct *tsk)
-{
-}
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+extern void irqtime_account_irq(struct task_struct *tsk);
#else
-extern void vtime_account(struct task_struct *tsk);
+static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif
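
The shape of this hunk is the idiom the whole cleanup relies on: declare the hook extern when its config option is on, and provide an empty static inline stub otherwise, so every caller can invoke it unconditionally. A standalone sketch of the idiom that compiles in userspace (the task_struct here is an illustrative stand-in, not the kernel's):

#include <stdio.h>

struct task_struct { int pid; };	/* illustrative stand-in */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
/* Option off: an empty inline stub that the compiler folds away. */
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void __vtime_account_system(struct task_struct *tsk);
#else
static inline void __vtime_account_system(struct task_struct *tsk) { }
#endif

/* The caller needs no #ifdef: every hook always has a definition. */
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	__vtime_account_system(tsk);
	irqtime_account_irq(tsk);
}

int main(void)
{
	struct task_struct t = { .pid = 42 };

	vtime_account_irq_exit(&t);	/* both calls vanish when neither option is set */
	printf("irq exit hooks ran for pid %d\n", t.pid);
	return 0;
}

Built plainly (cc sketch.c), both hooks are no-op stubs and the calls compile to nothing; defining either CONFIG_* macro would require linking a real implementation, which is exactly the contract the header sets up for the kernel proper. That is why the callers below can drop their #ifdef blocks.
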
static inline void vtime_account_irq_enter(struct task_struct *tsk)
{
	/* ...
	 * the idle time is flushed on hardirq time already.
	 */
vtime_account(tsk);
+ irqtime_account_irq(tsk);
}
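
Note that the enter path keeps vtime_account(): under CONFIG_VIRT_CPU_ACCOUNTING it still has to decide whether the irq interrupted the idle task (the concern the truncated comment above refers to), while irqtime_account_irq() is the newly split-out, separately configured irq-time half. Only the exit path below calls __vtime_account_system() directly, since on hard/softirq exit the elapsed time always goes to hard/softirq cputime.
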
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* On hard|softirq exit we always account to hard|softirq cputime */
__vtime_account_system(tsk);
-#endif
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
- vtime_account(tsk);
-#endif
+ irqtime_account_irq(tsk);
}
#endif /* _LINUX_KERNEL_VTIME_H */
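
Crossing into the kernel/sched/cputime.c side of the patch: the function renamed below updates the per-cpu hardirq/softirq time counters between irq_time_write_begin() and irq_time_write_end() (the latter survives in the context lines). As far as I recall from this era of the file, those helpers bump a per-cpu seqcount on 32-bit so readers can assemble an untorn 64-bit value, and compile to nothing on 64-bit. A hedged userspace sketch of that reader/writer protocol, with C11 atomics standing in for the kernel's primitives:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative seqcount guarding a 64-bit counter against torn reads. */
static atomic_uint seq;
static uint64_t irq_time;		/* written only with the seqcount held odd */

static void irq_time_write_begin(void)
{
	atomic_fetch_add(&seq, 1);	/* odd: update in progress */
}

static void irq_time_write_end(void)
{
	atomic_fetch_add(&seq, 1);	/* even again: snapshot is stable */
}

static uint64_t irq_time_read(void)
{
	unsigned int s;
	uint64_t val;

	do {
		s = atomic_load(&seq);
		val = irq_time;
	} while ((s & 1) || s != atomic_load(&seq));	/* retry if a write raced */
	return val;
}

int main(void)
{
	irq_time_write_begin();
	irq_time += 123;		/* the delta accounting lives here in the kernel */
	irq_time_write_end();

	printf("irq_time snapshot: %llu\n", (unsigned long long)irq_time_read());
	return 0;
}

(The memory ordering here is the simplified seq_cst default and the demo is single-threaded; the real code pairs the counter bumps with explicit barriers.)
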
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
-void vtime_account(struct task_struct *curr)
+void irqtime_account_irq(struct task_struct *curr)
{
unsigned long flags;
	s64 delta;

	/* ... accounting of the elapsed delta elided in this excerpt ... */
	irq_time_write_end();
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(irqtime_account_irq);
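
Why "before" matters in the comment above: when the hook runs, preempt_count still describes the context whose time slice is ending, so the delta can be charged to the right bucket (hardirq vs. softirq vs. the interrupted task). A toy userspace model of that ordering; the masks and names are illustrative stand-ins, not kernel code:

#include <stdio.h>

#define HARDIRQ_OFFSET	(1U << 16)	/* stand-in for the kernel's constant */

static unsigned int preempt_count;

static unsigned int hardirq_count(void)
{
	return preempt_count & (0xffffU << 16);
}

/*
 * Called before preempt_count changes: on entry it still sees the
 * interrupted context, on exit it still sees "in hardirq", so each
 * elapsed slice is charged to the context that actually ran it.
 */
static void irqtime_account_irq(const char *when)
{
	printf("%s: hardirq_count()=%#x -> charge the slice that just ended\n",
	       when, hardirq_count());
}

static void irq_enter(void)
{
	irqtime_account_irq("enter");	/* before incrementing preempt_count */
	preempt_count += HARDIRQ_OFFSET;
}

static void irq_exit(void)
{
	irqtime_account_irq("exit");	/* before decrementing preempt_count */
	preempt_count -= HARDIRQ_OFFSET;
}

int main(void)
{
	irq_enter();
	/* ... handler would run here; its runtime becomes hardirq time ... */
	irq_exit();
	return 0;
}
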
static int irqtime_account_hi_update(void)
{