* accumulated times to the current process, and to prepare accounting on
* the next process.
*/
-void account_switch_vtime(struct task_struct *prev)
+void vtime_task_switch(struct task_struct *prev)
{
struct thread_info *pi = task_thread_info(prev);
struct thread_info *ni = task_thread_info(current);
* Account time for a transition between system, hard irq or soft irq state.
* Note that this function is called with interrupts enabled.
*/
-void account_system_vtime(struct task_struct *tsk)
+void vtime_account(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
unsigned long flags;
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(account_system_vtime);
+EXPORT_SYMBOL_GPL(vtime_account);
/*
* Called from the timer interrupt handler to charge accumulated user time
* Account time for a transition between system, hard irq
* or soft irq state.
*/
-void account_system_vtime(struct task_struct *tsk)
+void vtime_account(struct task_struct *tsk)
{
u64 now, nowscaled, delta, deltascaled;
unsigned long flags;
}
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(account_system_vtime);
+EXPORT_SYMBOL_GPL(vtime_account);
/*
* Transfer the user and system times accumulated in the paca
* by the exception entry and exit code to the generic process
* user and system time records.
* Must be called with interrupts disabled.
- * Assumes that account_system_vtime() has been called recently
+ * Assumes that vtime_account() has been called recently
* (i.e. since the last entry from usermode) so that
* get_paca()->user_time_scaled is up to date.
*/
account_user_time(tsk, utime, utimescaled);
}
-void account_switch_vtime(struct task_struct *prev)
+void vtime_task_switch(struct task_struct *prev)
{
- account_system_vtime(prev);
+ vtime_account(prev);
account_process_tick(prev, 0);
}
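
The powerpc comment above describes the general pattern behind these hunks: the low-level entry/exit code only accumulates raw time in per-cpu scratch fields, and a flush step later converts those deltas and hands them to the generic accounting helpers. A minimal sketch of that shape (not part of the patch; the struct and field names are hypothetical, not the real paca layout):

/*
 * Illustrative sketch only: acct_user and acct_user_scaled are hypothetical
 * scratch fields, and the conversion from raw timebase ticks to cputime_t
 * is glossed over.
 */
struct demo_vtime_scratch {
	cputime_t acct_user;		/* user time accrued since last flush */
	cputime_t acct_user_scaled;	/* scaled (SPURR-based) variant */
};

static void demo_flush_user_time(struct task_struct *tsk,
				 struct demo_vtime_scratch *s)
{
	cputime_t utime = s->acct_user;
	cputime_t utimescaled = s->acct_user_scaled;

	/* Zero the accumulators so the next interval starts clean... */
	s->acct_user = 0;
	s->acct_user_scaled = 0;

	/* ...then hand the totals to the generic accounting, as the hunk
	 * above does with the paca-derived values. */
	account_user_time(tsk, utime, utimescaled);
}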
return virt_timer_forward(user + system);
}
-void account_switch_vtime(struct task_struct *prev)
+void vtime_task_switch(struct task_struct *prev)
{
struct thread_info *ti;
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-void account_system_vtime(struct task_struct *tsk)
+void vtime_account(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, system;
virt_timer_forward(system);
}
-EXPORT_SYMBOL_GPL(account_system_vtime);
+EXPORT_SYMBOL_GPL(vtime_account);
void __kprobes vtime_stop_cpu(void)
{
struct task_struct;
#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
-static inline void account_system_vtime(struct task_struct *tsk)
+static inline void vtime_account(struct task_struct *tsk)
{
}
#else
-extern void account_system_vtime(struct task_struct *tsk);
+extern void vtime_account(struct task_struct *tsk);
#endif
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
*/
#define __irq_enter() \
do { \
- account_system_vtime(current); \
+ vtime_account(current); \
add_preempt_count(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \
} while (0)
#define __irq_exit() \
do { \
trace_hardirq_exit(); \
- account_system_vtime(current); \
+ vtime_account(current); \
sub_preempt_count(HARDIRQ_OFFSET); \
} while (0)
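
For orientation, here is a sketch (not part of the patch) of where these hooks fire on a typical interrupt path. demo_do_IRQ() and demo_handle_irq() are made-up names; irq_enter() and irq_exit() are the real wrappers, and after this rename both end up calling vtime_account(current):

static void demo_do_IRQ(unsigned int irq)
{
	irq_enter();		/* ends in __irq_enter(): vtime_account(current)
				 * flushes time accrued before hardirq entry */
	demo_handle_irq(irq);	/* hypothetical device handler */
	irq_exit();		/* vtime_account(current) again: flushes the
				 * hardirq time, then may run softirqs */
}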
extern void account_idle_ticks(unsigned long ticks);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_switch_vtime(struct task_struct *prev);
+extern void vtime_task_switch(struct task_struct *prev);
#else
-static inline void account_switch_vtime(struct task_struct *prev) { }
+static inline void vtime_task_switch(struct task_struct *prev) { }
#endif
#endif /* _LINUX_KERNEL_STAT_H */
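
Taken together, the header changes spell out the contract an architecture selecting CONFIG_VIRT_CPU_ACCOUNTING has to meet under the new names. A purely illustrative skeleton, mirroring the powerpc hunk above (a real implementation reads the architecture's cycle counters and splits system/irq/guest time):

void vtime_account(struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	/* charge the time elapsed since the last accounting point to tsk,
	 * splitting it between system, hardirq and softirq as appropriate */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(vtime_account);

void vtime_task_switch(struct task_struct *prev)
{
	/* flush everything prev has accrued before current takes over */
	vtime_account(prev);
	account_process_tick(prev, 0);
}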
static inline void kvm_guest_enter(void)
{
BUG_ON(preemptible());
- account_system_vtime(current);
+ vtime_account(current);
current->flags |= PF_VCPU;
/* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
static inline void kvm_guest_exit(void)
{
- account_system_vtime(current);
+ vtime_account(current);
current->flags &= ~PF_VCPU;
}
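
The KVM hooks show the same pattern at the guest boundary: flush the host-side time before flipping PF_VCPU, so that the following interval is attributed to guest time. A sketch of how a vcpu run path might bracket the guest entry (demo_vcpu and demo_enter_guest() are hypothetical; the exact interrupt state around these calls is architecture-specific):

static void demo_vcpu_run_once(struct demo_vcpu *vcpu)
{
	preempt_disable();	/* kvm_guest_enter() asserts !preemptible() */
	kvm_guest_enter();	/* vtime_account(current), then PF_VCPU is set */

	demo_enter_guest(vcpu);	/* hypothetical world switch into the guest */

	kvm_guest_exit();	/* vtime_account(current), PF_VCPU cleared */
	preempt_enable();
}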
* Manfred Spraul <manfred@colorfullife.com>
*/
prev_state = prev->state;
- account_switch_vtime(prev);
+ vtime_task_switch(prev);
finish_arch_switch(prev);
perf_event_task_sched_in(prev, current);
finish_lock_switch(rq, prev);
/*
* There are no locks covering percpu hardirq/softirq time.
- * They are only modified in account_system_vtime, on corresponding CPU
+ * They are only modified in vtime_account, on corresponding CPU
* with interrupts disabled. So, writes are safe.
* They are read and saved off onto struct rq in update_rq_clock().
* This may result in other CPU reading this CPU's irq time and can
- * race with irq/account_system_vtime on this CPU. We would either get old
+ * race with irq/vtime_account on this CPU. We would either get old
* or new value with a side effect of accounting a slice of irq time to wrong
* task when irq is in progress while we read rq->clock. That is a worthy
* compromise in place of having locks on each irq in account_system_time.
* Called before incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit.
*/
-void account_system_vtime(struct task_struct *curr)
+void vtime_account(struct task_struct *curr)
{
unsigned long flags;
s64 delta;
irq_time_write_end();
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(account_system_vtime);
+EXPORT_SYMBOL_GPL(vtime_account);
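
The locking comment above is worth unpacking: the per-cpu irq time is only written by its owning CPU with interrupts disabled, and remote readers tolerate the race by retrying around a sequence counter instead of taking a lock. A generic sketch of that pattern (hypothetical names; this is the idea, not the scheduler's exact code):

#include <linux/percpu.h>
#include <linux/seqlock.h>

struct demo_irqtime {
	seqcount_t seq;
	u64 hardirq_time;
	u64 softirq_time;
};
static DEFINE_PER_CPU(struct demo_irqtime, demo_irqtime);

/* Writer: runs on the owning CPU with interrupts disabled. */
static void demo_irqtime_add(u64 delta, bool hardirq)
{
	struct demo_irqtime *t = &__get_cpu_var(demo_irqtime);

	write_seqcount_begin(&t->seq);
	if (hardirq)
		t->hardirq_time += delta;
	else
		t->softirq_time += delta;
	write_seqcount_end(&t->seq);
}

/* Reader: may run on any CPU; retries instead of locking. */
static u64 demo_irqtime_read(int cpu)
{
	struct demo_irqtime *t = &per_cpu(demo_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = read_seqcount_begin(&t->seq);
		total = t->hardirq_time + t->softirq_time;
	} while (read_seqcount_retry(&t->seq, seq));

	return total;
}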
static int irqtime_account_hi_update(void)
{
current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
- account_system_vtime(current);
+ vtime_account(current);
__local_bh_disable((unsigned long)__builtin_return_address(0),
SOFTIRQ_OFFSET);
lockdep_softirq_exit();
- account_system_vtime(current);
+ vtime_account(current);
__local_bh_enable(SOFTIRQ_OFFSET);
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
*/
void irq_exit(void)
{
- account_system_vtime(current);
+ vtime_account(current);
trace_hardirq_exit();
sub_preempt_count(IRQ_EXIT_OFFSET);
if (!in_interrupt() && local_softirq_pending())