}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
-static cputime_t irqtime_tick_accounted(cputime_t maxtime)
+static u64 irqtime_tick_accounted(u64 maxtime)
{
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
- cputime_t delta;
+ u64 delta;
- delta = nsecs_to_cputime(irqtime->tick_delta);
- delta = min(delta, maxtime);
- irqtime->tick_delta -= cputime_to_nsecs(delta);
+ delta = min(irqtime->tick_delta, maxtime);
+ irqtime->tick_delta -= delta;
return delta;
}
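With irqtime->tick_delta already maintained in nanoseconds, the old nsecs_to_cputime()/cputime_to_nsecs() round trip only threw away sub-jiffy remainders before clamping; the min() against maxtime is unchanged. A standalone sketch of the truncation the old path incurred (not kernel code; HZ=250 is assumed here, so one jiffy is 4 ms):

	#include <stdio.h>
	#include <stdint.h>

	/* Standalone sketch, not kernel code: NSEC_PER_JIFFY assumes HZ=250. */
	#define NSEC_PER_JIFFY 4000000ULL

	int main(void)
	{
		uint64_t tick_delta = 5300000;	/* 5.3 ms of pending irq time, in nsecs */

		/* Old path: nsecs -> cputime_t -> nsecs truncates to jiffy granularity. */
		uint64_t old_accounted = (tick_delta / NSEC_PER_JIFFY) * NSEC_PER_JIFFY;

		/* New path: plain u64 arithmetic keeps the full remainder. */
		uint64_t new_accounted = tick_delta;

		printf("old: %llu ns, new: %llu ns\n",
		       (unsigned long long)old_accounted,
		       (unsigned long long)new_accounted);
		return 0;
	}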
#define sched_clock_irqtime (0)
-static cputime_t irqtime_tick_accounted(cputime_t dummy)
+static u64 irqtime_tick_accounted(u64 dummy)
{
return 0;
}
* ticks are not redelivered later. Due to that, this function may on
* occasion account more time than the calling functions think elapsed.
*/
-static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
+static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
if (static_key_false(&paravirt_steal_enabled)) {
- cputime_t steal_cputime;
- u64 steal, rounded;
+ u64 steal;
steal = paravirt_steal_clock(smp_processor_id());
steal -= this_rq()->prev_steal_time;
- steal_cputime = min(nsecs_to_cputime(steal), maxtime);
- rounded = cputime_to_nsecs(steal_cputime);
- account_steal_time(rounded);
- this_rq()->prev_steal_time += rounded;
-
- return steal_cputime;
+ steal = min(steal, maxtime);
+ account_steal_time(steal);
+ this_rq()->prev_steal_time += steal;
+
+ return steal;
}
#endif
return 0;
}
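The rewritten steal path is a clamp-and-accumulate idiom: take the raw delta since the last snapshot, cap it at the budget the caller wants to account, and advance the snapshot by exactly what was consumed, so any excess stays pending. A userspace model of that idiom (the function name and clock argument are illustrative, not kernel APIs):

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t prev_steal;	/* snapshot of steal time already accounted */

	/* Returns how much of 'maxtime' was eaten by steal since the last call. */
	static uint64_t consume_steal(uint64_t steal_clock_now, uint64_t maxtime)
	{
		uint64_t steal = steal_clock_now - prev_steal;

		if (steal > maxtime)
			steal = maxtime;	/* excess stays pending for a later call */
		prev_steal += steal;		/* advance snapshot only by what we took */
		return steal;
	}

	int main(void)
	{
		/* 7 ms stolen so far, but the caller accounts only a 4 ms budget: */
		printf("%llu\n", (unsigned long long)consume_steal(7000000, 4000000));
		/* The next call picks up the remaining 3 ms: */
		printf("%llu\n", (unsigned long long)consume_steal(7000000, 4000000));
		return 0;
	}

This is why prev_steal_time advances by the clamped value rather than the raw delta: the leftover steal is charged on a later tick instead of being dropped.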
/*
* Account how much elapsed time was spent in steal, irq, or softirq time.
*/
-static inline cputime_t account_other_time(cputime_t max)
+static inline u64 account_other_time(u64 max)
{
- cputime_t accounted;
+ u64 accounted;
/* Shall be converted to a lockdep-enabled lightweight check */
WARN_ON_ONCE(!irqs_disabled());
accounted = steal_account_process_time(max);
if (accounted < max)
accounted += irqtime_tick_accounted(max - accounted);
return accounted;
}
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
struct rq *rq, int ticks)
{
- u64 old_cputime = (__force u64) cputime_one_jiffy * ticks;
- cputime_t other;
- u64 cputime;
+ u64 other, cputime = TICK_NSEC * ticks;
/*
* When returning from idle, many ticks can get accounted at
* once, including some ticks of steal, irq, and softirq time.
* Subtract those ticks from the amount of time accounted to
* idle, or potentially user or system time. Due to rounding,
* other time can exceed ticks occasionally.
*/
other = account_other_time(ULONG_MAX);
- if (other >= old_cputime)
+ if (other >= cputime)
return;
- old_cputime -= other;
- cputime = cputime_to_nsecs(old_cputime);
+ cputime -= other;
if (this_cpu_ksoftirqd() == p) {
/*
* ksoftirqd time do not get accounted in cpu_softirq_time.
* So, we have to handle it separately here.
* Also, p->stime needs to be updated for ksoftirqd.
*/
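The early bailout above matters mostly after idle, when several ticks arrive at once. A toy version of the deduction at the top of irqtime_account_process_tick() (TICK_NSEC assumes HZ=1000; the figures are invented):

	#include <stdio.h>
	#include <stdint.h>

	#define TICK_NSEC 1000000ULL	/* HZ=1000 assumed for this example */

	int main(void)
	{
		uint64_t cputime = TICK_NSEC * 3;	/* three ticks delivered after idle */
		uint64_t other = 1200000;		/* steal + irq + softirq since then */

		if (other >= cputime)
			return 0;			/* all ticks already accounted */
		cputime -= other;
		printf("%llu ns left for the softirq/user/system/idle buckets\n",
		       (unsigned long long)cputime);
		return 0;
	}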
void account_process_tick(struct task_struct *p, int user_tick)
{
- cputime_t old_cputime, steal;
- u64 cputime;
+ u64 cputime, steal;
struct rq *rq = this_rq();
if (vtime_accounting_cpu_enabled())
return;
if (sched_clock_irqtime) {
irqtime_account_process_tick(p, user_tick, rq, 1);
return;
}
- old_cputime = cputime_one_jiffy;
+ cputime = TICK_NSEC;
steal = steal_account_process_time(ULONG_MAX);
- if (steal >= old_cputime)
+ if (steal >= cputime)
return;
- old_cputime -= steal;
- cputime = cputime_to_nsecs(old_cputime);
+ cputime -= steal;
if (user_tick)
account_user_time(p, cputime);
}
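One concrete win of the u64 units here: under the old code, a sub-jiffy steal such as 0.4 ms converted to zero jiffies and could not be deducted until it accumulated to a whole jiffy; now it is subtracted at nanosecond resolution. A toy check of both branches of the early return (TICK_NSEC assumes HZ=1000; steal values are invented):

	#include <assert.h>
	#include <stdint.h>

	#define TICK_NSEC 1000000ULL	/* HZ=1000 assumed for this example */

	int main(void)
	{
		uint64_t cputime = TICK_NSEC;
		uint64_t steal = 1500000;	/* hypervisor stole more than the tick */

		assert(steal >= cputime);	/* early-return path: task charged nothing */

		steal = 400000;			/* partial, sub-jiffy steal instead */
		assert(cputime - steal == 600000);	/* remainder goes to user/system */
		return 0;
	}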
cputime = ticks * TICK_NSEC;
- steal = cputime_to_nsecs(steal_account_process_time(ULONG_MAX));
+ steal = steal_account_process_time(ULONG_MAX);
if (steal >= cputime)
return;
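account_idle_ticks() applies the same deduction to a whole batch of idle ticks, with ULONG_MAX as an effectively unlimited budget; with u64 arithmetic the steal figure is subtracted nanosecond-exact rather than jiffy-rounded. A toy version (TICK_NSEC assumes HZ=1000; the steal figure is invented):

	#include <stdio.h>
	#include <stdint.h>

	#define TICK_NSEC 1000000ULL	/* HZ=1000 assumed for this example */

	int main(void)
	{
		uint64_t cputime = 100 * TICK_NSEC;	/* 100 ms elapsed while idle */
		uint64_t steal = 31415926;		/* 31.4 ms stolen, ns-exact */

		if (steal < cputime)
			printf("idle time: %llu ns\n",
			       (unsigned long long)(cputime - steal));
		return 0;
	}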
write_seqcount_begin(&tsk->vtime_seqcount);
tsk->vtime_snap_whence = VTIME_SYS;
if (vtime_delta(tsk)) {
delta_cpu = get_vtime_delta(tsk);
- account_user_time(tsk, delta_cpu);
+ account_user_time(tsk, cputime_to_nsecs(delta_cpu));
}
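This hunk illustrates the strategy of the series: vtime keeps its internal cputime_t bookkeeping for now, and the unit conversion is pushed to the boundary where time is handed to the accounting API. A toy illustration of that boundary-conversion pattern (all names and the HZ=1000 granularity are illustrative, not the kernel's definitions):

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t cputime_t;		/* legacy unit: jiffies in this toy */
	#define NSEC_PER_JIFFY 1000000ULL	/* HZ=1000 assumed for the example */

	static uint64_t cputime_to_nsecs(cputime_t ct)
	{
		return ct * NSEC_PER_JIFFY;
	}

	/* Stand-in for the converted accounting API, which takes nanoseconds. */
	static void account_user_time_ns(uint64_t nsecs)
	{
		printf("charging %llu ns to user time\n", (unsigned long long)nsecs);
	}

	int main(void)
	{
		cputime_t delta = 3;		/* delta still in legacy units */

		/* Convert exactly once, at the call boundary: */
		account_user_time_ns(cputime_to_nsecs(delta));
		return 0;
	}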