#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with the side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);
static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}
#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
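/*
 * On 32-bit, readers of the two u64 counters above pair with
 * irq_time_write_begin()/irq_time_write_end() through this seqcount.
 * The read side (irq_time_read() in kernel/sched/sched.h) looks
 * roughly like:
 *
 *	do {
 *		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
 *		irq_time = per_cpu(cpu_softirq_time, cpu) +
 *			   per_cpu(cpu_hardirq_time, cpu);
 *	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
 *
 * On 64-bit the loads are naturally atomic and no seqcount is needed.
 */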
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with
	 * a special task that does not consume any time, but still
	 * wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
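/*
 * Call path sketch: account_irq_enter_time()/account_irq_exit_time()
 * (<linux/vtime.h>) invoke irqtime_account_irq() from irq_enter(),
 * irq_exit() and __do_softirq(). Because the hook runs before the
 * preempt_count update in both directions, hardirq_count() and
 * in_serving_softirq() above still describe the context that the
 * just-ended slice of time was spent in.
 */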
static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}
static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}
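/*
 * The two helpers above answer a single question for the tick
 * demultiplexing in irqtime_account_process_tick(): has the raw percpu
 * hardirq/softirq time grown past what cpustat already exports? If so,
 * the current tick is folded into CPUTIME_IRQ/CPUTIME_SOFTIRQ rather
 * than into user or system time.
 */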
#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
#ifdef CONFIG_CGROUP_CPUACCT
	struct kernel_cpustat *kcpustat;
	struct cpuacct *ca;
#endif
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;

#ifdef CONFIG_CGROUP_CPUACCT
	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(p);
	while (ca && (ca != &root_cpuacct)) {
		kcpustat = this_cpu_ptr(ca->cpustat);
		kcpustat->cpustat[index] += tmp;
		ca = parent_ca(ca);
	}
	rcu_read_unlock();
#endif
}
/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}
/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (TASK_NICE(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}
/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}
/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}
/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}
/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}
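/*
 * Worked example, assuming HZ=100 (TICK_NSEC = 10,000,000): if the
 * paravirt clock reports 25ms of steal beyond prev_steal_time,
 * steal_ticks() yields 2, two ticks are accounted as steal, and only
 * 2 * TICK_NSEC is added back to prev_steal_time. The 5ms remainder
 * stays pending and is folded into a later tick.
 */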
/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;

	times->utime = sig->utime;
	times->stime = sig->stime;
	times->sum_exec_runtime = sig->sum_sched_runtime;

	rcu_read_lock();
	/* make sure we can trust tsk->thread_group list */
	if (!likely(pid_alive(tsk)))
		goto out;

	t = tsk;
	do {
		task_cputime(t, &utime, &stime);
		times->utime += utime;
		times->stime += stime;
		times->sum_exec_runtime += task_sched_runtime(t);
	} while_each_thread(tsk, t);
out:
	rcu_read_unlock();
}
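/*
 * Note the split above: sig->utime/stime/sum_sched_runtime carry the
 * totals of threads that have already exited (folded in at thread
 * exit), while the while_each_thread() loop adds the still-live
 * threads. The pid_alive() check under rcu_read_lock() is what makes
 * walking tsk->thread_group safe here.
 */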
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time because
 * no timer goes off while we are in hardirq, so we might never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time, not on irq or
 * softirq, as those no longer count in task exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in
		 * cpu_softirq_time, so we have to handle it separately
		 * here. Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime_one_jiffy);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else {
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SYSTEM);
	}
}
static void irqtime_account_idle_ticks(int ticks)
{
	int i;
	struct rq *rq = this_rq();

	for (i = 0; i < ticks; i++)
		irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
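/*
 * With CONFIG_IRQ_TIME_ACCOUNTING=n, sched_clock_irqtime is the
 * compile-time constant (0) defined earlier, so the irqtime branches
 * in account_process_tick() and account_idle_ticks() below are
 * eliminated by the compiler and these empty stubs are never called.
 */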
/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_enabled())
		return;

	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have other meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account_irq_enter().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because context tracking doesn't hook on irq
		 * entry/exit. This way we know whether we need to
		 * flush user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}
/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}
/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}
/*
 * Perform (stime * rtime) / total with reduced chances
 * of multiplication overflows by using smaller factors
 * like quotient and remainders of divisions between
 * rtime and total.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 rem, res, scaled;

	if (rtime >= total) {
		/*
		 * Scale up to rtime / total then add
		 * the remainder scaled to stime / total.
		 */
		res = div64_u64_rem(rtime, total, &rem);
		scaled = stime * res;
		scaled += div64_u64(stime * rem, total);
	} else {
		/*
		 * Same in reverse: scale down to total / rtime
		 * then subtract that result scaled to
		 * the remaining part.
		 */
		res = div64_u64_rem(total, rtime, &rem);
		scaled = div64_u64(stime, res);
		scaled -= div64_u64(scaled * rem, total);
	}

	return (__force cputime_t) scaled;
}
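/*
 * Worked example for the rtime >= total branch: stime = 3, total = 4
 * and rtime = 8 give res = 2, rem = 0, hence scaled = 3 * 2 = 6, i.e.
 * stime keeps its 3/4 share of the larger rtime. A direct
 * stime * rtime / total can overflow u64 once both factors approach
 * 2^32; the quotient/remainder split above trades a little precision
 * for a much wider overflow-free range.
 */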
/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, total;

	if (vtime_accounting_enabled()) {
		*ut = curr->utime;
		*st = curr->stime;
		return;
	}

	stime = curr->stime;
	total = stime + curr->utime;

	/*
	 * Tick based cputime accounting depends on the random scheduling
	 * timeslices of a task being interrupted or not by the timer.
	 * Depending on these circumstances, the number of these interrupts
	 * may be over or under-optimistic, matching the real user and system
	 * cputime with a variable precision.
	 *
	 * Fix this by scaling these tick based values against the total
	 * runtime accounted by the CFS scheduler.
	 */
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	if (!rtime) {
		stime = 0;
	} else if (!total) {
		stime = rtime;
	} else {
		stime = scale_stime((__force u64)stime,
				    (__force u64)rtime, (__force u64)total);
	}

	/*
	 * If the tick based count grows faster than the scheduler one,
	 * the result of the scaling may go backward.
	 * Let's enforce monotonicity.
	 */
	prev->stime = max(prev->stime, stime);
	prev->utime = max(prev->utime, rtime - prev->stime);

	*ut = prev->utime;
	*st = prev->stime;
}
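/*
 * Example of the monotonicity clamp above: if a previous call exported
 * prev->stime = 100 and the new scaling yields stime = 98 (the tick
 * counters outran the scheduler clock), max() keeps 100 and utime is
 * derived from the remaining rtime. Values exported to userspace,
 * e.g. via getrusage(), therefore never go backward between calls.
 */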
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}
static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}
static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}
void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_account_irq_exit(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	if (!vtime_accounting_enabled())
		return;

	delta_cpu = get_vtime_delta(tsk);

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_user_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_guest_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

bool vtime_accounting_enabled(void)
{
	return context_tracking_active();
}
void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock();
	write_sequnlock(&current->vtime_seqlock);
}
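/*
 * The switch-out half above stamps the previous task VTIME_SLEEPING so
 * that fetch_task_cputime() knows there is no pending delta to add
 * while it is off the CPU; the switch-in half restarts the snapshot
 * from sched_clock(), so time spent scheduled out is never charged to
 * the incoming task.
 */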
void vtime_init_idle(struct task_struct *t)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock();
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}
cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}
/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add the
		 * pending nohz time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}
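/*
 * u_dst/s_dst may be NULL, letting callers fetch only the side they
 * need: task_cputime() below passes the raw utime/stime fields while
 * task_cputime_scaled() passes the scaled ones, both reusing the same
 * seqlock retry loop.
 */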
void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}
void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */