2 * Implement CPU time clocks for the POSIX clock interface.
5 #include <linux/sched.h>
6 #include <linux/posix-timers.h>
7 #include <linux/errno.h>
8 #include <linux/math64.h>
9 #include <asm/uaccess.h>
10 #include <linux/kernel_stat.h>
11 #include <trace/events/timer.h>
12 #include <linux/random.h>
15 * Called after updating RLIMIT_CPU to run cpu timer and update
16 * tsk->signal->cputime_expires expiration cache if necessary. Needs
17  * siglock protection since other code may update expiration cache as well.
20 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
22 cputime_t cputime = secs_to_cputime(rlim_new);
24 spin_lock_irq(&task->sighand->siglock);
25 set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
26 spin_unlock_irq(&task->sighand->siglock);
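/*
 * This is the kernel side of an RLIMIT_CPU update from user space.  A
 * minimal sketch of such an update (illustrative only; error handling
 * omitted, limits are in seconds of CPU time):
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *
 *	setrlimit(RLIMIT_CPU, &rl);	// SIGXCPU after 10s, SIGKILL after 20s
 *
 * The SIGXCPU/SIGKILL behaviour itself is enforced in check_process_timers()
 * further down.
 */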
29 static int check_clock(const clockid_t which_clock)
32 struct task_struct *p;
33 const pid_t pid = CPUCLOCK_PID(which_clock);
35 if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
42 p = find_task_by_vpid(pid);
43 if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
44 same_thread_group(p, current) : has_group_leader_pid(p))) {
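/*
 * The dynamic clock IDs validated above are the ones user space obtains
 * from clock_getcpuclockid(3) or pthread_getcpuclockid(3); an ID whose PID
 * the caller can no longer see is rejected here.  A hedged user-space
 * sketch (assumes the glibc wrappers; 'pid' is a hypothetical process of
 * interest):
 *
 *	#include <time.h>
 *
 *	clockid_t clk;
 *	struct timespec res;
 *
 *	if (clock_getcpuclockid(pid, &clk) == 0)
 *		clock_getres(clk, &res);	// reaches posix_cpu_clock_getres()
 */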
52 static inline union cpu_time_count
53 timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
55 union cpu_time_count ret;
56 ret.sched = 0; /* high half always zero when .cpu used */
57 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
58 ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
60 ret.cpu = timespec_to_cputime(tp);
65 static void sample_to_timespec(const clockid_t which_clock,
66 union cpu_time_count cpu,
69 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
70 *tp = ns_to_timespec(cpu.sched);
72 cputime_to_timespec(cpu.cpu, tp);
75 static inline int cpu_time_before(const clockid_t which_clock,
76 union cpu_time_count now,
77 union cpu_time_count then)
79 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
80 return now.sched < then.sched;
82 return now.cpu < then.cpu;
85 static inline void cpu_time_add(const clockid_t which_clock,
86 union cpu_time_count *acc,
87 union cpu_time_count val)
89 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
90 acc->sched += val.sched;
95 static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
96 union cpu_time_count a,
97 union cpu_time_count b)
99 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
108 * Update expiry time from increment, and increase overrun count,
109 * given the current clock sample.
111 static void bump_cpu_timer(struct k_itimer *timer,
112 union cpu_time_count now)
116 if (timer->it.cpu.incr.sched == 0)
119 if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
120 unsigned long long delta, incr;
122 if (now.sched < timer->it.cpu.expires.sched)
124 incr = timer->it.cpu.incr.sched;
125 delta = now.sched + incr - timer->it.cpu.expires.sched;
126 /* Don't use (incr*2 < delta), incr*2 might overflow. */
127 for (i = 0; incr < delta - incr; i++)
129 for (; i >= 0; incr >>= 1, i--) {
132 timer->it.cpu.expires.sched += incr;
133 timer->it_overrun += 1 << i;
137 cputime_t delta, incr;
139 if (now.cpu < timer->it.cpu.expires.cpu)
141 incr = timer->it.cpu.incr.cpu;
142 delta = now.cpu + incr - timer->it.cpu.expires.cpu;
143 /* Don't use (incr*2 < delta), incr*2 might overflow. */
144 for (i = 0; incr < delta - incr; i++)
146 for (; i >= 0; incr = incr >> 1, i--) {
149 timer->it.cpu.expires.cpu += incr;
150 timer->it_overrun += 1 << i;
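/*
 * The doubling/halving loops above advance the expiry by whole periods in
 * O(log n) steps rather than one period at a time.  A worked example with
 * illustrative numbers: incr = 10, expires = 100, now = 175 gives
 * delta = 85; the first loop doubles incr to 80 (i = 3), the second loop
 * adds 80 to expires (now 180, past the sample) and 1 << 3 = 8 to
 * it_overrun, accounting for the eight missed firings at 100, 110, ..., 170.
 */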
156 static inline cputime_t prof_ticks(struct task_struct *p)
158 return p->utime + p->stime;
160 static inline cputime_t virt_ticks(struct task_struct *p)
166 posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
168 int error = check_clock(which_clock);
171 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
172 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
174 * If sched_clock is using a cycle counter, we
175 * don't have any idea of its true resolution
176  * exported, but it is much finer than 1s/HZ.
185 posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
188 * You can never reset a CPU clock, but we check for other errors
189 * in the call before failing with EPERM.
191 int error = check_clock(which_clock);
200 * Sample a per-thread clock for the given task.
202 static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
203 union cpu_time_count *cpu)
205 switch (CPUCLOCK_WHICH(which_clock)) {
209 cpu->cpu = prof_ticks(p);
212 cpu->cpu = virt_ticks(p);
215 cpu->sched = task_sched_runtime(p);
221 static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
223 if (b->utime > a->utime)
226 if (b->stime > a->stime)
229 if (b->sum_exec_runtime > a->sum_exec_runtime)
230 a->sum_exec_runtime = b->sum_exec_runtime;
233 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
235 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
236 struct task_cputime sum;
239 if (!cputimer->running) {
241 * The POSIX timer interface allows for absolute time expiry
242 * values through the TIMER_ABSTIME flag, therefore we have
243  * to synchronize the timer to the clock every time we start it.
246 thread_group_cputime(tsk, &sum);
247 raw_spin_lock_irqsave(&cputimer->lock, flags);
248 cputimer->running = 1;
249 update_gt_cputime(&cputimer->cputime, &sum);
251 raw_spin_lock_irqsave(&cputimer->lock, flags);
252 *times = cputimer->cputime;
253 raw_spin_unlock_irqrestore(&cputimer->lock, flags);
257 * Sample a process (thread group) clock for the given group_leader task.
258 * Must be called with tasklist_lock held for reading.
260 static int cpu_clock_sample_group(const clockid_t which_clock,
261 struct task_struct *p,
262 union cpu_time_count *cpu)
264 struct task_cputime cputime;
266 switch (CPUCLOCK_WHICH(which_clock)) {
270 thread_group_cputime(p, &cputime);
271 cpu->cpu = cputime.utime + cputime.stime;
274 thread_group_cputime(p, &cputime);
275 cpu->cpu = cputime.utime;
278 thread_group_cputime(p, &cputime);
279 cpu->sched = cputime.sum_exec_runtime;
286 static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
288 const pid_t pid = CPUCLOCK_PID(which_clock);
290 union cpu_time_count rtn;
294 * Special case constant value for our own clocks.
295 * We don't have to do any lookup to find ourselves.
297 if (CPUCLOCK_PERTHREAD(which_clock)) {
299 * Sampling just ourselves we can do with no locking.
301 error = cpu_clock_sample(which_clock,
304 read_lock(&tasklist_lock);
305 error = cpu_clock_sample_group(which_clock,
307 read_unlock(&tasklist_lock);
311 * Find the given PID, and validate that the caller
312 * should be able to see it.
314 struct task_struct *p;
316 p = find_task_by_vpid(pid);
318 if (CPUCLOCK_PERTHREAD(which_clock)) {
319 if (same_thread_group(p, current)) {
320 error = cpu_clock_sample(which_clock,
324 read_lock(&tasklist_lock);
325 if (thread_group_leader(p) && p->sighand) {
327 cpu_clock_sample_group(which_clock,
330 read_unlock(&tasklist_lock);
338 sample_to_timespec(which_clock, rtn, tp);
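/*
 * Typical route into this function from user space (illustrative; 'worker'
 * is a hypothetical pthread_t for some other thread in this process):
 *
 *	#include <pthread.h>
 *	#include <time.h>
 *
 *	clockid_t clk;
 *	struct timespec ts;
 *
 *	pthread_getcpuclockid(worker, &clk);
 *	clock_gettime(clk, &ts);	// CPU time consumed by that thread
 */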
344 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
345 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
346 * new timer already all-zeros initialized.
348 static int posix_cpu_timer_create(struct k_itimer *new_timer)
351 const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
352 struct task_struct *p;
354 if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
357 INIT_LIST_HEAD(&new_timer->it.cpu.entry);
360 if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
364 p = find_task_by_vpid(pid);
365 if (p && !same_thread_group(p, current))
370 p = current->group_leader;
372 p = find_task_by_vpid(pid);
373 if (p && !has_group_leader_pid(p))
377 new_timer->it.cpu.task = p;
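/*
 * In user space this validation runs when a timer is created on a CPU
 * clock; a minimal sketch (illustrative, error handling omitted, assumes
 * the standard POSIX timer API):
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGALRM,
 *	};
 *	timer_t tid;
 *
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 */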
389 * Clean up a CPU-clock timer that is about to be destroyed.
390 * This is called from timer deletion with the timer already locked.
391 * If we return TIMER_RETRY, it's necessary to release the timer's lock
392 * and try again. (This happens when the timer is in the middle of firing.)
394 static int posix_cpu_timer_del(struct k_itimer *timer)
396 struct task_struct *p = timer->it.cpu.task;
399 if (likely(p != NULL)) {
400 read_lock(&tasklist_lock);
401 if (unlikely(p->sighand == NULL)) {
403 * We raced with the reaping of the task.
404 * The deletion should have cleared us off the list.
406 BUG_ON(!list_empty(&timer->it.cpu.entry));
408 spin_lock(&p->sighand->siglock);
409 if (timer->it.cpu.firing)
412 list_del(&timer->it.cpu.entry);
413 spin_unlock(&p->sighand->siglock);
415 read_unlock(&tasklist_lock);
425 * Clean out CPU timers still ticking when a thread exited. The task
426 * pointer is cleared, and the expiry time is replaced with the residual
427 * time for later timer_gettime calls to return.
428 * This must be called with the siglock held.
430 static void cleanup_timers(struct list_head *head,
431 cputime_t utime, cputime_t stime,
432 unsigned long long sum_exec_runtime)
434 struct cpu_timer_list *timer, *next;
435 cputime_t ptime = utime + stime;
437 list_for_each_entry_safe(timer, next, head, entry) {
438 list_del_init(&timer->entry);
439 if (timer->expires.cpu < ptime) {
440 timer->expires.cpu = 0;
442 timer->expires.cpu -= ptime;
447 list_for_each_entry_safe(timer, next, head, entry) {
448 list_del_init(&timer->entry);
449 if (timer->expires.cpu < utime) {
450 timer->expires.cpu = 0;
452 timer->expires.cpu -= utime;
457 list_for_each_entry_safe(timer, next, head, entry) {
458 list_del_init(&timer->entry);
459 if (timer->expires.sched < sum_exec_runtime) {
460 timer->expires.sched = 0;
462 timer->expires.sched -= sum_exec_runtime;
468 * These are both called with the siglock held, when the current thread
469 * is being reaped. When the final (leader) thread in the group is reaped,
470 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
472 void posix_cpu_timers_exit(struct task_struct *tsk)
474 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
475 sizeof(unsigned long long));
476 cleanup_timers(tsk->cpu_timers,
477 tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
480 void posix_cpu_timers_exit_group(struct task_struct *tsk)
482 struct signal_struct *const sig = tsk->signal;
484 cleanup_timers(tsk->signal->cpu_timers,
485 tsk->utime + sig->utime, tsk->stime + sig->stime,
486 tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
489 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
492 * That's all for this thread or process.
493 * We leave our residual in expires to be reported.
495 put_task_struct(timer->it.cpu.task);
496 timer->it.cpu.task = NULL;
497 timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
498 timer->it.cpu.expires,
502 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
504 return expires == 0 || expires > new_exp;
508 * Insert the timer on the appropriate list before any timers that
509 * expire later. This must be called with the tasklist_lock held
510 * for reading, interrupts disabled and p->sighand->siglock taken.
512 static void arm_timer(struct k_itimer *timer)
514 struct task_struct *p = timer->it.cpu.task;
515 struct list_head *head, *listpos;
516 struct task_cputime *cputime_expires;
517 struct cpu_timer_list *const nt = &timer->it.cpu;
518 struct cpu_timer_list *next;
520 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
521 head = p->cpu_timers;
522 cputime_expires = &p->cputime_expires;
524 head = p->signal->cpu_timers;
525 cputime_expires = &p->signal->cputime_expires;
527 head += CPUCLOCK_WHICH(timer->it_clock);
530 list_for_each_entry(next, head, entry) {
531 if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
533 listpos = &next->entry;
535 list_add(&nt->entry, listpos);
537 if (listpos == head) {
538 union cpu_time_count *exp = &nt->expires;
541 * We are the new earliest-expiring POSIX 1.b timer, hence
542 * need to update expiration cache. Take into account that
543 * for process timers we share expiration cache with itimers
544 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
547 switch (CPUCLOCK_WHICH(timer->it_clock)) {
549 if (expires_gt(cputime_expires->prof_exp, exp->cpu))
550 cputime_expires->prof_exp = exp->cpu;
553 if (expires_gt(cputime_expires->virt_exp, exp->cpu))
554 cputime_expires->virt_exp = exp->cpu;
557 if (cputime_expires->sched_exp == 0 ||
558 cputime_expires->sched_exp > exp->sched)
559 cputime_expires->sched_exp = exp->sched;
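/*
 * For example (illustrative numbers): if prof_exp already caches an
 * ITIMER_PROF expiry at 2s of CPU time, arming a CPUCLOCK_PROF timer at
 * 1.5s lowers the cache to 1.5s, while arming one at 3s leaves the cache
 * alone; the later timer simply stays further down the list until the
 * earlier expirations have been processed.
 */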
566 * The timer is locked, fire it and arrange for its reload.
568 static void cpu_timer_fire(struct k_itimer *timer)
570 if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
572  * The user doesn't want any signal.
574 timer->it.cpu.expires.sched = 0;
575 } else if (unlikely(timer->sigq == NULL)) {
577  * This is a special case for clock_nanosleep,
578 * not a normal timer from sys_timer_create.
580 wake_up_process(timer->it_process);
581 timer->it.cpu.expires.sched = 0;
582 } else if (timer->it.cpu.incr.sched == 0) {
584 * One-shot timer. Clear it as soon as it's fired.
586 posix_timer_event(timer, 0);
587 timer->it.cpu.expires.sched = 0;
588 } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
590 * The signal did not get queued because the signal
591 * was ignored, so we won't get any callback to
592 * reload the timer. But we need to keep it
593 * ticking in case the signal is deliverable next time.
595 posix_cpu_timer_schedule(timer);
600 * Sample a process (thread group) timer for the given group_leader task.
601 * Must be called with tasklist_lock held for reading.
603 static int cpu_timer_sample_group(const clockid_t which_clock,
604 struct task_struct *p,
605 union cpu_time_count *cpu)
607 struct task_cputime cputime;
609 thread_group_cputimer(p, &cputime);
610 switch (CPUCLOCK_WHICH(which_clock)) {
614 cpu->cpu = cputime.utime + cputime.stime;
617 cpu->cpu = cputime.utime;
620 cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
627 * Guts of sys_timer_settime for CPU timers.
628 * This is called with the timer locked and interrupts disabled.
629 * If we return TIMER_RETRY, it's necessary to release the timer's lock
630 * and try again. (This happens when the timer is in the middle of firing.)
632 static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
633 struct itimerspec *new, struct itimerspec *old)
635 struct task_struct *p = timer->it.cpu.task;
636 union cpu_time_count old_expires, new_expires, old_incr, val;
639 if (unlikely(p == NULL)) {
641 * Timer refers to a dead task's clock.
646 new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
648 read_lock(&tasklist_lock);
650 * We need the tasklist_lock to protect against reaping that
651 * clears p->sighand. If p has just been reaped, we can no
652 * longer get any information about it at all.
654 if (unlikely(p->sighand == NULL)) {
655 read_unlock(&tasklist_lock);
657 timer->it.cpu.task = NULL;
662 * Disarm any old timer after extracting its expiry time.
664 BUG_ON(!irqs_disabled());
667 old_incr = timer->it.cpu.incr;
668 spin_lock(&p->sighand->siglock);
669 old_expires = timer->it.cpu.expires;
670 if (unlikely(timer->it.cpu.firing)) {
671 timer->it.cpu.firing = -1;
674 list_del_init(&timer->it.cpu.entry);
677 * We need to sample the current value to convert the new
678  * value from relative to absolute, and to convert the
679 * old value from absolute to relative. To set a process
680 * timer, we need a sample to balance the thread expiry
681 * times (in arm_timer). With an absolute time, we must
682 * check if it's already passed. In short, we need a sample.
684 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
685 cpu_clock_sample(timer->it_clock, p, &val);
687 cpu_timer_sample_group(timer->it_clock, p, &val);
691 if (old_expires.sched == 0) {
692 old->it_value.tv_sec = 0;
693 old->it_value.tv_nsec = 0;
696 * Update the timer in case it has
697 * overrun already. If it has,
698 * we'll report it as having overrun
699 * and with the next reloaded timer
700 * already ticking, though we are
701 * swallowing that pending
702  * notification here to install the new setting.
705 bump_cpu_timer(timer, val);
706 if (cpu_time_before(timer->it_clock, val,
707 timer->it.cpu.expires)) {
708 old_expires = cpu_time_sub(
710 timer->it.cpu.expires, val);
711 sample_to_timespec(timer->it_clock,
715 old->it_value.tv_nsec = 1;
716 old->it_value.tv_sec = 0;
723 * We are colliding with the timer actually firing.
724 * Punt after filling in the timer's old value, and
725 * disable this firing since we are already reporting
726 * it as an overrun (thanks to bump_cpu_timer above).
728 spin_unlock(&p->sighand->siglock);
729 read_unlock(&tasklist_lock);
733 if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
734 cpu_time_add(timer->it_clock, &new_expires, val);
738 * Install the new expiry time (or zero).
739 * For a timer with no notification action, we don't actually
740 * arm the timer (we'll just fake it for timer_gettime).
742 timer->it.cpu.expires = new_expires;
743 if (new_expires.sched != 0 &&
744 cpu_time_before(timer->it_clock, val, new_expires)) {
748 spin_unlock(&p->sighand->siglock);
749 read_unlock(&tasklist_lock);
752 * Install the new reload setting, and
753 * set up the signal and overrun bookkeeping.
755 timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
759 * This acts as a modification timestamp for the timer,
760 * so any automatic reload attempt will punt on seeing
761 * that we have reset the timer manually.
763 timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
765 timer->it_overrun_last = 0;
766 timer->it_overrun = -1;
768 if (new_expires.sched != 0 &&
769 !cpu_time_before(timer->it_clock, val, new_expires)) {
771 * The designated time already passed, so we notify
772 * immediately, even if the thread never runs to
773 * accumulate more time on this clock.
775 cpu_timer_fire(timer);
781 sample_to_timespec(timer->it_clock,
782 old_incr, &old->it_interval);
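/*
 * User-space counterpart of the relative/absolute handling above
 * (illustrative; 'tid' is a timer_t created on a CPU clock as sketched
 * near posix_cpu_timer_create(), error handling omitted):
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },		// first expiry after 1s of CPU time
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 250000000 },	// then every 250ms
 *	};
 *
 *	timer_settime(tid, 0, &its, NULL);	// 0 = relative; TIMER_ABSTIME for absolute
 */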
787 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
789 union cpu_time_count now;
790 struct task_struct *p = timer->it.cpu.task;
794 * Easy part: convert the reload time.
796 sample_to_timespec(timer->it_clock,
797 timer->it.cpu.incr, &itp->it_interval);
799 if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
800 itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
804 if (unlikely(p == NULL)) {
806 * This task already died and the timer will never fire.
807 * In this case, expires is actually the dead value.
810 sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
816 * Sample the clock to take the difference with the expiry time.
818 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
819 cpu_clock_sample(timer->it_clock, p, &now);
820 clear_dead = p->exit_state;
822 read_lock(&tasklist_lock);
823 if (unlikely(p->sighand == NULL)) {
825 * The process has been reaped.
826 * We can't even collect a sample any more.
827 * Call the timer disarmed, nothing else to do.
830 timer->it.cpu.task = NULL;
831 timer->it.cpu.expires.sched = 0;
832 read_unlock(&tasklist_lock);
835 cpu_timer_sample_group(timer->it_clock, p, &now);
836 clear_dead = (unlikely(p->exit_state) &&
837 thread_group_empty(p));
839 read_unlock(&tasklist_lock);
842 if (unlikely(clear_dead)) {
844 * We've noticed that the thread is dead, but
845  * not yet reaped. Take this opportunity to drop our task ref.
848 clear_dead_task(timer, now);
852 if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
853 sample_to_timespec(timer->it_clock,
854 cpu_time_sub(timer->it_clock,
855 timer->it.cpu.expires, now),
859 * The timer should have expired already, but the firing
860 * hasn't taken place yet. Say it's just about to expire.
862 itp->it_value.tv_nsec = 1;
863 itp->it_value.tv_sec = 0;
868 * Check for any per-thread CPU timers that have fired and move them off
869 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
870 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
872 static void check_thread_timers(struct task_struct *tsk,
873 struct list_head *firing)
876 struct list_head *timers = tsk->cpu_timers;
877 struct signal_struct *const sig = tsk->signal;
881 tsk->cputime_expires.prof_exp = 0;
882 while (!list_empty(timers)) {
883 struct cpu_timer_list *t = list_first_entry(timers,
884 struct cpu_timer_list,
886 if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
887 tsk->cputime_expires.prof_exp = t->expires.cpu;
891 list_move_tail(&t->entry, firing);
896 tsk->cputime_expires.virt_exp = 0;
897 while (!list_empty(timers)) {
898 struct cpu_timer_list *t = list_first_entry(timers,
899 struct cpu_timer_list,
901 if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
902 tsk->cputime_expires.virt_exp = t->expires.cpu;
906 list_move_tail(&t->entry, firing);
911 tsk->cputime_expires.sched_exp = 0;
912 while (!list_empty(timers)) {
913 struct cpu_timer_list *t = list_first_entry(timers,
914 struct cpu_timer_list,
916 if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
917 tsk->cputime_expires.sched_exp = t->expires.sched;
921 list_move_tail(&t->entry, firing);
925 * Check for the special case thread timers.
927 soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
928 if (soft != RLIM_INFINITY) {
930 ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
932 if (hard != RLIM_INFINITY &&
933 tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
935 * At the hard limit, we just die.
936 * No need to calculate anything else now.
938 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
941 if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
943 * At the soft limit, send a SIGXCPU every second.
946 soft += USEC_PER_SEC;
947 sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
950 "RT Watchdog Timeout: %s[%d]\n",
951 tsk->comm, task_pid_nr(tsk));
952 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
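/*
 * RLIMIT_RTTIME above is accounted in microseconds of CPU time that a
 * SCHED_FIFO/SCHED_RR task runs without sleeping.  A sketch of how a
 * real-time task might set it up (illustrative, error handling omitted):
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *
 *	setrlimit(RLIMIT_RTTIME, &rl);	// SIGXCPU after 0.5s, SIGKILL after 1s
 */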
957 static void stop_process_timers(struct signal_struct *sig)
959 struct thread_group_cputimer *cputimer = &sig->cputimer;
962 raw_spin_lock_irqsave(&cputimer->lock, flags);
963 cputimer->running = 0;
964 raw_spin_unlock_irqrestore(&cputimer->lock, flags);
967 static u32 onecputick;
969 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
970 cputime_t *expires, cputime_t cur_time, int signo)
975 if (cur_time >= it->expires) {
977 it->expires += it->incr;
978 it->error += it->incr_error;
979 if (it->error >= onecputick) {
980 it->expires -= cputime_one_jiffy;
981 it->error -= onecputick;
987 trace_itimer_expire(signo == SIGPROF ?
988 ITIMER_PROF : ITIMER_VIRTUAL,
989 tsk->signal->leader_pid, cur_time);
990 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
993 if (it->expires && (!*expires || it->expires < *expires)) {
994 *expires = it->expires;
999 * task_cputime_zero - Check a task_cputime struct for all zero fields.
1001 * @cputime: The struct to compare.
1003 * Checks @cputime to see if all fields are zero. Returns true if all fields
1004 * are zero, false if any field is nonzero.
1006 static inline int task_cputime_zero(const struct task_cputime *cputime)
1008 if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
1014 * Check for any process-wide CPU timers that have fired and move them
1015 * off the tsk->signal->cpu_timers[N] lists onto the firing list. Per-thread timers
1016 * have already been taken off.
1018 static void check_process_timers(struct task_struct *tsk,
1019 struct list_head *firing)
1022 struct signal_struct *const sig = tsk->signal;
1023 cputime_t utime, ptime, virt_expires, prof_expires;
1024 unsigned long long sum_sched_runtime, sched_expires;
1025 struct list_head *timers = sig->cpu_timers;
1026 struct task_cputime cputime;
1030 * Collect the current process totals.
1032 thread_group_cputimer(tsk, &cputime);
1033 utime = cputime.utime;
1034 ptime = utime + cputime.stime;
1035 sum_sched_runtime = cputime.sum_exec_runtime;
1038 while (!list_empty(timers)) {
1039 struct cpu_timer_list *tl = list_first_entry(timers,
1040 struct cpu_timer_list,
1042 if (!--maxfire || ptime < tl->expires.cpu) {
1043 prof_expires = tl->expires.cpu;
1047 list_move_tail(&tl->entry, firing);
1053 while (!list_empty(timers)) {
1054 struct cpu_timer_list *tl = list_first_entry(timers,
1055 struct cpu_timer_list,
1057 if (!--maxfire || utime < tl->expires.cpu) {
1058 virt_expires = tl->expires.cpu;
1062 list_move_tail(&tl->entry, firing);
1068 while (!list_empty(timers)) {
1069 struct cpu_timer_list *tl = list_first_entry(timers,
1070 struct cpu_timer_list,
1072 if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
1073 sched_expires = tl->expires.sched;
1077 list_move_tail(&tl->entry, firing);
1081 * Check for the special case process timers.
1083 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
1085 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
1087 soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1088 if (soft != RLIM_INFINITY) {
1089 unsigned long psecs = cputime_to_secs(ptime);
1090 unsigned long hard =
1091 ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
1093 if (psecs >= hard) {
1095 * At the hard limit, we just die.
1096 * No need to calculate anything else now.
1098 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
1101 if (psecs >= soft) {
1103 * At the soft limit, send a SIGXCPU every second.
1105 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
1108 sig->rlim[RLIMIT_CPU].rlim_cur = soft;
1111 x = secs_to_cputime(soft);
1112 if (!prof_expires || x < prof_expires) {
1117 sig->cputime_expires.prof_exp = prof_expires;
1118 sig->cputime_expires.virt_exp = virt_expires;
1119 sig->cputime_expires.sched_exp = sched_expires;
1120 if (task_cputime_zero(&sig->cputime_expires))
1121 stop_process_timers(sig);
1125 * This is called from the signal code (via do_schedule_next_timer)
1126 * when the last timer signal was delivered and we have to reload the timer.
1128 void posix_cpu_timer_schedule(struct k_itimer *timer)
1130 struct task_struct *p = timer->it.cpu.task;
1131 union cpu_time_count now;
1133 if (unlikely(p == NULL))
1135 * The task was cleaned up already, no future firings.
1140 * Fetch the current sample and update the timer's expiry time.
1142 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
1143 cpu_clock_sample(timer->it_clock, p, &now);
1144 bump_cpu_timer(timer, now);
1145 if (unlikely(p->exit_state)) {
1146 clear_dead_task(timer, now);
1149 read_lock(&tasklist_lock); /* arm_timer needs it. */
1150 spin_lock(&p->sighand->siglock);
1152 read_lock(&tasklist_lock);
1153 if (unlikely(p->sighand == NULL)) {
1155 * The process has been reaped.
1156 * We can't even collect a sample any more.
1159 timer->it.cpu.task = p = NULL;
1160 timer->it.cpu.expires.sched = 0;
1162 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1164 * We've noticed that the thread is dead, but
1165 * not yet reaped. Take this opportunity to
1166 * drop our task ref.
1168 clear_dead_task(timer, now);
1171 spin_lock(&p->sighand->siglock);
1172 cpu_timer_sample_group(timer->it_clock, p, &now);
1173 bump_cpu_timer(timer, now);
1174 /* Leave the tasklist_lock locked for the call below. */
1178 * Now re-arm for the new expiry time.
1180 BUG_ON(!irqs_disabled());
1182 spin_unlock(&p->sighand->siglock);
1185 read_unlock(&tasklist_lock);
1188 timer->it_overrun_last = timer->it_overrun;
1189 timer->it_overrun = -1;
1190 ++timer->it_requeue_pending;
1194 * task_cputime_expired - Compare two task_cputime entities.
1196 * @sample: The task_cputime structure to be checked for expiration.
1197 * @expires: Expiration times, against which @sample will be checked.
1199 * Checks @sample against @expires to see if any field of @sample has expired.
1200 * Returns true if any field of @sample has reached or passed the corresponding
1201 * field of @expires, provided that field of @expires is set. Otherwise returns false.
1203 static inline int task_cputime_expired(const struct task_cputime *sample,
1204 const struct task_cputime *expires)
1206 if (expires->utime && sample->utime >= expires->utime)
1208 if (expires->stime && sample->utime + sample->stime >= expires->stime)
1210 if (expires->sum_exec_runtime != 0 &&
1211 sample->sum_exec_runtime >= expires->sum_exec_runtime)
1217 * fastpath_timer_check - POSIX CPU timers fast path.
1219 * @tsk: The task (thread) being checked.
1221 * Check the task and thread group timers. If both are zero (there are no
1222 * timers set) return false. Otherwise snapshot the task and thread group
1223 * timers and compare them with the corresponding expiration times. Return
1224 * true if a timer has expired, else return false.
1226 static inline int fastpath_timer_check(struct task_struct *tsk)
1228 struct signal_struct *sig;
1230 if (!task_cputime_zero(&tsk->cputime_expires)) {
1231 struct task_cputime task_sample = {
1232 .utime = tsk->utime,
1233 .stime = tsk->stime,
1234 .sum_exec_runtime = tsk->se.sum_exec_runtime
1237 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
1242 if (sig->cputimer.running) {
1243 struct task_cputime group_sample;
1245 raw_spin_lock(&sig->cputimer.lock);
1246 group_sample = sig->cputimer.cputime;
1247 raw_spin_unlock(&sig->cputimer.lock);
1249 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1257 * This is called from the timer interrupt handler. The irq handler has
1258 * already updated our counts. We need to check if any timers fire now.
1259 * Interrupts are disabled.
1261 void run_posix_cpu_timers(struct task_struct *tsk)
1264 struct k_itimer *timer, *next;
1265 unsigned long flags;
1267 BUG_ON(!irqs_disabled());
1270 * The fast path checks that there are no expired thread or thread
1271 * group timers. If that's so, just return.
1273 if (!fastpath_timer_check(tsk))
1276 if (!lock_task_sighand(tsk, &flags))
1279 * Here we take all the timers that are firing off the
1280 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists and
1281 * put them on the firing list.
1283 check_thread_timers(tsk, &firing);
1285 * If there are any active process wide timers (POSIX 1.b, itimers,
1286 * RLIMIT_CPU) cputimer must be running.
1288 if (tsk->signal->cputimer.running)
1289 check_process_timers(tsk, &firing);
1292 * We must release these locks before taking any timer's lock.
1293 * There is a potential race with timer deletion here, as the
1294 * siglock now protects our private firing list. We have set
1295 * the firing flag in each timer, so that a deletion attempt
1296 * that gets the timer lock before we do will give it up and
1297 * spin until we've taken care of that timer below.
1299 unlock_task_sighand(tsk, &flags);
1302 * Now that all the timers on our list have the firing flag,
1303 * no one will touch their list entries but us. We'll take
1304 * each timer's lock before clearing its firing flag, so no
1305 * timer call will interfere.
1307 list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
1310 spin_lock(&timer->it_lock);
1311 list_del_init(&timer->it.cpu.entry);
1312 cpu_firing = timer->it.cpu.firing;
1313 timer->it.cpu.firing = 0;
1315 * The firing flag is -1 if we collided with a reset
1316 * of the timer, which already reported this
1317 * almost-firing as an overrun. So don't generate an event.
1319 if (likely(cpu_firing >= 0))
1320 cpu_timer_fire(timer);
1321 spin_unlock(&timer->it_lock);
1326 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1327 * The tsk->sighand->siglock must be held by the caller.
1329 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1330 cputime_t *newval, cputime_t *oldval)
1332 union cpu_time_count now;
1334 BUG_ON(clock_idx == CPUCLOCK_SCHED);
1335 cpu_timer_sample_group(clock_idx, tsk, &now);
1339 * We are setting an itimer. The *oldval is absolute and we update
1340 * it to be relative; the *newval argument is relative and we update
1341 * it to be absolute.
1344 if (*oldval <= now.cpu) {
1345 /* Just about to fire. */
1346 *oldval = cputime_one_jiffy;
1358 * Update the expiration cache if we are now the earliest timer, e.g. when
1359 * a new RLIMIT_CPU limit expires before the cached prof_exp cpu timer.
1361 switch (clock_idx) {
1363 if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
1364 tsk->signal->cputime_expires.prof_exp = *newval;
1367 if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
1368 tsk->signal->cputime_expires.virt_exp = *newval;
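/*
 * Worked example of the absolute/relative conversion above (illustrative
 * numbers): with now.cpu at 3s of accumulated CPU time, an existing
 * absolute expiry of 4s in *oldval is handed back as 1s remaining, and a
 * new relative *newval of 2s is stored and cached as 5s absolute.
 */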
1373 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1374 struct timespec *rqtp, struct itimerspec *it)
1376 struct k_itimer timer;
1380 * Set up a temporary timer and then wait for it to go off.
1382 memset(&timer, 0, sizeof timer);
1383 spin_lock_init(&timer.it_lock);
1384 timer.it_clock = which_clock;
1385 timer.it_overrun = -1;
1386 error = posix_cpu_timer_create(&timer);
1387 timer.it_process = current;
1389 static struct itimerspec zero_it;
1391 memset(it, 0, sizeof *it);
1392 it->it_value = *rqtp;
1394 spin_lock_irq(&timer.it_lock);
1395 error = posix_cpu_timer_set(&timer, flags, it, NULL);
1397 spin_unlock_irq(&timer.it_lock);
1401 while (!signal_pending(current)) {
1402 if (timer.it.cpu.expires.sched == 0) {
1404 * Our timer fired and was reset.
1406 spin_unlock_irq(&timer.it_lock);
1411 * Block until cpu_timer_fire (or a signal) wakes us.
1413 __set_current_state(TASK_INTERRUPTIBLE);
1414 spin_unlock_irq(&timer.it_lock);
1416 spin_lock_irq(&timer.it_lock);
1420 * We were interrupted by a signal.
1422 sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
1423 posix_cpu_timer_set(&timer, 0, &zero_it, it);
1424 spin_unlock_irq(&timer.it_lock);
1426 if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
1428 * It actually did fire already.
1433 error = -ERESTART_RESTARTBLOCK;
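/*
 * User-space counterpart (illustrative; assumes a multi-threaded process,
 * since the process CPU clock only advances while other threads keep
 * running once this one blocks):
 *
 *	#include <time.h>
 *
 *	struct timespec req = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);	// wait for 1s more process CPU time
 */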
1439 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1441 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1442 struct timespec *rqtp, struct timespec __user *rmtp)
1444 struct restart_block *restart_block =
1445 &current_thread_info()->restart_block;
1446 struct itimerspec it;
1450 * Diagnose required errors first.
1452 if (CPUCLOCK_PERTHREAD(which_clock) &&
1453 (CPUCLOCK_PID(which_clock) == 0 ||
1454 CPUCLOCK_PID(which_clock) == current->pid))
1457 error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
1459 if (error == -ERESTART_RESTARTBLOCK) {
1461 if (flags & TIMER_ABSTIME)
1462 return -ERESTARTNOHAND;
1464 * Report back to the user the time still remaining.
1466 if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1469 restart_block->fn = posix_cpu_nsleep_restart;
1470 restart_block->nanosleep.clockid = which_clock;
1471 restart_block->nanosleep.rmtp = rmtp;
1472 restart_block->nanosleep.expires = timespec_to_ns(rqtp);
1477 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1479 clockid_t which_clock = restart_block->nanosleep.clockid;
1481 struct itimerspec it;
1484 t = ns_to_timespec(restart_block->nanosleep.expires);
1486 error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
1488 if (error == -ERESTART_RESTARTBLOCK) {
1489 struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
1491 * Report back to the user the time still remaining.
1493 if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1496 restart_block->nanosleep.expires = timespec_to_ns(&t);
1502 #define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
1503 #define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
1505 static int process_cpu_clock_getres(const clockid_t which_clock,
1506 struct timespec *tp)
1508 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1510 static int process_cpu_clock_get(const clockid_t which_clock,
1511 struct timespec *tp)
1513 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1515 static int process_cpu_timer_create(struct k_itimer *timer)
1517 timer->it_clock = PROCESS_CLOCK;
1518 return posix_cpu_timer_create(timer);
1520 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1521 struct timespec *rqtp,
1522 struct timespec __user *rmtp)
1524 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
1526 static long process_cpu_nsleep_restart(struct restart_block *restart_block)
1530 static int thread_cpu_clock_getres(const clockid_t which_clock,
1531 struct timespec *tp)
1533 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1535 static int thread_cpu_clock_get(const clockid_t which_clock,
1536 struct timespec *tp)
1538 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1540 static int thread_cpu_timer_create(struct k_itimer *timer)
1542 timer->it_clock = THREAD_CLOCK;
1543 return posix_cpu_timer_create(timer);
1546 struct k_clock clock_posix_cpu = {
1547 .clock_getres = posix_cpu_clock_getres,
1548 .clock_set = posix_cpu_clock_set,
1549 .clock_get = posix_cpu_clock_get,
1550 .timer_create = posix_cpu_timer_create,
1551 .nsleep = posix_cpu_nsleep,
1552 .nsleep_restart = posix_cpu_nsleep_restart,
1553 .timer_set = posix_cpu_timer_set,
1554 .timer_del = posix_cpu_timer_del,
1555 .timer_get = posix_cpu_timer_get,
1558 static __init int init_posix_cpu_timers(void)
1560 struct k_clock process = {
1561 .clock_getres = process_cpu_clock_getres,
1562 .clock_get = process_cpu_clock_get,
1563 .timer_create = process_cpu_timer_create,
1564 .nsleep = process_cpu_nsleep,
1565 .nsleep_restart = process_cpu_nsleep_restart,
1567 struct k_clock thread = {
1568 .clock_getres = thread_cpu_clock_getres,
1569 .clock_get = thread_cpu_clock_get,
1570 .timer_create = thread_cpu_timer_create,
1574 posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
1575 posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
1577 cputime_to_timespec(cputime_one_jiffy, &ts);
1578 onecputick = ts.tv_nsec;
1579 WARN_ON(ts.tv_sec != 0);
1583 __initcall(init_posix_cpu_timers);