/*
 * Implement CPU time clocks for the POSIX clock interface.
 */
#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
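
/*
 * For reference, the expected call shape, roughly as in the setrlimit()
 * path (do_prlimit() in kernel/sys.c):
 *
 *	if (resource == RLIMIT_CPU && new_rlim->rlim_cur != RLIM_INFINITY)
 *		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
 *
 * i.e. the new soft limit in seconds is re-armed as a CPUCLOCK_PROF
 * process-wide expiry under siglock.
 */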
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;
	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}
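
/*
 * A worked example of the clockid encoding decoded above (per the
 * CPUCLOCK_* macros in <linux/posix-timers.h>): the low two bits
 * select PROF/VIRT/SCHED, bit 2 selects per-thread, and the upper
 * bits carry the ones-complement pid. So for pid 1234:
 *
 *	clockid_t c = (~1234 << 3) | CPUCLOCK_PERTHREAD_MASK | CPUCLOCK_VIRT;
 *
 *	CPUCLOCK_PID(c)		== 1234
 *	CPUCLOCK_PERTHREAD(c)	!= 0
 *	CPUCLOCK_WHICH(c)	== CPUCLOCK_VIRT
 */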
static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	unsigned long long ret;

	ret = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret = cputime_to_expires(timespec_to_cputime(tp));
	}
	return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
			       unsigned long long expires,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(expires);
	else
		cputime_to_timespec((__force cputime_t)expires, tp);
}
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   unsigned long long now)
{
	int i;
	unsigned long long delta, incr;

	if (timer->it.cpu.incr == 0)
		return;
	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1 << i;
		delta -= incr;
	}
}
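
/*
 * A worked example of the two loops above: with expires == 100,
 * incr == 10 and now == 135, delta == 45. The first loop doubles
 * incr to 40 and stops at i == 2; the second loop adds 40 to expires
 * (it_overrun += 1 << 2) leaving delta == 5, so the 20 and 10 halves
 * are skipped. The timer lands on 140, the first period boundary
 * past "now", with 4 overruns accounted in O(log(delta/incr)) steps.
 */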
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}
static inline unsigned long long prof_ticks(struct task_struct *p)
{
	cputime_t utime, stime;

	task_cputime(p, &utime, &stime);

	return cputime_to_expires(utime + stime);
}

static inline unsigned long long virt_ticks(struct task_struct *p)
{
	cputime_t utime;

	task_cputime(p, &utime, NULL);

	return cputime_to_expires(utime);
}
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    unsigned long long *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (b->utime > a->utime)
		a->utime = b->utime;

	if (b->stime > a->stime)
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	if (!cputimer->running) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		raw_spin_lock_irqsave(&cputimer->lock, flags);
		cputimer->running = 1;
		update_gt_cputime(&cputimer->cputime, &sum);
	} else
		raw_spin_lock_irqsave(&cputimer->lock, flags);
	*times = cputimer->cputime;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
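
/*
 * Note the merge above: while ->running was 0, cputimer->cputime may
 * lag the true group totals, but it may also have been concurrently
 * bumped by the accounting paths on another CPU. Merging the fresh
 * thread_group_cputime() sum with update_gt_cputime(), rather than
 * assigning it, keeps the sampled clock from ever going backwards.
 */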
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  unsigned long long *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime_to_expires(cputime.utime + cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime_to_expires(cputime.utime);
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	unsigned long long rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->sighand) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = utime + stime;

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires < cputime_to_expires(ptime)) {
			timer->expires = 0;
		} else {
			timer->expires -= cputime_to_expires(ptime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires < cputime_to_expires(utime)) {
			timer->expires = 0;
		} else {
			timer->expires -= cputime_to_expires(utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires < sum_exec_runtime) {
			timer->expires = 0;
		} else {
			timer->expires -= sum_exec_runtime;
		}
	}
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cputime_t utime, stime;

	add_device_randomness((const void *)&tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));
	task_cputime(tsk, &utime, &stime);
	cleanup_timers(tsk->cpu_timers,
		       utime, stime, tsk->se.sum_exec_runtime);
}

void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, stime;

	task_cputime(tsk, &utime, &stime);
	cleanup_timers(tsk->signal->cpu_timers,
		       utime + sig->utime, stime + sig->stime,
		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}
static void clear_dead_task(struct k_itimer *timer, unsigned long long now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires -= now;
}
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	return expires == 0 || expires > new_exp;
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		unsigned long long exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
				cputime_expires->prof_exp = expires_to_cputime(exp);
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
				cputime_expires->virt_exp = expires_to_cputime(exp);
			break;
		case CPUCLOCK_SCHED:
			if (cputime_expires->sched_exp == 0 ||
			    cputime_expires->sched_exp > exp)
				cputime_expires->sched_exp = exp;
			break;
		}
	}
}
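
/*
 * Example of the cache update above: if the earliest queued
 * CPUCLOCK_PROF process timer moves from 5s to 2s of CPU time, the
 * new list head makes expires_gt(prof_exp, 2s) true and prof_exp
 * drops to 2s, so the per-tick fast path (fastpath_timer_check()
 * below) starts comparing samples against 2s. A timer queued behind
 * the head never touches the cache, since only the head can be the
 * next to expire.
 */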
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  unsigned long long *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime_to_expires(cputime.utime + cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime_to_expires(cputime.utime);
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
#ifdef CONFIG_NO_HZ_FULL
static void nohz_kick_work_fn(struct work_struct *work)
{
	tick_nohz_full_kick_all();
}

static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);

/*
 * We need the IPIs to be sent from sane process context.
 * The posix cpu timers are always set with irqs disabled.
 */
static void posix_cpu_timer_kick_nohz(void)
{
	schedule_work(&nohz_kick_work);
}

bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
{
	if (!task_cputime_zero(&tsk->cputime_expires))
		return false;

	if (tsk->signal->cputimer.running)
		return false;

	return true;
}
#else
static inline void posix_cpu_timer_kick_nohz(void) { }
#endif
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	unsigned long long old_expires, new_expires, old_incr, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->sighand.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->sighand == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the old
	 * value from absolute to relative.  To set a process timer,
	 * we need a sample to balance the thread expiry times (in
	 * arm_timer).  With an absolute time, we must check if it's
	 * already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has overrun
			 * already.  If it has, we'll report it as
			 * having overrun and with the next reloaded
			 * timer already ticking, though we are
			 * swallowing that pending notification here
			 * to install the new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		spin_unlock(&p->sighand->siglock);
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires != 0 && !(flags & TIMER_ABSTIME))
		new_expires += val;

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires)
		arm_timer(timer);

	spin_unlock(&p->sighand->siglock);
	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old)
		sample_to_timespec(timer->it_clock,
				   old_incr, &old->it_interval);
	if (!ret)
		posix_cpu_timer_kick_nohz();
	return ret;
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	unsigned long long now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all. */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (now < timer->it.cpu.expires) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.expires - now,
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	unsigned long soft;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || prof_ticks(tsk) < t->expires) {
			tsk->cputime_expires.prof_exp = expires_to_cputime(t->expires);
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || virt_ticks(tsk) < t->expires) {
			tsk->cputime_expires.virt_exp = expires_to_cputime(t->expires);
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires) {
			tsk->cputime_expires.sched_exp = t->expires;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
			       "RT Watchdog Timeout: %s[%d]\n",
			       tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}
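
/*
 * Worked example for the unit conversions above, assuming HZ == 1000:
 * USEC_PER_SEC/HZ == 1000, so an RLIMIT_RTTIME soft limit of 950000us
 * becomes DIV_ROUND_UP(950000, 1000) == 950 ticks of tsk->rt.timeout.
 * Each time the soft threshold trips, soft grows by USEC_PER_SEC, so
 * the SIGXCPU repeats roughly once per second of runaway RT runtime
 * until the hard limit is crossed and SIGKILL is sent.
 */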
static void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     unsigned long long *expires,
			     unsigned long long cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr) {
			it->expires += it->incr;
			it->error += it->incr_error;
			if (it->error >= onecputick) {
				it->expires -= cputime_one_jiffy;
				it->error -= onecputick;
			}
		} else {
			it->expires = 0;
		}

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires)) {
		*expires = it->expires;
	}
}
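
/*
 * The error bookkeeping above compensates for an itimer interval that
 * is not a whole number of cputime ticks: the setup side (see
 * set_cpu_itimer()) rounds the interval up to it->incr ticks and
 * records the overshoot, in nanoseconds, in it->incr_error. Assuming
 * tick-based cputime with HZ == 100 (onecputick == 10000000ns), a
 * requested 25ms interval becomes incr == 3 ticks with incr_error ==
 * 5000000ns; every second reload accumulates a full tick of error and
 * pulls the expiry back by cputime_one_jiffy, so the itimer averages
 * 25ms instead of a systematic 30ms.
 */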
/*
 * Check for any per-thread CPU timers that have fired and move them
 * off the tsk->*_timers list onto the firing list.  Per-thread timers
 * have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	unsigned long long utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime_to_expires(cputime.utime);
	ptime = utime + cputime_to_expires(cputime.stime);
	sum_sched_runtime = cputime.sum_exec_runtime;

	maxfire = 20;
	prof_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || ptime < tl->expires) {
			prof_expires = tl->expires;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || utime < tl->expires) {
			virt_expires = tl->expires;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires) {
			sched_expires = tl->expires;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		cputime_t x;

		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = secs_to_cputime(soft);
		if (!prof_expires || x < prof_expires) {
			prof_expires = x;
		}
	}

	sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
	sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	unsigned long long now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it. */
		spin_lock(&p->sighand->siglock);
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		spin_lock(&p->sighand->siglock);
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	BUG_ON(!irqs_disabled());
	arm_timer(timer);
	spin_unlock(&p->sighand->siglock);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any set field of @expires has been reached or passed by
 * the corresponding field of @sample.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
					const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}
/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;
	cputime_t utime, stime;

	task_cputime(tsk, &utime, &stime);

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = utime,
			.stime = stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (sig->cputimer.running) {
		struct task_cputime group_sample;

		raw_spin_lock(&sig->cputimer.lock);
		group_sample = sig->cputimer.cputime;
		raw_spin_unlock(&sig->cputimer.lock);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}
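
/*
 * The intended per-tick usage is "cheap check first, locks only when
 * something may have expired", roughly:
 *
 *	if (fastpath_timer_check(tsk)) {
 *		lock_task_sighand(tsk, &flags);
 *		... walk the timer lists and fire ...
 *	}
 *
 * which is exactly the shape of run_posix_cpu_timers() below; the
 * fast path itself takes no sighand lock, only cputimer->lock for a
 * consistent group snapshot.
 */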
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	/*
	 * If there are any active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) cputimer must be running.
	 */
	if (tsk->signal->cputimer.running)
		check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}

	/*
	 * In case some timers were rescheduled after the queue got emptied,
	 * wake up full dynticks CPUs.
	 */
	if (tsk->signal->cputimer.running)
		posix_cpu_timer_kick_nohz();
}
/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	unsigned long long now;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer.  The *oldval is absolute and we
		 * update it to be relative; *newval is relative and we
		 * update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = cputime_one_jiffy;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			goto out;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit is earlier than the prof_exp cpu timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}
out:
	posix_cpu_timer_kick_nohz();
}
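
/*
 * A worked example of the conversion above: with now == 30s of group
 * CPU time, an old itimer stored absolutely at 50s is handed back to
 * the caller as a relative 20s, while a new relative value of 10s is
 * stored as 40s absolute, which is what future check_process_timers()
 * samples will be compared against.
 */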
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset; the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case where the timer was or
			 * is in the middle of firing.  In other cases we
			 * already freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
		restart_block->nanosleep.rmtp = rmtp;
		restart_block->nanosleep.expires = timespec_to_ns(rqtp);
	}
	return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec t;
	struct itimerspec it;
	int error;

	t = ns_to_timespec(restart_block->nanosleep.expires);

	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->nanosleep.expires = timespec_to_ns(&t);
	}
	return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};
static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};
	struct timespec ts;

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);
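
/*
 * For context, a minimal userspace exercise of the clocks registered
 * above (an illustrative sketch, not part of this file; link with -lrt
 * on older glibc):
 *
 *	#include <stdio.h>
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *					.sigev_signo  = SIGPROF };
 *		struct itimerspec its = { .it_value = { .tv_sec = 1 } };
 *		struct timespec ts;
 *		timer_t tid;
 *
 *		// Read the process CPU clock (posix_cpu_clock_get above).
 *		clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *		printf("%ld.%09ld s of CPU time\n", (long)ts.tv_sec, ts.tv_nsec);
 *
 *		// Arm a one-shot CPU timer (posix_cpu_timer_set above).
 *		timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *		timer_settime(tid, 0, &its, NULL);
 *		for (;;)
 *			;	// SIGPROF arrives after ~1s of CPU time.
 *	}
 */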