// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"
21 static void posix_cpu_timer_rearm(struct k_itimer *timer);
23 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
25 posix_cputimers_init(pct);
26 if (cpu_limit != RLIM_INFINITY) {
27 pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
28 pct->timers_active = true;
33 * Called after updating RLIMIT_CPU to run cpu timer and update
34 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
35 * necessary. Needs siglock protection since other code may update the
36 * expiration cache as well.
38 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
40 u64 nsecs = rlim_new * NSEC_PER_SEC;
42 spin_lock_irq(&task->sighand->siglock);
43 set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
44 spin_unlock_irq(&task->sighand->siglock);
48 * Functions for validating access to tasks.
50 static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
52 const bool thread = !!CPUCLOCK_PERTHREAD(clock);
53 const pid_t upid = CPUCLOCK_PID(clock);
56 if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
60 * If the encoded PID is 0, then the timer is targeted at current
61 * or the process to which current belongs.
64 return thread ? task_pid(current) : task_tgid(current);
66 pid = find_vpid(upid);
71 struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
72 return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
/*
 * For clock_gettime(PROCESS) allow finding the process with
 * the pid of the current task. The code needs the tgid
 * of the process so that pid_task(pid, PIDTYPE_TGID) can be
 * used to find the process.
 */
81 if (gettime && (pid == task_pid(current)))
82 return task_tgid(current);
/*
 * For process clocks, require that the pid identifies a process.
 */
87 return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
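/*
 * Illustration: CPUCLOCK_PID(), CPUCLOCK_PERTHREAD() and CPUCLOCK_WHICH()
 * decode a clockid_t which packs a PID together with the clock type.
 * Assuming the encoding from posix-timers.h, a clock id looks roughly like
 * (~pid << 3) | clock, with bit 2 set for per-thread clocks, which is why
 * a decoded PID of 0 selects the calling task or its process above.
 */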
90 static inline int validate_clock_permissions(const clockid_t clock)
95 ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
101 static inline enum pid_type clock_pid_type(const clockid_t clock)
103 return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
106 static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
108 return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
112 * Update expiry time from increment, and increase overrun count,
113 * given the current clock sample.
115 static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
117 u64 delta, incr, expires = timer->it.cpu.node.expires;
if (!timer->it_interval)
	return expires;

if (now < expires)
	return expires;

incr = timer->it_interval;
127 delta = now + incr - expires;
129 /* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; incr < delta - incr; i++)
	incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
	if (delta < incr)
		continue;
	delta -= incr;
	timer->it.cpu.node.expires += incr;
	timer->it_overrun += 1LL << i;
}
141 return timer->it.cpu.node.expires;
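/*
 * Worked example for the bump above: with expires = 10, it_interval = 5
 * and now = 47, delta = 42. The first loop doubles incr up to 40 (i = 3);
 * the second loop consumes only the 40 step, leaving expires = 50 and
 * it_overrun increased by 8, i.e. the timer is advanced by eight whole
 * intervals to the first expiry after 'now'.
 */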
144 /* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
145 static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
147 return !(~pct->bases[CPUCLOCK_PROF].nextevt |
148 ~pct->bases[CPUCLOCK_VIRT].nextevt |
149 ~pct->bases[CPUCLOCK_SCHED].nextevt);
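/*
 * Note on the check above: an unarmed base has nextevt == U64_MAX, so
 * ~nextevt is 0 for an inactive base. OR-ing the three inverted values and
 * negating the result is therefore true only when all three bases are
 * inactive, without needing three separate comparisons.
 */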
153 posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
155 int error = validate_clock_permissions(which_clock);
159 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
160 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
/*
 * If sched_clock is using a cycle counter, we don't export
 * its true resolution, but it is much finer than 1s/HZ.
 */
tp->tv_nsec = 1;
173 posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
175 int error = validate_clock_permissions(clock);
178 * You can never reset a CPU clock, but we check for other errors
179 * in the call before failing with EPERM.
181 return error ? : -EPERM;
185 * Sample a per-thread clock for the given task. clkid is validated.
187 static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
191 if (clkid == CPUCLOCK_SCHED)
192 return task_sched_runtime(p);
194 task_cputime(p, &utime, &stime);
198 return utime + stime;
207 static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
209 samples[CPUCLOCK_PROF] = stime + utime;
210 samples[CPUCLOCK_VIRT] = utime;
211 samples[CPUCLOCK_SCHED] = rtime;
214 static void task_sample_cputime(struct task_struct *p, u64 *samples)
218 task_cputime(p, &utime, &stime);
219 store_samples(samples, stime, utime, p->se.sum_exec_runtime);
222 static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
225 u64 stime, utime, rtime;
227 utime = atomic64_read(&at->utime);
228 stime = atomic64_read(&at->stime);
229 rtime = atomic64_read(&at->sum_exec_runtime);
230 store_samples(samples, stime, utime, rtime);
234 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
235 * to avoid race conditions with concurrent updates to cputime.
237 static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
241 curr_cputime = atomic64_read(cputime);
242 if (sum_cputime > curr_cputime) {
243 if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
248 static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
249 struct task_cputime *sum)
251 __update_gt_cputime(&cputime_atomic->utime, sum->utime);
252 __update_gt_cputime(&cputime_atomic->stime, sum->stime);
253 __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
267 void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
269 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
270 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
272 WARN_ON_ONCE(!pct->timers_active);
274 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
289 static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
291 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
292 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
294 lockdep_assert_task_sighand_held(tsk);
296 /* Check if cputimer isn't running. This is accessed without locking. */
297 if (!READ_ONCE(pct->timers_active)) {
298 struct task_cputime sum;
301 * The POSIX timer interface allows for absolute time expiry
302 * values through the TIMER_ABSTIME flag, therefore we have
303 * to synchronize the timer to the clock every time we start it.
305 thread_group_cputime(tsk, &sum);
306 update_gt_cputime(&cputimer->cputime_atomic, &sum);
309 * We're setting timers_active without a lock. Ensure this
310 * only gets written to in one operation. We set it after
311 * update_gt_cputime() as a small optimization, but
312 * barriers are not required because update_gt_cputime()
313 * can handle concurrent updates.
315 WRITE_ONCE(pct->timers_active, true);
317 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
320 static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
322 struct task_cputime ct;
324 thread_group_cputime(tsk, &ct);
325 store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
329 * Sample a process (thread group) clock for the given task clkid. If the
330 * group's cputime accounting is already enabled, read the atomic
331 * store. Otherwise a full update is required. clkid is already validated.
333 static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
336 struct thread_group_cputimer *cputimer = &p->signal->cputimer;
337 struct posix_cputimers *pct = &p->signal->posix_cputimers;
338 u64 samples[CPUCLOCK_MAX];
340 if (!READ_ONCE(pct->timers_active)) {
342 thread_group_start_cputime(p, samples);
344 __thread_group_cputime(p, samples);
346 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
349 return samples[clkid];
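/*
 * Note: callers pass a third 'start' argument (false for plain clock
 * reads, true when arming a timer, see the callers below). When group
 * accounting is inactive, a timer start enables it via
 * thread_group_start_cputime(), while a plain read falls back to
 * __thread_group_cputime() so that a clock_gettime() does not switch the
 * accounting on permanently.
 */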
352 static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
354 const clockid_t clkid = CPUCLOCK_WHICH(clock);
355 struct task_struct *tsk;
359 tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
365 if (CPUCLOCK_PERTHREAD(clock))
366 t = cpu_clock_sample(clkid, tsk);
368 t = cpu_clock_sample_group(clkid, tsk, false);
371 *tp = ns_to_timespec64(t);
376 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
377 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
378 * new timer already all-zeros initialized.
380 static int posix_cpu_timer_create(struct k_itimer *new_timer)
382 static struct lock_class_key posix_cpu_timers_key;
386 pid = pid_for_clock(new_timer->it_clock, false);
393 * If posix timer expiry is handled in task work context then
394 * timer::it_lock can be taken without disabling interrupts as all
395 * other locking happens in task context. This requires a separate
396 * lock class key otherwise regular posix timer expiry would record
397 * the lock class being taken in interrupt context and generate a
398 * false positive warning.
400 if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
401 lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);
403 new_timer->kclock = &clock_posix_cpu;
404 timerqueue_init(&new_timer->it.cpu.node);
405 new_timer->it.cpu.pid = get_pid(pid);
/*
 * Dequeue the timer and reset the base if it was its earliest expiration.
 * It makes sure the next tick recalculates the base next expiration so we
 * don't keep the costly process wide cputime counter around for a random
 * amount of time, along with the tick dependency.
 *
 * If another timer gets queued between this and the next tick, its
 * expiration will update the base next event if necessary on the next
 * tick.
 */
420 static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
422 struct cpu_timer *ctmr = &timer->it.cpu;
423 struct posix_cputimer_base *base;
426 if (!cpu_timer_dequeue(ctmr))
429 clkidx = CPUCLOCK_WHICH(timer->it_clock);
431 if (CPUCLOCK_PERTHREAD(timer->it_clock))
432 base = p->posix_cputimers.bases + clkidx;
434 base = p->signal->posix_cputimers.bases + clkidx;
436 if (cpu_timer_getexpires(ctmr) == base->nextevt)
442 * Clean up a CPU-clock timer that is about to be destroyed.
443 * This is called from timer deletion with the timer already locked.
444 * If we return TIMER_RETRY, it's necessary to release the timer's lock
445 * and try again. (This happens when the timer is in the middle of firing.)
447 static int posix_cpu_timer_del(struct k_itimer *timer)
449 struct cpu_timer *ctmr = &timer->it.cpu;
450 struct sighand_struct *sighand;
451 struct task_struct *p;
456 p = cpu_timer_task_rcu(timer);
461 * Protect against sighand release/switch in exit/exec and process/
462 * thread timer list entry concurrent read/writes.
464 sighand = lock_task_sighand(p, &flags);
465 if (unlikely(sighand == NULL)) {
467 * This raced with the reaping of the task. The exit cleanup
468 * should have removed this timer from the timer queue.
470 WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
472 if (timer->it.cpu.firing)
475 disarm_timer(timer, p);
477 unlock_task_sighand(p, &flags);
488 static void cleanup_timerqueue(struct timerqueue_head *head)
490 struct timerqueue_node *node;
491 struct cpu_timer *ctmr;
493 while ((node = timerqueue_getnext(head))) {
494 timerqueue_del(head, node);
495 ctmr = container_of(node, struct cpu_timer, node);
501 * Clean out CPU timers which are still armed when a thread exits. The
502 * timers are only removed from the list. No other updates are done. The
503 * corresponding posix timers are still accessible, but cannot be rearmed.
505 * This must be called with the siglock held.
507 static void cleanup_timers(struct posix_cputimers *pct)
509 cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
510 cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
511 cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
515 * These are both called with the siglock held, when the current thread
516 * is being reaped. When the final (leader) thread in the group is reaped,
517 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
519 void posix_cpu_timers_exit(struct task_struct *tsk)
521 cleanup_timers(&tsk->posix_cputimers);
523 void posix_cpu_timers_exit_group(struct task_struct *tsk)
525 cleanup_timers(&tsk->signal->posix_cputimers);
529 * Insert the timer on the appropriate list before any timers that
530 * expire later. This must be called with the sighand lock held.
532 static void arm_timer(struct k_itimer *timer, struct task_struct *p)
534 int clkidx = CPUCLOCK_WHICH(timer->it_clock);
535 struct cpu_timer *ctmr = &timer->it.cpu;
536 u64 newexp = cpu_timer_getexpires(ctmr);
537 struct posix_cputimer_base *base;
539 if (CPUCLOCK_PERTHREAD(timer->it_clock))
540 base = p->posix_cputimers.bases + clkidx;
542 base = p->signal->posix_cputimers.bases + clkidx;
544 if (!cpu_timer_enqueue(&base->tqhead, ctmr))
548 * We are the new earliest-expiring POSIX 1.b timer, hence
549 * need to update expiration cache. Take into account that
550 * for process timers we share expiration cache with itimers
551 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
553 if (newexp < base->nextevt)
554 base->nextevt = newexp;
556 if (CPUCLOCK_PERTHREAD(timer->it_clock))
557 tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
559 tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
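/*
 * Note: base->nextevt updated above is the expiry cache which
 * task_cputimers_expired() compares against fresh samples in the tick
 * fastpath, and the tick dependency bit keeps the periodic tick running
 * on nohz_full CPUs so that an armed CPU timer is actually noticed.
 */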
563 * The timer is locked, fire it and arrange for its reload.
565 static void cpu_timer_fire(struct k_itimer *timer)
567 struct cpu_timer *ctmr = &timer->it.cpu;
569 if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
/*
 * The user does not want any signal.
 */
573 cpu_timer_setexpires(ctmr, 0);
574 } else if (unlikely(timer->sigq == NULL)) {
/*
 * This is a special case for clock_nanosleep(),
 * not a normal timer from sys_timer_create().
 */
579 wake_up_process(timer->it_process);
580 cpu_timer_setexpires(ctmr, 0);
581 } else if (!timer->it_interval) {
583 * One-shot timer. Clear it as soon as it's fired.
585 posix_timer_event(timer, 0);
586 cpu_timer_setexpires(ctmr, 0);
587 } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
589 * The signal did not get queued because the signal
590 * was ignored, so we won't get any callback to
591 * reload the timer. But we need to keep it
592 * ticking in case the signal is deliverable next time.
594 posix_cpu_timer_rearm(timer);
595 ++timer->it_requeue_pending;
600 * Guts of sys_timer_settime for CPU timers.
601 * This is called with the timer locked and interrupts disabled.
602 * If we return TIMER_RETRY, it's necessary to release the timer's lock
603 * and try again. (This happens when the timer is in the middle of firing.)
605 static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
606 struct itimerspec64 *new, struct itimerspec64 *old)
608 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
609 u64 old_expires, new_expires, old_incr, val;
610 struct cpu_timer *ctmr = &timer->it.cpu;
611 struct sighand_struct *sighand;
612 struct task_struct *p;
617 p = cpu_timer_task_rcu(timer);
620 * If p has just been reaped, we can no
621 * longer get any information about it at all.
/*
 * Use the to_ktime conversion because that clamps the maximum
 * value to KTIME_MAX and avoids multiplication overflows.
 */
631 new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
634 * Protect against sighand release/switch in exit/exec and p->cpu_timers
635 * and p->signal->cpu_timers read/write in arm_timer()
637 sighand = lock_task_sighand(p, &flags);
639 * If p has just been reaped, we can no
640 * longer get any information about it at all.
642 if (unlikely(sighand == NULL)) {
648 * Disarm any old timer after extracting its expiry time.
650 old_incr = timer->it_interval;
651 old_expires = cpu_timer_getexpires(ctmr);
653 if (unlikely(timer->it.cpu.firing)) {
654 timer->it.cpu.firing = -1;
657 cpu_timer_dequeue(ctmr);
/*
 * We need to sample the current value to convert the new
 * value from relative to absolute, and to convert the
 * old value from absolute to relative. To set a process
 * timer, we need a sample to balance the thread expiry
 * times (in arm_timer). With an absolute time, we must
 * check if it's already passed. In short, we need a sample.
 */
668 if (CPUCLOCK_PERTHREAD(timer->it_clock))
669 val = cpu_clock_sample(clkid, p);
671 val = cpu_clock_sample_group(clkid, p, true);
674 if (old_expires == 0) {
675 old->it_value.tv_sec = 0;
676 old->it_value.tv_nsec = 0;
679 * Update the timer in case it has overrun already.
680 * If it has, we'll report it as having overrun and
681 * with the next reloaded timer already ticking,
682 * though we are swallowing that pending
683 * notification here to install the new setting.
u64 exp = bump_cpu_timer(timer, val);

if (val < exp) {
	old_expires = exp - val;
	old->it_value = ns_to_timespec64(old_expires);
} else {
	old->it_value.tv_nsec = 1;
	old->it_value.tv_sec = 0;
}
699 * We are colliding with the timer actually firing.
700 * Punt after filling in the timer's old value, and
701 * disable this firing since we are already reporting
702 * it as an overrun (thanks to bump_cpu_timer above).
704 unlock_task_sighand(p, &flags);
if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
	new_expires += val;
}
713 * Install the new expiry time (or zero).
714 * For a timer with no notification action, we don't actually
715 * arm the timer (we'll just fake it for timer_gettime).
717 cpu_timer_setexpires(ctmr, new_expires);
if (new_expires != 0 && val < new_expires) {
	arm_timer(timer, p);
}
722 unlock_task_sighand(p, &flags);
724 * Install the new reload setting, and
725 * set up the signal and overrun bookkeeping.
727 timer->it_interval = timespec64_to_ktime(new->it_interval);
730 * This acts as a modification timestamp for the timer,
731 * so any automatic reload attempt will punt on seeing
732 * that we have reset the timer manually.
timer->it_requeue_pending = (timer->it_requeue_pending + 2) & ~REQUEUE_PENDING;
736 timer->it_overrun_last = 0;
737 timer->it_overrun = -1;
739 if (new_expires != 0 && !(val < new_expires)) {
741 * The designated time already passed, so we notify
742 * immediately, even if the thread never runs to
743 * accumulate more time on this clock.
745 cpu_timer_fire(timer);
750 old->it_interval = ns_to_timespec64(old_incr);
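/*
 * Summary of the settime path above: sample the clock, dequeue and
 * account any overrun of the old timer, install the new expiry and
 * interval, then either arm the timer for a future expiry or fire it
 * immediately when the requested time has already passed. TIMER_RETRY is
 * returned while the old timer is in the middle of firing so the caller
 * drops the lock and retries.
 */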
755 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
757 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
758 struct cpu_timer *ctmr = &timer->it.cpu;
759 u64 now, expires = cpu_timer_getexpires(ctmr);
760 struct task_struct *p;
763 p = cpu_timer_task_rcu(timer);
768 * Easy part: convert the reload time.
770 itp->it_interval = ktime_to_timespec64(timer->it_interval);
776 * Sample the clock to take the difference with the expiry time.
778 if (CPUCLOCK_PERTHREAD(timer->it_clock))
779 now = cpu_clock_sample(clkid, p);
781 now = cpu_clock_sample_group(clkid, p, false);
784 itp->it_value = ns_to_timespec64(expires - now);
787 * The timer should have expired already, but the firing
788 * hasn't taken place yet. Say it's just about to expire.
790 itp->it_value.tv_nsec = 1;
791 itp->it_value.tv_sec = 0;
797 #define MAX_COLLECTED 20
799 static u64 collect_timerqueue(struct timerqueue_head *head,
800 struct list_head *firing, u64 now)
802 struct timerqueue_node *next;
805 while ((next = timerqueue_getnext(head))) {
806 struct cpu_timer *ctmr;
809 ctmr = container_of(next, struct cpu_timer, node);
810 expires = cpu_timer_getexpires(ctmr);
811 /* Limit the number of timers to expire at once */
812 if (++i == MAX_COLLECTED || now < expires)
816 cpu_timer_dequeue(ctmr);
817 list_add_tail(&ctmr->elist, firing);
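/*
 * Note: the loop above moves at most MAX_COLLECTED timers onto the firing
 * list per invocation; the value handed back (the expiry of the first
 * timer left in the queue, or U64_MAX once the queue is empty) becomes
 * the new base->nextevt in collect_posix_cputimers() below.
 */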
823 static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
824 struct list_head *firing)
826 struct posix_cputimer_base *base = pct->bases;
829 for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
base->nextevt = collect_timerqueue(&base->tqhead, firing, samples[i]);
835 static inline void check_dl_overrun(struct task_struct *tsk)
837 if (tsk->dl.dl_overrun) {
838 tsk->dl.dl_overrun = 0;
839 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
843 static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
848 if (print_fatal_signals) {
849 pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
850 rt ? "RT" : "CPU", hard ? "hard" : "soft",
851 current->comm, task_pid_nr(current));
853 __group_send_sig_info(signo, SEND_SIG_PRIV, current);
858 * Check for any per-thread CPU timers that have fired and move them off
859 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
860 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
862 static void check_thread_timers(struct task_struct *tsk,
863 struct list_head *firing)
865 struct posix_cputimers *pct = &tsk->posix_cputimers;
866 u64 samples[CPUCLOCK_MAX];
870 check_dl_overrun(tsk);
872 if (expiry_cache_is_inactive(pct))
875 task_sample_cputime(tsk, samples);
876 collect_posix_cputimers(pct, samples, firing);
879 * Check for the special case thread timers.
881 soft = task_rlimit(tsk, RLIMIT_RTTIME);
882 if (soft != RLIM_INFINITY) {
883 /* Task RT timeout is accounted in jiffies. RTTIME is usec */
884 unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
885 unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
887 /* At the hard limit, send SIGKILL. No further action. */
888 if (hard != RLIM_INFINITY &&
889 check_rlimit(rttime, hard, SIGKILL, true, true))
892 /* At the soft limit, send a SIGXCPU every second */
893 if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
894 soft += USEC_PER_SEC;
895 tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
899 if (expiry_cache_is_inactive(pct))
900 tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
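/*
 * Example for the RLIMIT_RTTIME units above: tsk->rt.timeout counts
 * scheduler ticks, so with HZ == 1000 a timeout of 2500 ticks becomes
 * rttime = 2500 * (USEC_PER_SEC / HZ) = 2500000 us, which is compared
 * against the soft/hard limits that RLIMIT_RTTIME expresses in
 * microseconds. On a soft limit hit the limit is pushed out by one second,
 * so SIGXCPU repeats every second until the hard limit sends SIGKILL.
 */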
903 static inline void stop_process_timers(struct signal_struct *sig)
905 struct posix_cputimers *pct = &sig->posix_cputimers;
907 /* Turn off the active flag. This is done without locking. */
908 WRITE_ONCE(pct->timers_active, false);
909 tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
912 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
913 u64 *expires, u64 cur_time, int signo)
918 if (cur_time >= it->expires) {
920 it->expires += it->incr;
924 trace_itimer_expire(signo == SIGPROF ?
925 ITIMER_PROF : ITIMER_VIRTUAL,
926 task_tgid(tsk), cur_time);
927 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
930 if (it->expires && it->expires < *expires)
931 *expires = it->expires;
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the corresponding timer queues onto the firing list. Per-thread
 * timers have already been taken off.
 */
939 static void check_process_timers(struct task_struct *tsk,
940 struct list_head *firing)
942 struct signal_struct *const sig = tsk->signal;
943 struct posix_cputimers *pct = &sig->posix_cputimers;
944 u64 samples[CPUCLOCK_MAX];
948 * If there are no active process wide timers (POSIX 1.b, itimers,
949 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
950 * processing when there is already another task handling them.
952 if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
956 * Signify that a thread is checking for process timers.
957 * Write access to this field is protected by the sighand lock.
959 pct->expiry_active = true;
962 * Collect the current process totals. Group accounting is active
963 * so the sample can be taken directly.
965 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
966 collect_posix_cputimers(pct, samples, firing);
969 * Check for the special case process timers.
971 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
972 &pct->bases[CPUCLOCK_PROF].nextevt,
973 samples[CPUCLOCK_PROF], SIGPROF);
974 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
975 &pct->bases[CPUCLOCK_VIRT].nextevt,
976 samples[CPUCLOCK_VIRT], SIGVTALRM);
978 soft = task_rlimit(tsk, RLIMIT_CPU);
979 if (soft != RLIM_INFINITY) {
980 /* RLIMIT_CPU is in seconds. Samples are nanoseconds */
981 unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
982 u64 ptime = samples[CPUCLOCK_PROF];
983 u64 softns = (u64)soft * NSEC_PER_SEC;
984 u64 hardns = (u64)hard * NSEC_PER_SEC;
986 /* At the hard limit, send SIGKILL. No further action. */
987 if (hard != RLIM_INFINITY &&
988 check_rlimit(ptime, hardns, SIGKILL, false, true))
991 /* At the soft limit, send a SIGXCPU every second */
992 if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
993 sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
994 softns += NSEC_PER_SEC;
997 /* Update the expiry cache */
998 if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
999 pct->bases[CPUCLOCK_PROF].nextevt = softns;
1002 if (expiry_cache_is_inactive(pct))
1003 stop_process_timers(sig);
1005 pct->expiry_active = false;
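/*
 * Note: pct->expiry_active toggled above is what fastpath_timer_check()
 * below tests, so only one thread of the group walks the process-wide
 * timer queues at a time; the flag is cleared once the expiry cache has
 * been refreshed.
 */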
1009 * This is called from the signal code (via posixtimer_rearm)
1010 * when the last timer signal was delivered and we have to reload the timer.
1012 static void posix_cpu_timer_rearm(struct k_itimer *timer)
1014 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
1015 struct task_struct *p;
1016 struct sighand_struct *sighand;
1017 unsigned long flags;
1021 p = cpu_timer_task_rcu(timer);
1025 /* Protect timer list r/w in arm_timer() */
1026 sighand = lock_task_sighand(p, &flags);
1027 if (unlikely(sighand == NULL))
1031 * Fetch the current sample and update the timer's expiry time.
1033 if (CPUCLOCK_PERTHREAD(timer->it_clock))
1034 now = cpu_clock_sample(clkid, p);
1036 now = cpu_clock_sample_group(clkid, p, true);
1038 bump_cpu_timer(timer, now);
1041 * Now re-arm for the new expiry time.
1043 arm_timer(timer, p);
1044 unlock_task_sighand(p, &flags);
/**
 * task_cputimers_expired - Check whether posix CPU timers are expired
 *
 * @samples:	Array of current samples for the CPUCLOCK clocks
 * @pct:	Pointer to a posix_cputimers container
 *
 * Returns true if any member of @samples is greater than or equal to the
 * corresponding member of @pct->bases[CLK].nextevt. False otherwise.
 */
1059 task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1063 for (i = 0; i < CPUCLOCK_MAX; i++) {
1064 if (samples[i] >= pct->bases[i].nextevt)
1071 * fastpath_timer_check - POSIX CPU timers fast path.
1073 * @tsk: The task (thread) being checked.
1075 * Check the task and thread group timers. If both are zero (there are no
1076 * timers set) return false. Otherwise snapshot the task and thread group
1077 * timers and compare them with the corresponding expiration times. Return
1078 * true if a timer has expired, else return false.
1080 static inline bool fastpath_timer_check(struct task_struct *tsk)
1082 struct posix_cputimers *pct = &tsk->posix_cputimers;
1083 struct signal_struct *sig;
1085 if (!expiry_cache_is_inactive(pct)) {
1086 u64 samples[CPUCLOCK_MAX];
1088 task_sample_cputime(tsk, samples);
1089 if (task_cputimers_expired(samples, pct))
1094 pct = &sig->posix_cputimers;
1096 * Check if thread group timers expired when timers are active and
1097 * no other thread in the group is already handling expiry for
1098 * thread group cputimers. These fields are read without the
1099 * sighand lock. However, this is fine because this is meant to be
1100 * a fastpath heuristic to determine whether we should try to
1101 * acquire the sighand lock to handle timer expiry.
1103 * In the worst case scenario, if concurrently timers_active is set
1104 * or expiry_active is cleared, but the current thread doesn't see
1105 * the change yet, the timer checks are delayed until the next
1106 * thread in the group gets a scheduler interrupt to handle the
1107 * timer. This isn't an issue in practice because these types of
1108 * delays with signals actually getting sent are expected.
1110 if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1111 u64 samples[CPUCLOCK_MAX];
proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
1116 if (task_cputimers_expired(samples, pct))
1120 if (dl_task(tsk) && tsk->dl.dl_overrun)
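/*
 * The dl_overrun check above makes a SCHED_DEADLINE runtime overrun take
 * the slow path too, so check_dl_overrun() gets a chance to send SIGXCPU
 * from the full timer handling.
 */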
1126 static void handle_posix_cpu_timers(struct task_struct *tsk);
1128 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1129 static void posix_cpu_timers_work(struct callback_head *work)
1131 handle_posix_cpu_timers(current);
1135 * Initialize posix CPU timers task work in init task. Out of line to
1136 * keep the callback static and to avoid header recursion hell.
1138 void __init posix_cputimers_init_work(void)
init_task_work(&current->posix_cputimers_work.work,
1141 posix_cpu_timers_work);
/*
 * Note: All operations on tsk->posix_cputimers_work.scheduled happen either
 * in hard interrupt context or in task context with interrupts
 * disabled. Aside from that the writer/reader interaction is always in the
 * context of the current task, which means they are strict per CPU.
 */
1150 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1152 return tsk->posix_cputimers_work.scheduled;
1155 static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1157 if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
1160 /* Schedule task work to actually expire the timers */
1161 tsk->posix_cputimers_work.scheduled = true;
1162 task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
1165 static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1166 unsigned long start)
1171 * On !RT kernels interrupts are disabled while collecting expired
1172 * timers, so no tick can happen and the fast path check can be
1173 * reenabled without further checks.
1175 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
1176 tsk->posix_cputimers_work.scheduled = false;
/*
 * On RT enabled kernels ticks can happen while the expired timers
 * are collected under sighand lock. But any tick which observes
 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
 * checks. So reenabling the tick work has to be done carefully:
 *
 * Disable interrupts and run the fast path check if jiffies have
 * advanced since the collecting of expired timers started. If
 * jiffies have not advanced or the fast path check did not find
 * newly expired timers, reenable the fast path check in the timer
 * interrupt. If there are newly expired timers, return false and
 * let the collection loop repeat.
 */
1193 local_irq_disable();
1194 if (start != jiffies && fastpath_timer_check(tsk))
1197 tsk->posix_cputimers_work.scheduled = false;
1202 #else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1203 static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1205 lockdep_posixtimer_enter();
1206 handle_posix_cpu_timers(tsk);
1207 lockdep_posixtimer_exit();
1210 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1215 static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1216 unsigned long start)
1220 #endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1222 static void handle_posix_cpu_timers(struct task_struct *tsk)
1224 struct k_itimer *timer, *next;
1225 unsigned long flags, start;
1228 if (!lock_task_sighand(tsk, &flags))
/*
 * On RT locking sighand lock does not disable interrupts,
 * so this needs to be careful vs. ticks. Store the current
 * jiffies value.
 */
1237 start = READ_ONCE(jiffies);
/*
 * Here we take all the firing timers off the
 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N]
 * lists and put them on the firing list.
 */
1245 check_thread_timers(tsk, &firing);
1247 check_process_timers(tsk, &firing);
1250 * The above timer checks have updated the expiry cache and
1251 * because nothing can have queued or modified timers after
1252 * sighand lock was taken above it is guaranteed to be
1253 * consistent. So the next timer interrupt fastpath check
1254 * will find valid data.
1256 * If timer expiry runs in the timer interrupt context then
1257 * the loop is not relevant as timers will be directly
1258 * expired in interrupt context. The stub function below
 * always returns true, which allows the compiler to
 * optimize the loop out.
1262 * If timer expiry is deferred to task work context then
1263 * the following rules apply:
1265 * - On !RT kernels no tick can have happened on this CPU
1266 * after sighand lock was acquired because interrupts are
1267 * disabled. So reenabling task work before dropping
1268 * sighand lock and reenabling interrupts is race free.
1270 * - On RT kernels ticks might have happened but the tick
1271 * work ignored posix CPU timer handling because the
1272 * CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
1273 * must be done very carefully including a check whether
1274 * ticks have happened since the start of the timer
1275 * expiry checks. posix_cpu_timers_enable_work() takes
 * care of that and eventually lets the expiry checks run.
 */
1279 } while (!posix_cpu_timers_enable_work(tsk, start));
1282 * We must release sighand lock before taking any timer's lock.
1283 * There is a potential race with timer deletion here, as the
1284 * siglock now protects our private firing list. We have set
1285 * the firing flag in each timer, so that a deletion attempt
1286 * that gets the timer lock before we do will give it up and
1287 * spin until we've taken care of that timer below.
1289 unlock_task_sighand(tsk, &flags);
1292 * Now that all the timers on our list have the firing flag,
1293 * no one will touch their list entries but us. We'll take
1294 * each timer's lock before clearing its firing flag, so no
1295 * timer call will interfere.
1297 list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1301 * spin_lock() is sufficient here even independent of the
1302 * expiry context. If expiry happens in hard interrupt
1303 * context it's obvious. For task work context it's safe
1304 * because all other operations on timer::it_lock happen in
1305 * task context (syscall or exit).
1307 spin_lock(&timer->it_lock);
1308 list_del_init(&timer->it.cpu.elist);
1309 cpu_firing = timer->it.cpu.firing;
1310 timer->it.cpu.firing = 0;
1312 * The firing flag is -1 if we collided with a reset
1313 * of the timer, which already reported this
1314 * almost-firing as an overrun. So don't generate an event.
1316 if (likely(cpu_firing >= 0))
1317 cpu_timer_fire(timer);
1318 spin_unlock(&timer->it_lock);
1323 * This is called from the timer interrupt handler. The irq handler has
1324 * already updated our counts. We need to check if any timers fire now.
1325 * Interrupts are disabled.
1327 void run_posix_cpu_timers(void)
1329 struct task_struct *tsk = current;
1331 lockdep_assert_irqs_disabled();
/*
 * If the actual expiry is deferred to task work context and the
 * work is already scheduled, there is no point in doing anything here.
 */
1337 if (posix_cpu_timers_work_scheduled(tsk))
1341 * The fast path checks that there are no expired thread or thread
1342 * group timers. If that's so, just return.
1344 if (!fastpath_timer_check(tsk))
1347 __run_posix_cpu_timers(tsk);
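/*
 * Flow summary: this is the per-tick entry point. The fastpath only
 * compares cached expiry values against fresh samples; the heavyweight
 * handle_posix_cpu_timers() runs only when something may have expired,
 * either directly from the interrupt or deferred to task work depending
 * on CONFIG_POSIX_CPU_TIMERS_TASK_WORK.
 */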
1351 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1352 * The tsk->sighand->siglock must be held by the caller.
1354 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1355 u64 *newval, u64 *oldval)
1359 if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1362 nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1363 now = cpu_clock_sample_group(clkid, tsk, true);
/*
 * We are setting the itimer. The *oldval is absolute and we update
 * it to be relative; the *newval argument is relative and we update
 * it to be absolute.
 */
1372 if (*oldval <= now) {
1373 /* Just about to fire. */
1374 *oldval = TICK_NSEC;
/*
 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
 * expiry cache is also used by RLIMIT_CPU!
 */
1387 if (*newval < *nextevt)
1390 tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER);
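/*
 * Usage note: this helper backs both update_rlimit_cpu() above and the
 * setitimer(ITIMER_PROF/ITIMER_VIRTUAL) path (hence the "setting itimer"
 * comment); the relative *newval is made absolute against the sample
 * taken above and, when it is the earliest event, becomes the new expiry
 * cache value so the tick fastpath sees it.
 */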
1393 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1394 const struct timespec64 *rqtp)
1396 struct itimerspec64 it;
1397 struct k_itimer timer;
1402 * Set up a temporary timer and then wait for it to go off.
1404 memset(&timer, 0, sizeof timer);
1405 spin_lock_init(&timer.it_lock);
1406 timer.it_clock = which_clock;
1407 timer.it_overrun = -1;
1408 error = posix_cpu_timer_create(&timer);
1409 timer.it_process = current;
1412 static struct itimerspec64 zero_it;
1413 struct restart_block *restart;
1415 memset(&it, 0, sizeof(it));
1416 it.it_value = *rqtp;
1418 spin_lock_irq(&timer.it_lock);
1419 error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1421 spin_unlock_irq(&timer.it_lock);
1425 while (!signal_pending(current)) {
1426 if (!cpu_timer_getexpires(&timer.it.cpu)) {
/*
 * Our timer fired and was reset, the deletion
 * below cannot fail.
 */
1431 posix_cpu_timer_del(&timer);
1432 spin_unlock_irq(&timer.it_lock);
1437 * Block until cpu_timer_fire (or a signal) wakes us.
1439 __set_current_state(TASK_INTERRUPTIBLE);
1440 spin_unlock_irq(&timer.it_lock);
1442 spin_lock_irq(&timer.it_lock);
1446 * We were interrupted by a signal.
1448 expires = cpu_timer_getexpires(&timer.it.cpu);
1449 error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1452 * Timer is now unarmed, deletion can not fail.
1454 posix_cpu_timer_del(&timer);
1456 spin_unlock_irq(&timer.it_lock);
1458 while (error == TIMER_RETRY) {
/*
 * We need to handle the case when the timer was or is in the
 * middle of firing. In other cases we already freed memory.
 */
1464 spin_lock_irq(&timer.it_lock);
1465 error = posix_cpu_timer_del(&timer);
1466 spin_unlock_irq(&timer.it_lock);
1469 if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1471 * It actually did fire already.
1476 error = -ERESTART_RESTARTBLOCK;
1478 * Report back to the user the time still remaining.
restart = &current->restart_block;
1481 restart->nanosleep.expires = expires;
1482 if (restart->nanosleep.type != TT_NONE)
1483 error = nanosleep_copyout(restart, &it.it_value);
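/*
 * Note on the restart handling above: when a signal interrupts the sleep,
 * the remaining time is copied back via nanosleep_copyout() and
 * -ERESTART_RESTARTBLOCK is returned; posix_cpu_nsleep() below converts
 * this into a restart block whose callback sleeps with TIMER_ABSTIME on
 * the saved expiry, so a restarted sleep does not stretch the total
 * sleep time.
 */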
1489 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1491 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1492 const struct timespec64 *rqtp)
struct restart_block *restart_block = &current->restart_block;
1498 * Diagnose required errors first.
1500 if (CPUCLOCK_PERTHREAD(which_clock) &&
1501 (CPUCLOCK_PID(which_clock) == 0 ||
1502 CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1505 error = do_cpu_nanosleep(which_clock, flags, rqtp);
1507 if (error == -ERESTART_RESTARTBLOCK) {
1509 if (flags & TIMER_ABSTIME)
1510 return -ERESTARTNOHAND;
1512 restart_block->nanosleep.clockid = which_clock;
1513 set_restart_fn(restart_block, posix_cpu_nsleep_restart);
1518 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1520 clockid_t which_clock = restart_block->nanosleep.clockid;
1521 struct timespec64 t;
1523 t = ns_to_timespec64(restart_block->nanosleep.expires);
1525 return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1528 #define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
1529 #define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
1531 static int process_cpu_clock_getres(const clockid_t which_clock,
1532 struct timespec64 *tp)
1534 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1536 static int process_cpu_clock_get(const clockid_t which_clock,
1537 struct timespec64 *tp)
1539 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1541 static int process_cpu_timer_create(struct k_itimer *timer)
1543 timer->it_clock = PROCESS_CLOCK;
1544 return posix_cpu_timer_create(timer);
1546 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1547 const struct timespec64 *rqtp)
1549 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1551 static int thread_cpu_clock_getres(const clockid_t which_clock,
1552 struct timespec64 *tp)
1554 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1556 static int thread_cpu_clock_get(const clockid_t which_clock,
1557 struct timespec64 *tp)
1559 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1561 static int thread_cpu_timer_create(struct k_itimer *timer)
1563 timer->it_clock = THREAD_CLOCK;
1564 return posix_cpu_timer_create(timer);
1567 const struct k_clock clock_posix_cpu = {
1568 .clock_getres = posix_cpu_clock_getres,
1569 .clock_set = posix_cpu_clock_set,
1570 .clock_get_timespec = posix_cpu_clock_get,
1571 .timer_create = posix_cpu_timer_create,
1572 .nsleep = posix_cpu_nsleep,
1573 .timer_set = posix_cpu_timer_set,
1574 .timer_del = posix_cpu_timer_del,
1575 .timer_get = posix_cpu_timer_get,
1576 .timer_rearm = posix_cpu_timer_rearm,
1579 const struct k_clock clock_process = {
1580 .clock_getres = process_cpu_clock_getres,
1581 .clock_get_timespec = process_cpu_clock_get,
1582 .timer_create = process_cpu_timer_create,
1583 .nsleep = process_cpu_nsleep,
1586 const struct k_clock clock_thread = {
1587 .clock_getres = thread_cpu_clock_getres,
1588 .clock_get_timespec = thread_cpu_clock_get,
1589 .timer_create = thread_cpu_timer_create,
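/*
 * Userspace view (illustrative sketch): these k_clock instances back
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID, e.g.:
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGPROF };
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *
 * clock_thread intentionally has no .nsleep handler, so clock_nanosleep()
 * on the per-thread CPU clock is refused by the posix-timers core, while
 * the process-wide clock sleeps via process_cpu_nsleep() above.
 */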