/*
 *  fs/timerfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 *  Thanks to Thomas Gleixner for code reviews and useful comments.
 */
11 #include <linux/file.h>
12 #include <linux/poll.h>
13 #include <linux/init.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/list.h>
19 #include <linux/spinlock.h>
20 #include <linux/time.h>
21 #include <linux/hrtimer.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/timerfd.h>
24 #include <linux/syscalls.h>
25 #include <linux/rcupdate.h>
31 wait_queue_head_t wqh;
36 struct list_head clist;
/*
 * All contexts armed with TFD_TIMER_CANCEL_ON_SET; walked under RCU by
 * timerfd_clock_was_set(), modified under cancel_lock.
 */
static LIST_HEAD(cancel_list);
static DEFINE_SPINLOCK(cancel_lock);
/*
 * This gets called when the timer event triggers. We set the "expired"
 * flag, but we do not re-arm the timer (in case it's necessary,
 * tintv.tv64 != 0) until the timer is accessed.
 */
48 static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
50 struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr);
53 spin_lock_irqsave(&ctx->wqh.lock, flags);
56 wake_up_locked(&ctx->wqh);
57 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
59 return HRTIMER_NORESTART;
/*
 * Called when the clock was set to cancel the timers in the cancel
 * list. This will wake up processes waiting on these timers. The
 * wake-up requires ctx->ticks to be non zero, therefore we increment
 * it before calling wake_up_locked().
 */
68 void timerfd_clock_was_set(void)
70 ktime_t moffs = ktime_get_monotonic_offset();
71 struct timerfd_ctx *ctx;
75 list_for_each_entry_rcu(ctx, &cancel_list, clist) {
76 if (!ctx->might_cancel)
78 spin_lock_irqsave(&ctx->wqh.lock, flags);
79 if (ctx->moffs.tv64 != moffs.tv64) {
80 ctx->moffs.tv64 = KTIME_MAX;
82 wake_up_locked(&ctx->wqh);
84 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
89 static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
91 if (ctx->might_cancel) {
92 ctx->might_cancel = false;
93 spin_lock(&cancel_lock);
94 list_del_rcu(&ctx->clist);
95 spin_unlock(&cancel_lock);
99 static bool timerfd_canceled(struct timerfd_ctx *ctx)
101 if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
103 ctx->moffs = ktime_get_monotonic_offset();
107 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
109 if (ctx->clockid == CLOCK_REALTIME && (flags & TFD_TIMER_ABSTIME) &&
110 (flags & TFD_TIMER_CANCEL_ON_SET)) {
111 if (!ctx->might_cancel) {
112 ctx->might_cancel = true;
113 spin_lock(&cancel_lock);
114 list_add_rcu(&ctx->clist, &cancel_list);
115 spin_unlock(&cancel_lock);
117 } else if (ctx->might_cancel) {
118 timerfd_remove_cancel(ctx);
122 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
126 remaining = hrtimer_expires_remaining(&ctx->tmr);
127 return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
130 static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
131 const struct itimerspec *ktmr)
133 enum hrtimer_mode htmode;
135 int clockid = ctx->clockid;
137 htmode = (flags & TFD_TIMER_ABSTIME) ?
138 HRTIMER_MODE_ABS: HRTIMER_MODE_REL;
140 texp = timespec_to_ktime(ktmr->it_value);
143 ctx->tintv = timespec_to_ktime(ktmr->it_interval);
144 hrtimer_init(&ctx->tmr, clockid, htmode);
145 hrtimer_set_expires(&ctx->tmr, texp);
146 ctx->tmr.function = timerfd_tmrproc;
147 if (texp.tv64 != 0) {
148 hrtimer_start(&ctx->tmr, texp, htmode);
149 if (timerfd_canceled(ctx))
155 static int timerfd_release(struct inode *inode, struct file *file)
157 struct timerfd_ctx *ctx = file->private_data;
159 timerfd_remove_cancel(ctx);
160 hrtimer_cancel(&ctx->tmr);
165 static unsigned int timerfd_poll(struct file *file, poll_table *wait)
167 struct timerfd_ctx *ctx = file->private_data;
168 unsigned int events = 0;
171 poll_wait(file, &ctx->wqh, wait);
173 spin_lock_irqsave(&ctx->wqh.lock, flags);
176 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
181 static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
184 struct timerfd_ctx *ctx = file->private_data;
188 if (count < sizeof(ticks))
190 spin_lock_irq(&ctx->wqh.lock);
191 if (file->f_flags & O_NONBLOCK)
194 res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks);
197 * If clock has changed, we do not care about the
198 * ticks and we do not rearm the timer. Userspace must
201 if (timerfd_canceled(ctx)) {
210 if (ctx->expired && ctx->tintv.tv64) {
212 * If tintv.tv64 != 0, this is a periodic timer that
213 * needs to be re-armed. We avoid doing it in the timer
214 * callback to avoid DoS attacks specifying a very
215 * short timer period.
217 ticks += hrtimer_forward_now(&ctx->tmr,
219 hrtimer_restart(&ctx->tmr);
224 spin_unlock_irq(&ctx->wqh.lock);
226 res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks);
230 static const struct file_operations timerfd_fops = {
231 .release = timerfd_release,
232 .poll = timerfd_poll,
233 .read = timerfd_read,
234 .llseek = noop_llseek,
237 static int timerfd_fget(int fd, struct fd *p)
239 struct fd f = fdget(fd);
242 if (f.file->f_op != &timerfd_fops) {
250 SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
253 struct timerfd_ctx *ctx;
255 /* Check the TFD_* constants for consistency. */
256 BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
257 BUILD_BUG_ON(TFD_NONBLOCK != O_NONBLOCK);
259 if ((flags & ~TFD_CREATE_FLAGS) ||
260 (clockid != CLOCK_MONOTONIC &&
261 clockid != CLOCK_REALTIME))
264 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
268 init_waitqueue_head(&ctx->wqh);
269 ctx->clockid = clockid;
270 hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
271 ctx->moffs = ktime_get_monotonic_offset();
273 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
274 O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
281 SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
282 const struct itimerspec __user *, utmr,
283 struct itimerspec __user *, otmr)
286 struct timerfd_ctx *ctx;
287 struct itimerspec ktmr, kotmr;
290 if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
293 if ((flags & ~TFD_SETTIME_FLAGS) ||
294 !timespec_valid(&ktmr.it_value) ||
295 !timespec_valid(&ktmr.it_interval))
298 ret = timerfd_fget(ufd, &f);
301 ctx = f.file->private_data;
303 timerfd_setup_cancel(ctx, flags);
306 * We need to stop the existing timer before reprogramming
307 * it to the new values.
310 spin_lock_irq(&ctx->wqh.lock);
311 if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
313 spin_unlock_irq(&ctx->wqh.lock);
318 * If the timer is expired and it's periodic, we need to advance it
319 * because the caller may want to know the previous expiration time.
320 * We do not update "ticks" and "expired" since the timer will be
321 * re-programmed again in the following timerfd_setup() call.
323 if (ctx->expired && ctx->tintv.tv64)
324 hrtimer_forward_now(&ctx->tmr, ctx->tintv);
326 kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
327 kotmr.it_interval = ktime_to_timespec(ctx->tintv);
330 * Re-program the timer to the new value ...
332 ret = timerfd_setup(ctx, flags, &ktmr);
334 spin_unlock_irq(&ctx->wqh.lock);
336 if (otmr && copy_to_user(otmr, &kotmr, sizeof(kotmr)))
342 SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
345 struct timerfd_ctx *ctx;
346 struct itimerspec kotmr;
347 int ret = timerfd_fget(ufd, &f);
350 ctx = f.file->private_data;
352 spin_lock_irq(&ctx->wqh.lock);
353 if (ctx->expired && ctx->tintv.tv64) {
356 hrtimer_forward_now(&ctx->tmr, ctx->tintv) - 1;
357 hrtimer_restart(&ctx->tmr);
359 kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
360 kotmr.it_interval = ktime_to_timespec(ctx->tintv);
361 spin_unlock_irq(&ctx->wqh.lock);
364 return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;