// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

DEFINE_PER_CPU(int, eventfd_wake_count);
EXPORT_PER_CPU_SYMBOL_GPL(eventfd_wake_count);

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};
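
/*
 * Worked example of the counter semantics described above (illustrative
 * only; values and ordering are arbitrary):
 *
 *	write(fd, &(__u64){ 3 }, 8);	// count: 0 -> 3, waiters woken with EPOLLIN
 *	eventfd_signal(ctx, 2);		// count: 3 -> 5 (kernel-side add)
 *	read(fd, &v, 8);		// returns v == 5, count reset to 0
 *					// (with EFD_SEMAPHORE: v == 1, count: 5 -> 4)
 */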

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning an EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented.  This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
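
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a holder of an eventfd context, e.g. one obtained with eventfd_ctx_fdget(),
 * may call eventfd_signal() from atomic context such as an interrupt handler.
 * "struct foo_device" and its "trigger" member are made-up names.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_device *foo = data;
 *
 *		if (foo->trigger)
 *			eventfd_signal(foo->trigger, 1);
 *		return IRQ_HANDLED;
 *	}
 */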

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock.  This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	lockdep_assert_held(&ctx->wqh.lock);

	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);

/**
 * eventfd_ctx_remove_wait_queue - Read the current counter and remove the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
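
/*
 * Illustrative sketch (hypothetical names): a user that registered its own
 * wait queue entry on the eventfd, as KVM's irqfd does, can tear it down and
 * consume any pending count in one atomic step:
 *
 *	static void foo_watch_shutdown(struct foo_watch *watch)
 *	{
 *		__u64 cnt;
 *
 *		eventfd_ctx_remove_wait_queue(watch->ctx, &watch->wait, &cnt);
 *	}
 *
 * "struct foo_watch", "watch->ctx" and "watch->wait" exist only for this
 * example.
 */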

static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count)
				break;
			if (signal_pending(current)) {
				__remove_wait_queue(&ctx->wqh, &wait);
				__set_current_state(TASK_RUNNING);
				spin_unlock_irq(&ctx->wqh.lock);
				return -ERESTARTSYS;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read_iter	= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_ctx_fileget()
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);

	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
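
/*
 * Illustrative sketch (hypothetical ioctl handler): kernel code typically
 * receives an eventfd file descriptor from userspace, resolves it to a
 * context once, and keeps the reference until it no longer needs to signal.
 * "struct foo_device" and "foo->trigger" are made-up names.
 *
 *	static int foo_set_trigger(struct foo_device *foo, int fd)
 *	{
 *		struct eventfd_ctx *ctx;
 *
 *		ctx = eventfd_ctx_fdget(fd);
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *
 *		foo->trigger = ctx;	// dropped later with eventfd_ctx_put()
 *		return 0;
 *	}
 */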

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);

	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;

err:
	eventfd_free_ctx(ctx);
	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
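
/*
 * Userspace view (illustrative only): the syscalls above return a file
 * descriptor whose read(2)/write(2) operate on the 64-bit counter:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *	uint64_t v = 1;
 *
 *	write(efd, &v, sizeof(v));	// count becomes 1, pollers see EPOLLIN
 *	read(efd, &v, sizeof(v));	// v == 1, count reset to 0
 *	read(efd, &v, sizeof(v));	// fails with EAGAIN (EFD_NONBLOCK, count == 0)
 */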