2 Copyright (C) 2005 John McCutchan
3 Copyright © 2015 Canonical Limited
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public License
16 along with this library; if not, see <http://www.gnu.org/licenses/>.
19 Ryan Lortie <desrt@desrt.ca>
20 John McCutchan <john@johnmccutchan.com>
26 #include <sys/ioctl.h>
31 #include "inotify-kernel.h"
32 #include <sys/inotify.h>
33 #ifdef HAVE_SYS_FILIO_H
34 #include <sys/filio.h>
36 #include <glib/glib-unix.h>
38 #include "glib-private.h"
/* Largest possible single inotify event record: the fixed-size header
 * plus a NUL-terminated filename of up to NAME_MAX bytes. */
41 #define MAX_EVENT_SIZE (sizeof(struct inotify_event) + NAME_MAX + 1)
43 /* Amount of time to sleep on receipt of uninteresting events */
44 #define BOREDOM_SLEEP_TIME (100 * G_TIME_SPAN_MILLISECOND)
46 /* Define limits on the maximum amount of time and maximum amount of
47 * interceding events between FROM/TO that can be merged.
49 #define MOVE_PAIR_DELAY (10 * G_TIME_SPAN_MILLISECOND)
50 #define MOVE_PAIR_DISTANCE (100)
52 /* We use the lock from inotify-helper.c
54 * We only have to take it on our read callback.
56 * The rest of locking is taken care of in inotify-helper.c
/* Lock shared with inotify-helper.c; held around the user callback in
 * ik_source_dispatch() below. */
58 G_LOCK_EXTERN (inotify_lock);
/* Allocate a new ik_event_t populated from a raw kernel
 * struct inotify_event, stamping it with the caller-supplied time
 * (used later for move-pair timeout decisions).
 * NOTE(review): this listing elides several lines — the return type,
 * the second ('now') parameter declaration, and the return statement
 * are not visible here. */
61 ik_event_new (struct inotify_event *kevent,
64 ik_event_t *event = g_new0 (ik_event_t, 1);
/* Copy the fixed-size fields straight across from the kernel record. */
66 event->wd = kevent->wd;
67 event->mask = kevent->mask;
68 event->cookie = kevent->cookie;
69 event->len = kevent->len;
70 event->timestamp = now;
/* Duplicate the name so the event owns its own copy.
 * NOTE(review): elided lines may guard this on kevent->len — confirm
 * against the full source before relying on name being non-NULL. */
72 event->name = g_strdup (kevent->name);
/* Free an ik_event_t.  If the event is half of a matched move pair,
 * its partner's back-pointer is cleared before recursively freeing
 * the partner, so the recursion cannot loop back on itself.
 * NOTE(review): the opening brace, the pair-NULL guard, and the
 * freeing of name/event storage are elided from this listing. */
80 _ik_event_free (ik_event_t *event)
84 event->pair->pair = NULL;
85 _ik_event_free (event->pair);
/* Tail of the InotifyKernelSource definition (earlier members are
 * elided in this listing).  unmatched_moves maps move cookies
 * (stored via GUINT_TO_POINTER) to pending IN_MOVED_FROM events that
 * are waiting for their matching IN_MOVED_TO. */
100 GHashTable *unmatched_moves;
102 } InotifyKernelSource;
/* Singleton instance, created once via _ik_startup(). */
104 static InotifyKernelSource *inotify_source;
/* Compute the monotonic time at which the head of the event queue
 * becomes dispatchable.  An unpaired IN_MOVED_FROM is held back by
 * MOVE_PAIR_DELAY in the hope its IN_MOVED_TO arrives, unless the
 * queue has grown past MOVE_PAIR_DISTANCE events.
 * NOTE(review): the return type, braces, and the early-return values
 * for the empty-queue and ready-now cases are elided here; the caller
 * ik_source_can_dispatch_now() treats a negative result as
 * "nothing to dispatch". */
107 ik_source_get_dispatch_time (InotifyKernelSource *iks)
111 head = g_queue_peek_head (&iks->queue);
113 /* nothing in the queue: not ready */
117 /* if it's not an unpaired move, it is ready now */
118 if (~head->mask & IN_MOVED_FROM || head->pair)
121 /* if the queue is too long then it's ready now */
122 if (iks->queue.length > MOVE_PAIR_DISTANCE)
125 /* otherwise, it's ready after the delay */
126 return head->timestamp + MOVE_PAIR_DELAY;
/* TRUE iff the head event's dispatch time has already arrived.
 * A negative dispatch time (empty queue) never qualifies.
 * NOTE(review): the 'now' parameter line and braces are elided. */
130 ik_source_can_dispatch_now (InotifyKernelSource *iks,
133 gint64 dispatch_time;
135 dispatch_time = ik_source_get_dispatch_time (iks);
137 return 0 <= dispatch_time && dispatch_time <= now;
/* Issue a single read() on the inotify fd into the caller's buffer.
 * Aborts via g_error() on an unexpected errno or on EOF (which the
 * kernel should not deliver for an inotify fd).
 * NOTE(review): the declaration, the errno capture, the expected-error
 * handling (presumably EAGAIN/EINTR on the non-blocking fd — confirm),
 * and the return of the byte count are elided from this listing. */
141 ik_source_read_some_events (InotifyKernelSource *iks,
149 result = read (iks->fd, buffer, buffer_len);
160 g_error ("inotify read(): %s", g_strerror (errsv));
162 else if (result == 0)
163 g_error ("inotify unexpectedly hit eof");
/* Drain the currently-pending inotify events.  First read into the
 * caller-supplied buffer; if that read landed within MAX_EVENT_SIZE
 * of the buffer end (so another event may have been left behind),
 * query the exact backlog with FIONREAD and do exactly one follow-up
 * read into a right-sized heap buffer — deliberately not looping, so
 * a fast writer cannot starve us.  Stores the total byte count in
 * *length_out.
 * NOTE(review): the declaration, local declarations, braces and the
 * buffer-return statement are elided; the caller appears to receive
 * either the original buffer or the heap buffer — confirm ownership
 * against the full source. */
169 ik_source_read_all_the_events (InotifyKernelSource *iks,
176 n_read = ik_source_read_some_events (iks, buffer, buffer_len);
178 /* Check if we might have gotten another event if we had passed in a
181 if (n_read + MAX_EVENT_SIZE > buffer_len)
188 /* figure out how many more bytes there are to read */
189 result = ioctl (iks->fd, FIONREAD, &n_readable);
192 g_error ("inotify ioctl(FIONREAD): %s", g_strerror (errsv));
196 /* there is in fact more data. allocate a new buffer, copy
197 * the existing data, and then append the remaining.
199 new_buffer = g_malloc (n_read + n_readable);
200 memcpy (new_buffer, buffer, n_read);
201 n_read += ik_source_read_some_events (iks, new_buffer + n_read, n_readable);
205 /* There may be new events in the buffer that were added after
206 * the FIONREAD was performed, but we can't risk getting into
207 * a loop. We'll get them next time.
212 *length_out = n_read;
/* GSource dispatch callback: drain the inotify fd, pair
 * IN_MOVED_FROM / IN_MOVED_TO events by cookie, deliver every ready
 * event to the user callback under inotify_lock, then program the
 * source's next wake-up (fd poll, move-pair deadline, or boredom
 * sleep).
 * NOTE(review): many lines are elided in this listing; the exact
 * brace structure and some declarations are inferred from the
 * surviving comments and must be confirmed against the full source. */
218 ik_source_dispatch (GSource *source,
222 InotifyKernelSource *iks = (InotifyKernelSource *) source;
223 gboolean (*user_callback) (ik_event_t *event) = (void *) func;
224 gboolean interesting = FALSE;
227 now = g_source_get_time (source);
/* Read when the fd polled readable, or unconditionally while in
 * "bored" mode (fd polling disabled, timer-driven wake-ups). */
229 if (iks->is_bored || g_source_query_unix_fd (source, iks->fd_tag))
231 gchar stack_buffer[4096];
236 /* We want to read all of the available events.
238 * We need to do it in a finite number of steps so that we don't
239 * get caught in a loop of read() with another process
240 * continuously adding events each time we drain them.
242 * In the normal case we will have only a few events in the queue,
243 * so start out by reading into a small stack-allocated buffer.
244 * Even though we're on a fresh stack frame, there is no need to
245 * pointlessly blow up with the size of the worker thread stack
246 * with a huge buffer here.
248 * If the result is large enough to cause us to suspect that
249 * another event may be pending then we allocate a buffer on the
250 * heap that can hold all of the events and read (once!) into that
253 buffer = ik_source_read_all_the_events (iks, stack_buffer, sizeof stack_buffer, &buffer_len);
/* Walk the packed, variable-length event records in the buffer. */
257 while (offset < buffer_len)
259 struct inotify_event *kevent = (struct inotify_event *) (buffer + offset);
262 event = ik_event_new (kevent, now);
/* Each record is the fixed header plus 'len' bytes of name. */
264 offset += sizeof (struct inotify_event) + event->len;
/* IN_MOVED_TO: look for a pending IN_MOVED_FROM with the same
 * cookie and, if found, claim it as this event's pair. */
266 if (event->mask & IN_MOVED_TO)
270 pair = g_hash_table_lookup (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
273 g_assert (!pair->pair);
275 g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
276 event->is_second_in_pair = TRUE;
/* IN_MOVED_FROM: park it in the cookie table until the matching
 * IN_MOVED_TO arrives or the pairing deadline passes.  A duplicate
 * cookie should not happen and is only warned about. */
285 else if (event->mask & IN_MOVED_FROM)
289 new = g_hash_table_insert (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), event);
291 g_warning ("inotify: got IN_MOVED_FROM event with already-pending cookie %#x", event->cookie);
/* All events — paired or not — join the ordered dispatch queue. */
296 g_queue_push_tail (&iks->queue, event);
301 /* We can end up reading nothing if we arrived here due to a
302 * boredom timer but the stream of events stopped meanwhile.
304 * In that case, we need to switch back to polling the file
305 * descriptor in the usual way.
307 g_assert (iks->is_bored);
/* Free the heap buffer if the events overflowed the stack buffer. */
311 if (buffer != stack_buffer)
/* Deliver every event whose dispatch time has arrived, oldest first. */
315 while (ik_source_can_dispatch_now (iks, now))
319 /* callback will free the event */
320 event = g_queue_pop_head (&iks->queue);
/* A still-unpaired move being dispatched will never pair now, so
 * drop its cookie from the pending table. */
322 if (event->mask & IN_MOVED_FROM && !event->pair)
323 g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
/* The user callback runs under the lock shared with inotify-helper.c. */
325 G_LOCK (inotify_lock);
327 interesting |= (* user_callback) (event);
329 G_UNLOCK (inotify_lock);
332 /* The queue gets blocked iff we have unmatched moves */
333 g_assert ((iks->queue.length > 0) == (g_hash_table_size (iks->unmatched_moves) > 0));
335 /* Here's where we decide what will wake us up next.
337 * If the last event was interesting then we will wake up on the fd or
338 * when the timeout is reached on an unpaired move (if any).
340 * If the last event was uninteresting then we will wake up after the
341 * shorter of the boredom sleep or any timeout for an unpaired move.
/* Interesting path: resume normal fd polling and arm only the
 * move-pair deadline (negative ready time = no timer). */
347 g_source_modify_unix_fd (source, iks->fd_tag, G_IO_IN);
348 iks->is_bored = FALSE;
351 g_source_set_ready_time (source, ik_source_get_dispatch_time (iks));
/* Uninteresting path: stop polling the fd and sleep until the earlier
 * of the boredom timeout or a pending move-pair deadline. */
355 guint64 dispatch_time = ik_source_get_dispatch_time (iks);
356 guint64 boredom_time = now + BOREDOM_SLEEP_TIME;
360 g_source_modify_unix_fd (source, iks->fd_tag, 0);
361 iks->is_bored = TRUE;
364 g_source_set_ready_time (source, MIN (dispatch_time, boredom_time));
/* Create the singleton InotifyKernelSource: allocate a GSource of our
 * type, open the inotify fd (preferring inotify_init1(IN_CLOEXEC),
 * falling back to inotify_init()), make it non-blocking, register it
 * for G_IO_IN polling, install the user callback, and attach the
 * source to GLib's shared worker context.
 * NOTE(review): the GSourceFuncs initializer contents, the fallback
 * condition around inotify_init(), the fd<0 error path, and the
 * return statement are elided from this listing. */
370 static InotifyKernelSource *
371 ik_source_new (gboolean (* callback) (ik_event_t *event))
373 static GSourceFuncs source_funcs = {
376 /* should have a finalize, but it will never happen */
378 InotifyKernelSource *iks;
381 source = g_source_new (&source_funcs, sizeof (InotifyKernelSource));
382 iks = (InotifyKernelSource *) source;
384 g_source_set_name (source, "inotify kernel source");
/* Keys are raw cookies stored with GUINT_TO_POINTER, so the default
 * direct hash/equal functions suffice. */
386 iks->unmatched_moves = g_hash_table_new (NULL, NULL);
387 iks->fd = inotify_init1 (IN_CLOEXEC);
390 iks->fd = inotify_init ();
394 GError *error = NULL;
396 g_unix_set_fd_nonblocking (iks->fd, TRUE, &error);
397 g_assert_no_error (error);
399 iks->fd_tag = g_source_add_unix_fd (source, iks->fd, G_IO_IN);
402 g_source_set_callback (source, (GSourceFunc) callback, NULL, NULL);
404 g_source_attach (source, GLIB_PRIVATE_CALL (g_get_worker_context) ());
/* One-time initialisation: lazily create the singleton source on the
 * first call (thread-safe via g_once_init_enter/leave).  Returns TRUE
 * iff the inotify fd was opened successfully.
 * NOTE(review): the return type and braces are elided. */
410 _ik_startup (gboolean (*cb)(ik_event_t *event))
412 if (g_once_init_enter (&inotify_source))
413 g_once_init_leave (&inotify_source, ik_source_new (cb));
415 return inotify_source->fd >= 0;
/* Add an inotify watch on 'path' with the given event mask.  Requires
 * a prior successful _ik_startup().
 * NOTE(review): the remaining parameters, the success return of the
 * watch descriptor, and the failure handling after the FIXME are all
 * elided from this listing. */
419 _ik_watch (const char *path,
425 g_assert (path != NULL);
426 g_assert (inotify_source && inotify_source->fd >= 0);
428 wd = inotify_add_watch (inotify_source->fd, path, mask);
433 /* FIXME: debug msg failed to add watch */
/* Remove a previously added watch descriptor.  Only 'wd' is passed to
 * inotify_rm_watch in the visible code; the role of 'path' (if any)
 * is in elided lines — confirm against the full source.
 * NOTE(review): the wd parameter declaration, braces, and the
 * return values for both paths are elided. */
444 _ik_ignore (const char *path,
448 g_assert (inotify_source && inotify_source->fd >= 0);
450 if (inotify_rm_watch (inotify_source->fd, wd) < 0)
453 /* failed to rm watch */