2 Copyright (C) 2005 John McCutchan
4 The Gnome Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Library General Public License as
6 published by the Free Software Foundation; either version 2 of the
7 License, or (at your option) any later version.
9 The Gnome Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Library General Public License for more details.
14 You should have received a copy of the GNU Library General Public
15 License along with the Gnome Library; see the file COPYING.LIB. If not,
16 see <http://www.gnu.org/licenses/>.
19 John McCutchan <john@johnmccutchan.com>
25 #include <sys/ioctl.h>
30 #include "inotify-kernel.h"
31 #include <sys/inotify.h>
33 #include "glib-private.h"
35 /* Timings for pairing MOVED_TO / MOVED_FROM events */
36 #define PROCESS_EVENTS_TIME 1000 /* 1000 milliseconds (1 hz) */
37 #define DEFAULT_HOLD_UNTIL_TIME 0 /* 0 millisecond */
38 #define MOVE_HOLD_UNTIL_TIME 500 /* 500 microseconds or 0.5 milliseconds */
/* Single process-wide inotify instance; stays -1 until _ik_startup succeeds. */
40 static int inotify_instance_fd = -1;
/* Events read from the kernel but not yet paired/dispatched. */
41 static GQueue *events_to_process = NULL;
/* Events that finished pairing and are ready to hand to user_cb. */
42 static GQueue *event_queue = NULL;
/* Maps a move cookie (stored via GINT_TO_POINTER) to the ik_event_internal_t
 * of a MOVED_FROM event still waiting for its MOVED_TO partner. */
43 static GHashTable * cookie_hash = NULL;
44 static GIOChannel *inotify_read_ioc;
45 static GPollFD ik_poll_fd;
/* FALSE while the poll fd is temporarily removed from the source
 * (see ik_source_check / ik_source_timeout). */
46 static gboolean ik_poll_fd_enabled = TRUE;
/* Consumer callback for finished events; presumably stored by _ik_startup
 * (the assignment is not visible in this excerpt). */
47 static void (*user_cb)(ik_event_t *event);
49 static gboolean ik_read_callback (gpointer user_data);
50 static gboolean ik_process_eq_callback (gpointer user_data);
/* Statistics: MOVED_FROM/MOVED_TO pairs matched vs. missed. */
52 static guint32 ik_move_matches = 0;
53 static guint32 ik_move_misses = 0;
/* TRUE while the periodic ik_process_eq_callback timeout is attached. */
55 static gboolean process_eq_running = FALSE;
57 /* We use the lock from inotify-helper.c
59 * There are two places that we take this lock
61 * 1) In ik_read_callback
63 * 2) ik_process_eq_callback.
66 * The rest of locking is taken care of in inotify-helper.c
68 G_LOCK_EXTERN (inotify_lock);
/* Wrapper around an ik_event_t while it waits in events_to_process.
 * Other members (event, hold_until, seen, sent) are referenced later in
 * this file but their declarations are elided here. */
70 typedef struct ik_event_internal {
/* The matching MOVED_FROM/MOVED_TO partner once paired; NULL otherwise. */
75 struct ik_event_internal *pair;
76 } ik_event_internal_t;
/* NOTE(review): prepare() of the custom inotify GSource; only the opening
 * comment and signature are visible in this excerpt. */
78 /* In order to perform non-sleeping inotify event chunking we need
82 ik_source_prepare (GSource *source,
/* Timeout handler scheduled by ik_source_check: re-adds the poll fd that
 * was temporarily removed from the source, releases the reference taken
 * on the source when the timeout was armed, and re-enables polling. */
89 ik_source_timeout (gpointer data)
91 GSource *source = (GSource *)data;
93 /* Re-activate the PollFD */
94 g_source_add_poll (source, &ik_poll_fd);
95 g_source_unref (source);
96 ik_poll_fd_enabled = TRUE;
/* Tunables for batching inotify reads (see ik_source_check):
 *
 * MAX_PENDING_COUNT        - how many consecutive checks may defer reading
 * PENDING_THRESHOLD(qsize) - stop deferring once the backlog exceeds half
 *                            of qsize
 * PENDING_MARGINAL_COST(p) - minimum backlog growth required to keep
 *                            deferring; doubles with each deferral
 * MAX_QUEUED_EVENTS        - sizing factor for the read buffer
 * AVERAGE_EVENT_SIZE       - estimated bytes per kernel event: the fixed
 *                            struct inotify_event header plus a 16-byte
 *                            guess for the trailing name
 * TIMEOUT_MILLISECONDS     - how long to defer before re-enabling the
 *                            poll fd
 */
#define MAX_PENDING_COUNT 2
#define PENDING_THRESHOLD(qsize) ((qsize) >> 1)
#define PENDING_MARGINAL_COST(p) ((unsigned int)(1 << (p)))
#define MAX_QUEUED_EVENTS 2048
/* Parenthesized so the expansion stays a single term inside any surrounding
 * expression; the previous `sizeof (struct inotify_event) + 16` would
 * misparse in e.g. `2 * AVERAGE_EVENT_SIZE`. */
#define AVERAGE_EVENT_SIZE (sizeof (struct inotify_event) + 16)
#define TIMEOUT_MILLISECONDS 10
/* check() of the inotify GSource.  Rather than reading each event as it
 * arrives, it may briefly back off (up to MAX_PENDING_COUNT times, for
 * TIMEOUT_MILLISECONDS each) while the kernel backlog is still growing,
 * so a burst of events can be read in one chunk.  The early returns and
 * closing braces are elided in this excerpt. */
109 ik_source_check (GSource *source)
/* Persists across calls: backlog estimate from the previous check, and
 * how many consecutive checks have deferred reading. */
111 static int prev_pending = 0, pending_count = 0;
113 /* We already disabled the PollFD or
114 * nothing to be read from inotify */
115 if (!ik_poll_fd_enabled || !(ik_poll_fd.revents & G_IO_IN))
/* Only defer up to MAX_PENDING_COUNT times in a row. */
118 if (pending_count < MAX_PENDING_COUNT)
120 GSource *timeout_source;
121 unsigned int pending;
/* FIONREAD reports how many bytes are currently readable from the fd. */
123 if (ioctl (inotify_instance_fd, FIONREAD, &pending) == -1)
/* Convert the byte count into an approximate event count. */
126 pending /= AVERAGE_EVENT_SIZE;
128 /* Don't wait if the number of pending events is too close
129 * to the maximum queue size.
131 if (pending > PENDING_THRESHOLD (MAX_QUEUED_EVENTS))
134 /* With each successive iteration, the minimum rate for
135 * further sleep doubles.
137 if (pending-prev_pending < PENDING_MARGINAL_COST (pending_count))
140 prev_pending = pending;
143 /* We are going to wait to read the events: */
145 /* Remove the PollFD from the source */
146 g_source_remove_poll (source, &ik_poll_fd);
147 /* To avoid threading issues we need to flag that we've done that */
148 ik_poll_fd_enabled = FALSE;
149 /* Set a timeout to re-add the PollFD to the source */
150 g_source_ref (source);
/* ik_source_timeout re-adds the poll fd and drops this reference. */
152 timeout_source = g_timeout_source_new (TIMEOUT_MILLISECONDS);
153 g_source_set_callback (timeout_source, ik_source_timeout, source, NULL);
154 g_source_attach (timeout_source, GLIB_PRIVATE_CALL (g_get_worker_context) ());
155 g_source_unref (timeout_source);
161 /* We are ready to read events from inotify */
/* dispatch() of the inotify GSource: chains straight to the callback set
 * on the source (ik_read_callback, installed in _ik_startup). */
170 ik_source_dispatch (GSource *source,
171 GSourceFunc callback,
175 return callback (user_data);
/* Vtable wiring the prepare/check/dispatch functions above into the
 * custom GSource created by _ik_startup (initializer elided here). */
179 static GSourceFuncs ik_source_funcs =
/* One-time initialisation of the inotify backend: opens the inotify fd,
 * attaches the custom read GSource to the GLib worker context, and
 * allocates the pairing queues and cookie hash.  `cb` receives each
 * finished ik_event_t.  Safe to call repeatedly: later calls just report
 * whether the first startup succeeded. */
187 gboolean _ik_startup (void (*cb)(ik_event_t *event))
189 static gboolean initialized = FALSE;
193 /* Ignore multi-calls */
195 return inotify_instance_fd >= 0;
/* Prefer inotify_init1 so the fd is atomically marked close-on-exec. */
199 #ifdef HAVE_INOTIFY_INIT1
200 inotify_instance_fd = inotify_init1 (IN_CLOEXEC);
202 inotify_instance_fd = -1;
/* Fall back to plain inotify_init when init1 is unavailable or failed. */
204 if (inotify_instance_fd < 0)
205 inotify_instance_fd = inotify_init ();
207 if (inotify_instance_fd < 0)
210 inotify_read_ioc = g_io_channel_unix_new (inotify_instance_fd);
211 ik_poll_fd.fd = inotify_instance_fd;
212 ik_poll_fd.events = G_IO_IN | G_IO_HUP | G_IO_ERR;
/* Raw binary, non-blocking reads from the inotify fd. */
213 g_io_channel_set_encoding (inotify_read_ioc, NULL, NULL);
214 g_io_channel_set_flags (inotify_read_ioc, G_IO_FLAG_NONBLOCK, NULL);
216 source = g_source_new (&ik_source_funcs, sizeof (GSource));
217 g_source_set_name (source, "GIO Inotify");
218 g_source_add_poll (source, &ik_poll_fd);
219 g_source_set_callback (source, ik_read_callback, NULL, NULL);
220 g_source_attach (source, GLIB_PRIVATE_CALL (g_get_worker_context) ());
221 g_source_unref (source);
/* Cookies are small integers stored via GINT_TO_POINTER, so direct
 * hash/equal are sufficient. */
223 cookie_hash = g_hash_table_new (g_direct_hash, g_direct_equal);
224 event_queue = g_queue_new ();
225 events_to_process = g_queue_new ();
/* Wraps a freshly-read ik_event_t for the pairing queue, stamping it with
 * its hold-until deadline (now + DEFAULT_HOLD_UNTIL_TIME).  Caller keeps
 * no separate ownership of `event`; it travels inside the wrapper. */
230 static ik_event_internal_t *
231 ik_event_internal_new (ik_event_t *event)
233 ik_event_internal_t *internal_event = g_new0 (ik_event_internal_t, 1);
/* NOTE(review): GTimeVal / g_get_current_time are deprecated in newer
 * GLib; kept as-is to match the rest of this file. */
238 g_get_current_time (&tv);
239 g_time_val_add (&tv, DEFAULT_HOLD_UNTIL_TIME);
240 internal_event->event = event;
241 internal_event->hold_until = tv;
243 return internal_event;
/* Translates one raw struct inotify_event at the start of `buffer` into a
 * heap-allocated ik_event_t.  The name is always a fresh g_strdup copy
 * (an empty string when the kernel supplied no name), so _ik_event_free
 * can free it unconditionally. */
247 ik_event_new (char *buffer)
249 struct inotify_event *kevent = (struct inotify_event *)buffer;
250 ik_event_t *event = g_new0 (ik_event_t, 1);
254 event->wd = kevent->wd;
255 event->mask = kevent->mask;
256 event->cookie = kevent->cookie;
257 event->len = kevent->len;
/* kevent->name is only meaningful when len > 0 (branching elided here). */
259 event->name = g_strdup (kevent->name);
261 event->name = g_strdup ("");
/* Frees an ik_event_t and, via recursion, its paired partner event.
 * The pair != NULL guard and the final g_free of the event struct itself
 * are elided in this excerpt. */
267 _ik_event_free (ik_event_t *event)
270 _ik_event_free (event->pair);
271 g_free (event->name);
/* Registers an inotify watch on `path` with the given event mask on the
 * shared inotify instance; returns the kernel watch descriptor. */
276 _ik_watch (const char *path,
282 g_assert (path != NULL);
283 g_assert (inotify_instance_fd >= 0);
285 wd = inotify_add_watch (inotify_instance_fd, path, mask);
290 /* FIXME: debug msg failed to add watch */
/* Removes the inotify watch identified by `wd`.  The `path` parameter is
 * not used by the visible body (inotify_rm_watch only needs the fd and
 * watch descriptor). */
301 _ik_ignore (const char *path,
305 g_assert (inotify_instance_fd >= 0);
307 if (inotify_rm_watch (inotify_instance_fd, wd) < 0)
310 /* failed to rm watch */
/* Reads as many raw inotify events as fit into a lazily-allocated static
 * buffer of AVERAGE_EVENT_SIZE * MAX_QUEUED_EVENTS bytes.  On return
 * *buffer_out points at that shared static buffer (caller must not free
 * it) and *buffer_size_out holds the number of bytes actually read. */
318 ik_read_events (gsize *buffer_size_out,
/* One buffer for the lifetime of the process; reused on every read. */
321 static gchar *buffer = NULL;
322 static gsize buffer_size;
324 /* Initialize the buffer on our first call */
327 buffer_size = AVERAGE_EVENT_SIZE;
328 buffer_size *= MAX_QUEUED_EVENTS;
329 buffer = g_malloc (buffer_size);
/* Default to "nothing read" so error paths report an empty result. */
332 *buffer_size_out = 0;
335 memset (buffer, 0, buffer_size);
337 if (g_io_channel_read_chars (inotify_read_ioc, (char *)buffer, buffer_size, buffer_size_out, NULL) != G_IO_STATUS_NORMAL) {
340 *buffer_out = buffer;
/* Dispatch callback of the inotify GSource: drains the kernel buffer,
 * wraps each raw event and queues it for pairing, then arms the periodic
 * event-processing timeout if it is not already running.  Runs on the
 * GLib worker thread and holds inotify_lock throughout. */
344 ik_read_callback (gpointer user_data)
347 gsize buffer_size, buffer_i, events;
349 G_LOCK (inotify_lock);
350 ik_read_events (&buffer_size, &buffer);
/* Walk the variable-length records: each is the fixed header followed by
 * event->len name bytes. */
354 while (buffer_i < buffer_size)
356 struct inotify_event *event;
358 event = (struct inotify_event *)&buffer[buffer_i];
359 event_size = sizeof(struct inotify_event) + event->len;
360 g_queue_push_tail (events_to_process, ik_event_internal_new (ik_event_new (&buffer[buffer_i])));
361 buffer_i += event_size;
365 /* If the event process callback is off, turn it back on */
366 if (!process_eq_running && events)
368 GSource *timeout_source;
370 process_eq_running = TRUE;
371 timeout_source = g_timeout_source_new (PROCESS_EVENTS_TIME);
372 g_source_set_callback (timeout_source, ik_process_eq_callback, NULL, NULL);
/* NOTE(review): written as GLIB_PRIVATE_CALL (g_get_worker_context ())
 * here but GLIB_PRIVATE_CALL (g_get_worker_context) () elsewhere in this
 * file; both forms resolve to the same call, but the style should be
 * made consistent. */
373 g_source_attach (timeout_source, GLIB_PRIVATE_CALL (g_get_worker_context ()));
374 g_source_unref (timeout_source);
377 G_UNLOCK (inotify_lock);
/* Returns TRUE iff *val1 is strictly earlier than *val2, comparing seconds
 * first and microseconds only on a tie.  The return statements are elided
 * in this excerpt. */
383 g_timeval_lt (GTimeVal *val1,
386 if (val1->tv_sec < val2->tv_sec)
389 if (val1->tv_sec > val2->tv_sec)
392 /* val1->tv_sec == val2->tv_sec */
393 if (val1->tv_usec < val2->tv_usec)
/* Returns TRUE iff *val1 is earlier than or equal to *val2; identical to
 * g_timeval_lt except the microsecond comparison admits equality.  The
 * return statements are elided in this excerpt. */
400 g_timeval_le (GTimeVal *val1,
403 if (val1->tv_sec < val2->tv_sec)
406 if (val1->tv_sec > val2->tv_sec)
409 /* val1->tv_sec == val2->tv_sec */
410 if (val1->tv_usec <= val2->tv_usec)
/* Links a MOVED_FROM (event1) / MOVED_TO (event2) couple that share a
 * cookie: wires up the pair pointers, marks event2 as the second half of
 * the pair, and gives both events the later of the two hold deadlines so
 * they leave the queue together. */
417 ik_pair_events (ik_event_internal_t *event1,
418 ik_event_internal_t *event2)
420 g_assert (event1 && event2);
421 /* We should only be pairing events that have the same cookie */
422 g_assert (event1->event->cookie == event2->event->cookie);
423 /* We shouldn't pair an event that already is paired */
424 g_assert (event1->pair == NULL && event2->pair == NULL);
426 /* Pair the internal structures and the ik_event_t structures */
427 event1->pair = event2;
428 event1->event->pair = event2->event;
429 event2->event->is_second_in_pair = TRUE;
/* Synchronise both hold deadlines on whichever is later. */
431 if (g_timeval_lt (&event1->hold_until, &event2->hold_until))
432 event1->hold_until = event2->hold_until;
434 event2->hold_until = event1->hold_until;
/* Pushes an event's hold-until deadline further into the future by the
 * given number of microseconds (g_time_val_add takes microseconds). */
438 ik_event_add_microseconds (ik_event_internal_t *event,
442 g_time_val_add (&event->hold_until, ms);
/* Decides whether an event may leave the holding queue: immediately if it
 * carries no move cookie or is already paired, otherwise once its
 * hold-until deadline has passed. */
446 ik_event_ready (ik_event_internal_t *event)
451 g_get_current_time (&tv);
453 /* An event is ready if,
455 * it has no cookie -- there is nothing to be gained by holding it
456 * or, it is already paired -- we don't need to hold it anymore
457 * or, we have held it long enough
460 event->event->cookie == 0 ||
461 event->pair != NULL ||
462 g_timeval_le (&event->hold_until, &tv);
/* g_queue_foreach helper run over events_to_process (see
 * ik_process_events): registers each new MOVED_FROM in cookie_hash and
 * pairs each MOVED_TO with a waiting MOVED_FROM of the same cookie. */
466 ik_pair_moves (gpointer data,
469 ik_event_internal_t *event = (ik_event_internal_t *)data;
/* Skip events we have already examined or already delivered. */
471 if (event->seen == TRUE || event->sent == TRUE)
474 if (event->event->cookie != 0)
476 /* When we get a MOVED_FROM event we delay sending the event by
477 * MOVE_HOLD_UNTIL_TIME microseconds. We need to do this because a
478 * MOVED_TO pair _might_ be coming in the near future */
479 if (event->event->mask & IN_MOVED_FROM)
481 g_hash_table_insert (cookie_hash, GINT_TO_POINTER (event->event->cookie), event);
482 /* because we don't deliver move events there is no point in waiting for the match right now. */
483 ik_event_add_microseconds (event, MOVE_HOLD_UNTIL_TIME);
/* A MOVED_TO consumes any waiting MOVED_FROM with the same cookie: the
 * match is removed from the hash so it cannot pair twice. */
485 else if (event->event->mask & IN_MOVED_TO)
487 /* We need to check if we are waiting for this MOVED_TO events cookie to pair it with
489 ik_event_internal_t *match = NULL;
490 match = g_hash_table_lookup (cookie_hash, GINT_TO_POINTER (event->event->cookie));
493 g_hash_table_remove (cookie_hash, GINT_TO_POINTER (event->event->cookie));
494 ik_pair_events (match, event);
/* Moves events from events_to_process to event_queue: first runs the
 * pairing pass, then pops ready events off the head, rewriting any still
 * unmatched MOVED_FROM/MOVED_TO into DELETE/CREATE so consumers never
 * observe half of a move.  Stops at the first not-yet-ready event since
 * the queue is held in arrival order. */
502 ik_process_events (void)
504 g_queue_foreach (events_to_process, ik_pair_moves, NULL);
506 while (!g_queue_is_empty (events_to_process))
508 ik_event_internal_t *event = g_queue_peek_head (events_to_process);
510 /* This must have been sent as part of a MOVED_TO/MOVED_FROM */
514 g_queue_pop_head (events_to_process);
515 /* Free the internal event structure */
520 /* The event isn't ready yet */
521 if (!ik_event_ready (event))
/* Ready: take ownership of the head element. */
525 event = g_queue_pop_head (events_to_process);
527 /* Check if this is a MOVED_FROM that is also sitting in the cookie_hash */
528 if (event->event->cookie && event->pair == NULL &&
529 g_hash_table_lookup (cookie_hash, GINT_TO_POINTER (event->event->cookie)))
530 g_hash_table_remove (cookie_hash, GINT_TO_POINTER (event->event->cookie));
534 /* We send out paired MOVED_FROM/MOVED_TO events in the same event buffer */
535 /* g_assert (event->event->mask == IN_MOVED_FROM && event->pair->event->mask == IN_MOVED_TO); */
536 /* Copy the paired data */
537 event->pair->sent = TRUE;
541 else if (event->event->cookie)
543 /* If we couldn't pair a MOVED_FROM and MOVED_TO together, we change
545 /* Changing MOVED_FROM to DELETE and MOVED_TO to CREATE lets us make
546 * the guarantee that you will never see a non-matched MOVE event */
547 event->event->original_mask = event->event->mask;
/* Preserve the IN_ISDIR flag while swapping the move bit. */
549 if (event->event->mask & IN_MOVED_FROM)
551 event->event->mask = IN_DELETE|(event->event->mask & IN_ISDIR);
552 ik_move_misses++; /* not super accurate, if we aren't watching the destination it still counts as a miss */
554 if (event->event->mask & IN_MOVED_TO)
555 event->event->mask = IN_CREATE|(event->event->mask & IN_ISDIR);
558 /* Push the ik_event_t onto the event queue */
559 g_queue_push_tail (event_queue, event->event);
560 /* Free the internal event structure */
/* Periodic timeout (every PROCESS_EVENTS_TIME ms on the worker context):
 * flushes ready events out of the pairing queue and deactivates itself
 * once nothing is left to process.  Holds inotify_lock throughout. */
566 ik_process_eq_callback (gpointer user_data)
570 /* Try and move as many events to the event queue */
571 G_LOCK (inotify_lock);
572 ik_process_events ();
/* Drain the finished-event queue; each popped event is presumably handed
 * to user_cb on the elided lines — confirm against the full source. */
574 while (!g_queue_is_empty (event_queue))
576 ik_event_t *event = g_queue_pop_head (event_queue);
/* Nothing left to pair: mark the timeout stopped so ik_read_callback can
 * re-arm it when new events arrive. */
583 if (g_queue_get_length (events_to_process) == 0)
585 process_eq_running = FALSE;
589 G_UNLOCK (inotify_lock);