/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "uv.h"
#include "internal.h"

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>
37 static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
/* Create the kqueue backend fd for this loop.
 * Returns 0 on success or the negated errno from kqueue(2) on failure.
 */
int uv__kqueue_init(uv_loop_t* loop) {
  loop->backend_fd = kqueue();
  if (loop->backend_fd == -1)
    return -errno;

  /* Don't leak the kqueue fd into child processes. */
  uv__cloexec(loop->backend_fd, 1);

  return 0;
}
/* Core poll step of the event loop on kqueue platforms.
 *
 * 1. Flushes pending watcher registrations (loop->watcher_queue) into the
 *    kernel with kevent(2), batching up to ARRAY_SIZE(events) changes.
 * 2. Blocks in kevent(2) for at most `timeout` milliseconds (-1 == forever,
 *    0 == non-blocking) and dispatches the received events to watcher
 *    callbacks.
 * 3. Loops while the events array came back full, re-polling non-blocking,
 *    and otherwise accounts elapsed time against `timeout`.
 */
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct kevent events[1024];
  struct kevent* ev;
  struct timespec spec;
  unsigned int nevents;
  unsigned int revents;
  QUEUE* q;
  uv__io_t* w;
  uint64_t base;
  uint64_t diff;
  int filter;
  int fflags;
  int count;
  int nfds;
  int fd;
  int op;
  int i;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  nevents = 0;

  /* Flush watcher registrations to the kernel. */
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    if ((w->events & UV__POLLIN) == 0 && (w->pevents & UV__POLLIN) != 0) {
      filter = EVFILT_READ;
      fflags = 0;
      op = EV_ADD;

      if (w->cb == uv__fs_event) {
        filter = EVFILT_VNODE;
        fflags = NOTE_ATTRIB | NOTE_WRITE  | NOTE_RENAME
               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
      }

      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        /* Change list full, flush it before adding more. */
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & UV__POLLOUT) == 0 && (w->pevents & UV__POLLOUT) != 0) {
      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    w->events = w->pevents;
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  for (;; nevents = 0) {
    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    nfds = kevent(loop->backend_fd,
                  events,
                  nevents,
                  events,
                  ARRAY_SIZE(events),
                  timeout == -1 ? NULL : &spec);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    nevents = 0;

    assert(loop->watchers != NULL);
    /* Stash the event array so uv__platform_invalidate_fd() can reach it
     * when a callback closes a watched fd mid-iteration.
     */
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      ev = events + i;
      fd = ev->ident;
      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it. */
        /* TODO batch up */
        struct kevent events[1];

        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
          if (errno != EBADF && errno != ENOENT)
            abort();

        continue;
      }

      if (ev->filter == EVFILT_VNODE) {
        assert(w->events == UV__POLLIN);
        assert(w->pevents == UV__POLLIN);
        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
        nevents++;
        continue;
      }

      revents = 0;

      if (ev->filter == EVFILT_READ) {
        if (w->pevents & UV__POLLIN) {
          revents |= UV__POLLIN;
          w->rcount = ev->data;
        } else {
          /* Watcher no longer interested in reads, disarm the filter. */
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->filter == EVFILT_WRITE) {
        if (w->pevents & UV__POLLOUT) {
          revents |= UV__POLLOUT;
          w->wcount = ev->data;
        } else {
          /* Watcher no longer interested in writes, disarm the filter. */
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->flags & EV_ERROR)
        revents |= UV__POLLERR;

      if (revents == 0)
        continue;

      w->cb(loop, w, revents);
      nevents++;
    }
    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (nfds == ARRAY_SIZE(events) && --count != 0) {
      /* Poll for more events but don't block this time. */
      timeout = 0;
      continue;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}
/* Invalidate pending kevents for `fd` in the event array that uv__io_poll()
 * is currently iterating over. Called when a watcher callback closes a fd
 * mid-poll so the stale events are skipped instead of dispatched to a
 * recycled descriptor. No-op when called outside a poll iteration (the
 * stashed array pointer is NULL then).
 */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);

  events = (struct kevent*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].ident == fd)
      events[i].ident = -1;
}
/* EVFILT_VNODE callback: translate kernel NOTE_* flags into a UV_CHANGE or
 * UV_RENAME event, invoke the user's callback, then re-arm the one-shot
 * vnode filter (unless the callback stopped the watcher, in which case the
 * fd has been reset to -1).
 */
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
  uv_fs_event_t* handle;
  struct kevent ev;
  int events;
  const char* path;
#if defined(F_GETPATH)
  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
  char pathbuf[MAXPATHLEN];
#endif

  handle = container_of(w, uv_fs_event_t, event_watcher);

  /* Attribute/size changes are "change" events; everything else (rename,
   * delete, revoke, write) is reported as a rename-class event.
   */
  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
    events = UV_CHANGE;
  else
    events = UV_RENAME;

  path = NULL;
#if defined(F_GETPATH)
  /* Also works when the file has been unlinked from the file system. Passing
   * in the path when the file has been deleted is arguably a little strange
   * but it's consistent with what the inotify backend does.
   */
  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
    path = uv__basename_r(pathbuf);
#endif
  handle->cb(handle, path, events, 0);

  if (handle->event_watcher.fd == -1)
    return;

  /* Watcher operates in one-shot mode, re-arm it. */
  fflags = NOTE_ATTRIB | NOTE_WRITE  | NOTE_RENAME
         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;

  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);

  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    abort();
}
327 int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
328 uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
333 int uv_fs_event_start(uv_fs_event_t* handle,
335 const char* filename,
336 unsigned int flags) {
337 #if defined(__APPLE__)
339 #endif /* defined(__APPLE__) */
342 if (uv__is_active(handle))
345 /* TODO open asynchronously - but how do we report back errors? */
346 fd = open(filename, O_RDONLY);
350 uv__handle_start(handle);
351 uv__io_init(&handle->event_watcher, uv__fs_event, fd);
352 handle->filename = strdup(filename);
355 #if defined(__APPLE__)
356 /* Nullify field to perform checks later */
357 handle->cf_cb = NULL;
358 handle->realpath = NULL;
359 handle->realpath_len = 0;
360 handle->cf_flags = flags;
362 if (fstat(fd, &statbuf))
364 /* FSEvents works only with directories */
365 if (!(statbuf.st_mode & S_IFDIR))
368 return uv__fsevents_init(handle);
371 #endif /* defined(__APPLE__) */
373 uv__io_start(handle->loop, &handle->event_watcher, UV__POLLIN);
379 int uv_fs_event_stop(uv_fs_event_t* handle) {
380 if (!uv__is_active(handle))
383 uv__handle_stop(handle);
385 #if defined(__APPLE__)
386 if (uv__fsevents_close(handle))
387 uv__io_stop(handle->loop, &handle->event_watcher, UV__POLLIN);
389 uv__io_stop(handle->loop, &handle->event_watcher, UV__POLLIN);
390 #endif /* defined(__APPLE__) */
392 free(handle->filename);
393 handle->filename = NULL;
395 uv__close(handle->event_watcher.fd);
396 handle->event_watcher.fd = -1;
402 void uv__fs_event_close(uv_fs_event_t* handle) {
403 uv_fs_event_stop(handle);