/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>

/*
 * Required on
 * - Until at least FreeBSD 11.0
 * - Older versions of Mac OS X
 *
 * http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
 */
#ifndef EV_OOBAND
#define EV_OOBAND  EV_FLAG1
#endif

static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);

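/* Create the kqueue descriptor backing this loop and mark it close-on-exec
 * so it is not inherited across exec.
 */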
int uv__kqueue_init(uv_loop_t* loop) {
  loop->backend_fd = kqueue();
  if (loop->backend_fd == -1)
    return UV__ERR(errno);

  uv__cloexec(loop->backend_fd, 1);

  return 0;
}

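/* Set once a process that uses the CFRunLoop-based FSEvents watcher forks;
 * the child must not touch the inherited CF state, so the fs-event code
 * checks this flag and falls back to plain kqueue watching instead.
 */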
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
static int uv__has_forked_with_cfrunloop;
#endif

int uv__io_fork(uv_loop_t* loop) {
  int err;

  loop->backend_fd = -1;
  err = uv__kqueue_init(loop);
  if (err)
    return err;

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (loop->cf_state != NULL) {
    /* We cannot start another CFRunloop and/or thread in the child
       process; CF aborts if you try or if you try to touch the thread
       at all to kill it. So the best we can do is ignore it from now
       on. This means we can't watch directories in the same way
       anymore (like other BSDs). It also means we cannot properly
       clean up the allocated resources; calling
       uv__fsevents_loop_delete from uv_loop_close will crash the
       process. So we sidestep the issue by pretending like we never
       started it in the first place.
    */
    uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
    uv__free(loop->cf_state);
    loop->cf_state = NULL;
  }
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */

  return err;
}

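/* Check that fd can be monitored by kqueue by registering and immediately
 * deleting a read filter for it; returns 0 or a UV__ERR(errno) value.
 */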
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct kevent ev;
  int rc;

  rc = 0;
  EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    rc = UV__ERR(errno);

  EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
  if (rc == 0)
    if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
      abort();

  return rc;
}

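/* One pass of the loop's poll phase: submit pending watcher changes, wait in
 * kevent() for up to `timeout` milliseconds, then dispatch callbacks for the
 * events that were reported.
 */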
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct kevent events[1024];
  struct kevent* ev;
  struct timespec spec;
  unsigned int nevents;
  unsigned int revents;
  QUEUE* q;
  uv__io_t* w;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int filter;
  int fflags;
  int count;
  int nfds;
  int fd;
  int i;
  int op;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

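  /* Flush pending watcher changes to the kernel, batching the kevent() calls
   * so no more than ARRAY_SIZE(events) changes are submitted at once.
   */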
  nevents = 0;

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
      filter = EVFILT_READ;
      fflags = 0;
      op = EV_ADD;

      if (w->cb == uv__fs_event) {
        filter = EVFILT_VNODE;
        fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
      }

      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
      EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    w->events = w->pevents;
  }

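  /* If the loop was configured with uv_loop_configure(loop,
   * UV_LOOP_BLOCK_SIGNAL, SIGPROF), block SIGPROF around the kevent() call
   * so sampling-profiler timers do not interrupt the wait.
   */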
  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);

  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;; nevents = 0) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    nfds = kevent(loop->backend_fd,
                  events,
                  nevents,
                  events,
                  ARRAY_SIZE(events),
                  timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

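    /* kevent() returns the number of events written to the output buffer,
     * 0 if the timeout expired, and -1 on error; each case is handled below.
     */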
    if (nfds == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      ev = events + i;
      fd = ev->ident;
      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         * TODO: batch up. */
        struct kevent events[1];

        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
          if (errno != EBADF && errno != ENOENT)
            abort();

        continue;
      }

      if (ev->filter == EVFILT_VNODE) {
        assert(w->events == POLLIN);
        assert(w->pevents == POLLIN);
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
        nevents++;
        continue;
      }

      revents = 0;

      if (ev->filter == EVFILT_READ) {
        if (w->pevents & POLLIN) {
          revents |= POLLIN;
          w->rcount = ev->data;
        } else {
          /* TODO batch up. */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
        if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
          revents |= UV__POLLRDHUP;
      }

      if (ev->filter == EV_OOBAND) {
        if (w->pevents & UV__POLLPRI) {
          revents |= UV__POLLPRI;
          w->rcount = ev->data;
        } else {
          /* TODO batch up. */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->filter == EVFILT_WRITE) {
        if (w->pevents & POLLOUT) {
          revents |= POLLOUT;
          w->wcount = ev->data;
        } else {
          /* TODO batch up. */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->flags & EV_ERROR)
        revents |= POLLERR;

      if (revents == 0)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, revents);
      }

      nevents++;
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}

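/* An fd is being closed while kevent results that reference it may still sit
 * in the output buffer stashed in loop->watchers; flag those entries so the
 * dispatch loop above skips them.
 */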
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct kevent*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].ident == fd)
      events[i].ident = -1;
}

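/* EVFILT_VNODE callback. Maps kqueue fflags to UV_CHANGE/UV_RENAME, invokes
 * the user callback and re-arms the one-shot vnode filter.
 */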
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
  uv_fs_event_t* handle;
  struct kevent ev;
  int events;
  const char* path;
#if defined(F_GETPATH)
  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
  char pathbuf[MAXPATHLEN];
#endif

  handle = container_of(w, uv_fs_event_t, event_watcher);

  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
    events = UV_CHANGE;
  else
    events = UV_RENAME;

  path = NULL;
#if defined(F_GETPATH)
  /* Also works when the file has been unlinked from the file system. Passing
   * in the path when the file has been deleted is arguably a little strange
   * but it's consistent with what the inotify backend does.
   */
  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
    path = uv__basename_r(pathbuf);
#endif
  handle->cb(handle, path, events, 0);

  if (handle->event_watcher.fd == -1)
    return;

  /* Watcher operates in one-shot mode, re-arm it. */
  fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;

  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);

  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    abort();
}

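/* Typical use of the public fs-event API implemented below (illustrative
 * sketch, not part of this file; `on_change` is a hypothetical callback):
 *
 *   static void on_change(uv_fs_event_t* h, const char* path, int events,
 *                         int status) {
 *     if (events & UV_RENAME) ...;
 *     if (events & UV_CHANGE) ...;
 *   }
 *
 *   uv_fs_event_t watcher;
 *   uv_fs_event_init(uv_default_loop(), &watcher);
 *   uv_fs_event_start(&watcher, on_change, "some/path", 0);
 *   uv_run(uv_default_loop(), UV_RUN_DEFAULT);
 */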
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}

int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int fd;
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  struct stat statbuf;
#endif

  if (uv__is_active(handle))
    return UV_EINVAL;

  handle->cb = cb;
  handle->path = uv__strdup(path);
  if (handle->path == NULL)
    return UV_ENOMEM;

  /* TODO open asynchronously - but how do we report back errors? */
  fd = open(handle->path, O_RDONLY);
  if (fd == -1) {
    uv__free(handle->path);
    handle->path = NULL;
    return UV__ERR(errno);
  }

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  /* Nullify field to perform checks later */
  handle->cf_cb = NULL;
  handle->realpath = NULL;
  handle->realpath_len = 0;
  handle->cf_flags = flags;

  if (fstat(fd, &statbuf))
    goto fallback;
  /* FSEvents works only with directories */
  if (!(statbuf.st_mode & S_IFDIR))
    goto fallback;

  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
    int r;
    /* The fallback fd is no longer needed */
    uv__close_nocheckstdio(fd);
    handle->event_watcher.fd = -1;
    r = uv__fsevents_init(handle);
    if (r == 0) {
      uv__handle_start(handle);
    } else {
      uv__free(handle->path);
      handle->path = NULL;
    }
    return r;
  }
fallback:
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */

  uv__handle_start(handle);
  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);

  return 0;
}

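/* Undo uv_fs_event_start(): tear down the FSEvents state when it was used,
 * otherwise close the kqueue watcher fd, then free the stored path.
 */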
int uv_fs_event_stop(uv_fs_event_t* handle) {
  int r;
  r = 0;

  if (!uv__is_active(handle))
    return 0;

  uv__handle_stop(handle);

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
    if (handle->cf_cb != NULL)
      r = uv__fsevents_close(handle);
#endif

  if (handle->event_watcher.fd != -1) {
    uv__io_close(handle->loop, &handle->event_watcher);
    uv__close(handle->event_watcher.fd);
    handle->event_watcher.fd = -1;
  }

  uv__free(handle->path);
  handle->path = NULL;

  return r;
}

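/* Close hook for the handle; stopping the watcher releases all resources. */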
void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}