/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>

/*
 * Required on
 * - Until at least FreeBSD 11.0
 * - Older versions of Mac OS X
 *
 * http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
 */
#ifndef EV_OOBAND
#define EV_OOBAND EV_FLAG1
#endif

static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);


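/* Create the kqueue instance that backs this event loop. The descriptor is
 * stored in loop->backend_fd and marked close-on-exec so it does not leak
 * into child processes. */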
int uv__kqueue_init(uv_loop_t* loop) {
  loop->backend_fd = kqueue();
  if (loop->backend_fd == -1)
    return UV__ERR(errno);

  uv__cloexec(loop->backend_fd, 1);

  return 0;
}


#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
static int uv__has_forked_with_cfrunloop;
#endif


int uv__io_fork(uv_loop_t* loop) {
  int err;
  loop->backend_fd = -1;
  err = uv__kqueue_init(loop);
  if (err)
    return err;

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (loop->cf_state != NULL) {
    /* We cannot start another CFRunloop and/or thread in the child
       process; CF aborts if you try or if you try to touch the thread
       at all to kill it. So the best we can do is ignore it from now
       on. This means we can't watch directories in the same way
       anymore (like other BSDs). It also means we cannot properly
       clean up the allocated resources; calling
       uv__fsevents_loop_delete from uv_loop_close will crash the
       process. So we sidestep the issue by pretending like we never
       started it in the first place.
    */
    uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
    uv__free(loop->cf_state);
    loop->cf_state = NULL;
  }
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
  return err;
}


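/* Probe whether `fd` is something kqueue can watch: register an EVFILT_READ
 * filter and immediately delete it again. kevent() rejects descriptor types
 * it cannot poll, which lets callers such as uv_poll_init() detect
 * unsupported fds up front instead of failing later. */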
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct kevent ev;
  int rc;

  rc = 0;
  EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    rc = UV__ERR(errno);

  EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
  if (rc == 0)
    if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
      abort();

  return rc;
}


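/* One poll step of the event loop: flush pending watcher changes to the
 * kernel, wait up to `timeout` milliseconds (-1 blocks indefinitely, 0
 * returns immediately) and dispatch the I/O callbacks for every event that
 * fired. */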
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct kevent events[1024];
  struct kevent* ev;
  struct timespec spec;
  unsigned int nevents;
  unsigned int revents;
  QUEUE* q;
  uv__io_t* w;
  uv_process_t* process;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int filter;
  int fflags;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  nevents = 0;

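  /* Translate watchers whose interest set changed since the last poll into
   * kevent registrations. Changes are staged in `events` and flushed to the
   * kernel in batches whenever the array fills up; whatever is still staged
   * afterwards is submitted together with the first kevent() wait below. */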
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

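    /* w->events is the interest set the kernel already knows about,
     * w->pevents the set the user asked for; only filters that are new in
     * w->pevents need an EV_ADD here. */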
    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
      filter = EVFILT_READ;
      fflags = 0;
      op = EV_ADD;

      if (w->cb == uv__fs_event) {
        filter = EVFILT_VNODE;
        fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
      }

      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
      EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    w->events = w->pevents;
  }

  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

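  /* Main poll loop. A single uv__io_poll() call may go around several
   * times: after EINTR, when the metrics code forces a zero-timeout first
   * pass, or when a full events[] array suggests more events are pending. */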
  for (;; nevents = 0) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    nfds = kevent(loop->backend_fd,
                  events,
                  nevents,
                  events,
                  ARRAY_SIZE(events),
                  timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

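    /* Expose the current events array and its length through the two extra
     * slots at the end of loop->watchers so uv__platform_invalidate_fd()
     * can drop events for fds that callbacks close mid-iteration. */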
    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      ev = events + i;
      fd = ev->ident;

      /* Handle kevent NOTE_EXIT results */
      if (ev->filter == EVFILT_PROC) {
        QUEUE_FOREACH(q, &loop->process_handles) {
          process = QUEUE_DATA(q, uv_process_t, queue);
          if (process->pid == fd) {
            process->flags |= UV_HANDLE_REAP;
            loop->flags |= UV_LOOP_REAP_CHILDREN;
            break;
          }
        }
        nevents++;
        continue;
      }

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         * TODO: batch up. */
        struct kevent events[1];

        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
          if (errno != EBADF && errno != ENOENT)
            abort();

        continue;
      }

      if (ev->filter == EVFILT_VNODE) {
        assert(w->events == POLLIN);
        assert(w->pevents == POLLIN);
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
        nevents++;
        continue;
      }

      revents = 0;

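      /* For plain I/O watchers, translate the kqueue filter back into the
       * poll(2)-style bits (POLLIN, POLLOUT, ...) that uv__io_t callbacks
       * expect; a filter the watcher no longer wants is deleted on the
       * spot. */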
      if (ev->filter == EVFILT_READ) {
        if (w->pevents & POLLIN) {
          revents |= POLLIN;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
        if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
          revents |= UV__POLLRDHUP;
      }

      if (ev->filter == EV_OOBAND) {
        if (w->pevents & UV__POLLPRI) {
          revents |= UV__POLLPRI;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->filter == EVFILT_WRITE) {
        if (w->pevents & POLLOUT) {
          revents |= POLLOUT;
          w->wcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->flags & EV_ERROR)
        revents |= POLLERR;

      if (revents == 0)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, revents);
      }

      nevents++;
    }

    if (loop->flags & UV_LOOP_REAP_CHILDREN) {
      loop->flags &= ~UV_LOOP_REAP_CHILDREN;
      uv__wait_children(loop);
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}


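/* Invoked when a file descriptor is closed while uv__io_poll() is still
 * iterating over the results of the last kevent() call: marks any pending
 * events for `fd` so the poll loop skips them instead of touching a stale
 * (or recycled) descriptor. EVFILT_PROC events are left alone because their
 * ident is a pid, not a file descriptor. */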
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct kevent*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].ident == fd && events[i].filter != EVFILT_PROC)
      events[i].ident = -1;
}


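/* Callback for EVFILT_VNODE events. The watcher is registered with
 * EV_ONESHOT, so after reporting the change (UV_CHANGE for attribute and
 * size changes, UV_RENAME for everything else) it must re-arm itself before
 * another event can be delivered. */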
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
  uv_fs_event_t* handle;
  struct kevent ev;
  int events;
  const char* path;
#if defined(F_GETPATH)
  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
  char pathbuf[MAXPATHLEN];
#endif

  handle = container_of(w, uv_fs_event_t, event_watcher);

  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
    events = UV_CHANGE;
  else
    events = UV_RENAME;

  path = NULL;
#if defined(F_GETPATH)
  /* Also works when the file has been unlinked from the file system. Passing
   * in the path when the file has been deleted is arguably a little strange
   * but it's consistent with what the inotify backend does.
   */
  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
    path = uv__basename_r(pathbuf);
#endif
  handle->cb(handle, path, events, 0);

  if (handle->event_watcher.fd == -1)
    return;

  /* Watcher operates in one-shot mode, re-arm it. */
  fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;

  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);

  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    abort();
}


int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}


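/* On Apple platforms directories are handed off to the FSEvents thread when
 * possible; everything else (plain files, and all watches after a fork) uses
 * the kqueue EVFILT_VNODE path, which keeps an O_RDONLY fd open on the
 * watched path. */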
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int fd;
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  struct stat statbuf;
#endif

  if (uv__is_active(handle))
    return UV_EINVAL;

  handle->cb = cb;
  handle->path = uv__strdup(path);
  if (handle->path == NULL)
    return UV_ENOMEM;

  /* TODO open asynchronously - but how do we report back errors? */
  fd = open(handle->path, O_RDONLY);
  if (fd == -1) {
    uv__free(handle->path);
    handle->path = NULL;
    return UV__ERR(errno);
  }

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  /* Nullify field to perform checks later */
  handle->cf_cb = NULL;
  handle->realpath = NULL;
  handle->realpath_len = 0;
  handle->cf_flags = flags;

  if (fstat(fd, &statbuf))
    goto fallback;
  /* FSEvents works only with directories */
  if (!(statbuf.st_mode & S_IFDIR))
    goto fallback;

  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
    int r;
    /* The fallback fd is no longer needed */
    uv__close_nocheckstdio(fd);
    handle->event_watcher.fd = -1;
    r = uv__fsevents_init(handle);
    if (r == 0) {
      uv__handle_start(handle);
    } else {
      uv__free(handle->path);
      handle->path = NULL;
    }
    return r;
  }
fallback:
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */

  uv__handle_start(handle);
  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);

  return 0;
}


int uv_fs_event_stop(uv_fs_event_t* handle) {
  int r;
  r = 0;

  if (!uv__is_active(handle))
    return 0;

  uv__handle_stop(handle);

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
    if (handle->cf_cb != NULL)
      r = uv__fsevents_close(handle);
#endif

  if (handle->event_watcher.fd != -1) {
    uv__io_close(handle->loop, &handle->event_watcher);
    uv__close(handle->event_watcher.fd);
    handle->event_watcher.fd = -1;
  }

  uv__free(handle->path);
  handle->path = NULL;

  return r;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}