1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
18 * IN THE SOFTWARE.
19 */
31 #if !defined(SUNOS_NO_IFADDRS) && _XOPEN_SOURCE < 600
32 #define SUNOS_NO_IFADDRS
35 #ifndef SUNOS_NO_IFADDRS
39 #include <net/if_dl.h>
40 #include <net/if_arp.h>
41 #include <sys/sockio.h>
43 #include <sys/loadavg.h>
52 #define PORT_FIRED 0x69
53 #define PORT_UNUSED 0x0
54 #define PORT_LOADED 0x99
55 #define PORT_DELETED -1
57 #if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
58 #define PROCFS_FILE_OFFSET_BITS_HACK 1
59 #undef _FILE_OFFSET_BITS
61 #define PROCFS_FILE_OFFSET_BITS_HACK 0
66 #if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
67 #define _FILE_OFFSET_BITS 64
71 int uv__platform_loop_init(uv_loop_t* loop) {
76 loop->backend_fd = -1;
80 return UV__ERR(errno);
82 err = uv__cloexec(fd, 1);
87 loop->backend_fd = fd;
93 void uv__platform_loop_delete(uv_loop_t* loop) {
94 if (loop->fs_fd != -1) {
95 uv__close(loop->fs_fd);
99 if (loop->backend_fd != -1) {
100 uv__close(loop->backend_fd);
101 loop->backend_fd = -1;
106 int uv__io_fork(uv_loop_t* loop) {
107 #if defined(PORT_SOURCE_FILE)
108 if (loop->fs_fd != -1) {
109 /* stop the watcher before we blow away its fileno */
110 uv__io_stop(loop, &loop->fs_event_watcher, POLLIN);
113 uv__platform_loop_delete(loop);
114 return uv__platform_loop_init(loop);
118 void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
119 struct port_event* events;
123 assert(loop->watchers != NULL);
126 events = (struct port_event*) loop->watchers[loop->nwatchers];
127 nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
131 /* Invalidate events with same file descriptor */
132 for (i = 0; i < nfds; i++)
133 if ((int) events[i].portev_object == fd)
134 events[i].portev_object = -1;
138 int uv__io_check_fd(uv_loop_t* loop, int fd) {
139 if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
140 return UV__ERR(errno);
142 if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) {
143 perror("(libuv) port_dissociate()");
151 void uv__io_poll(uv_loop_t* loop, int timeout) {
/* NOTE(review): this listing is a partial extraction -- the embedded original
 * line numbers jump (154 -> 172, 185 -> 190, ...), so interior statements
 * (declarations, loop heads, error paths) are missing. Recover the omitted
 * lines before compiling; comments below annotate only what is visible. */
152 struct port_event events[1024];
153 struct port_event* pe;
154 struct timespec spec;
/* Fast path: nothing registered, so the pending-watcher queue must be empty. */
172 if (loop->nfds == 0) {
173 assert(QUEUE_EMPTY(&loop->watcher_queue));
/* Register every pending watcher with the event port before waiting. */
177 while (!QUEUE_EMPTY(&loop->watcher_queue)) {
178 q = QUEUE_HEAD(&loop->watcher_queue);
182 w = QUEUE_DATA(q, uv__io_t, watcher_queue);
183 assert(w->pevents != 0);
185 if (port_associate(loop->backend_fd,
190 perror("(libuv) port_associate()");
194 w->events = w->pevents;
/* Optionally block SIGPROF around the wait to avoid wakeups from profiling
 * timers (UV_LOOP_BLOCK_SIGPROF). */
198 if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
201 sigaddset(pset, SIGPROF);
204 assert(timeout >= -1);
206 count = 48; /* Benchmarks suggest this gives the best throughput. */
/* When idle-time metrics are enabled, remember the caller's timeout so it can
 * be restored after each dispatch pass resets it. */
208 if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
210 user_timeout = timeout;
217 /* Only need to set the provider_entry_time if timeout != 0. The function
218 * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
221 uv__metrics_set_provider_entry_time(loop);
/* Millisecond timeout converted to a timespec for port_getn(). */
224 spec.tv_sec = timeout / 1000;
225 spec.tv_nsec = (timeout % 1000) * 1000000;
228 /* Work around a kernel bug where nfds is not updated. */
229 events[0].portev_source = 0;
235 pthread_sigmask(SIG_BLOCK, pset, NULL);
237 err = port_getn(loop->backend_fd,
241 timeout == -1 ? NULL : &spec);
244 pthread_sigmask(SIG_UNBLOCK, pset, NULL);
247 /* Work around another kernel bug: port_getn() may return events even
250 if (errno == EINTR || errno == ETIME) {
253 perror("(libuv) port_getn()");
258 /* Update loop->time unconditionally. It's tempting to skip the update when
259 * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
260 * operating system didn't reschedule our process while in the syscall.
262 SAVE_ERRNO(uv__update_time(loop));
/* portev_source still zero means no events arrived (see kernel-bug note). */
264 if (events[0].portev_source == 0) {
265 if (reset_timeout != 0) {
266 timeout = user_timeout;
280 assert(timeout != -1);
/* Stash the event array and count so uv__platform_invalidate_fd() can scrub
 * entries for fds closed by callbacks during dispatch. */
287 assert(loop->watchers != NULL);
288 loop->watchers[loop->nwatchers] = (void*) events;
289 loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
290 for (i = 0; i < nfds; i++) {
292 fd = pe->portev_object;
294 /* Skip invalidated events, see uv__platform_invalidate_fd */
299 assert((unsigned) fd < loop->nwatchers);
301 w = loop->watchers[fd];
303 /* File descriptor that we've stopped watching, ignore. */
307 /* Run signal watchers last. This also affects child process watchers
308 * because those are implemented in terms of signal watchers.
310 if (w == &loop->signal_io_watcher) {
313 uv__metrics_update_idle_time(loop);
314 w->cb(loop, w, pe->portev_events);
/* The callback may have stopped this watcher; re-check before rearming. */
319 if (w != loop->watchers[fd])
320 continue; /* Disabled by callback. */
322 /* Events Ports operates in oneshot mode, rearm timer on next run. */
323 if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
324 QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
327 if (reset_timeout != 0) {
328 timeout = user_timeout;
/* Deferred signal watcher dispatch (see comment above). */
332 if (have_signals != 0) {
333 uv__metrics_update_idle_time(loop);
334 loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
/* Clear the stash now that dispatch is done. */
337 loop->watchers[loop->nwatchers] = NULL;
338 loop->watchers[loop->nwatchers + 1] = NULL;
340 if (have_signals != 0)
341 return; /* Event loop should cycle now so don't poll again. */
/* Buffer filled: more events may be pending, so poll again non-blocking,
 * bounded by `count` iterations. */
344 if (nfds == ARRAY_SIZE(events) && --count != 0) {
345 /* Poll for more events but don't block this time. */
/* ETIME: the wait timed out; charge elapsed time against the caller's budget. */
352 if (saved_errno == ETIME) {
353 assert(timeout != -1);
366 diff = loop->time - base;
367 if (diff >= (uint64_t) timeout)
375 uint64_t uv__hrtime(uv_clocktype_t type) {
/*
 * We could use a static buffer for the path manipulations that we need outside
 * of the function, but this function could be called by multiple consumers and
 * we don't want to potentially create a race condition in the use of snprintf.
 * On success, `buffer` holds a NUL-terminated path and *size its length.
 */
int uv_exepath(char* buffer, size_t* size) {
  ssize_t res;
  char buf[128];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV__ERR(EINVAL);

  snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());

  /* Leave room for the terminating NUL we add below. */
  res = *size - 1;
  if (res > 0)
    res = readlink(buf, buffer, res);

  if (res == -1)
    return UV__ERR(errno);

  buffer[res] = '\0';
  *size = res;

  return 0;
}
/* Free physical memory in bytes: page size times available physical pages. */
uint64_t uv_get_free_memory(void) {
  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
/* Total physical memory in bytes: page size times total physical pages. */
uint64_t uv_get_total_memory(void) {
  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}
/* SunOS has no cgroup-style limit that libuv knows how to query, so report
 * "no constraint" (0) per the uv_get_constrained_memory() contract. */
uint64_t uv_get_constrained_memory(void) {
  return 0;  /* Memory constraints are unknown. */
}
/* Report the 1-, 5- and 15-minute load averages into avg[0..2].
 * Failure is deliberately ignored: on error avg[] is left untouched. */
void uv_loadavg(double avg[3]) {
  (void) getloadavg(avg, 3);
}
427 #if defined(PORT_SOURCE_FILE)
429 static int uv__fs_event_rearm(uv_fs_event_t *handle) {
430 if (handle->fd == PORT_DELETED)
433 if (port_associate(handle->loop->fs_fd,
435 (uintptr_t) &handle->fo,
436 FILE_ATTRIB | FILE_MODIFIED,
438 return UV__ERR(errno);
440 handle->fd = PORT_LOADED;
446 static void uv__fs_event_read(uv_loop_t* loop,
/* NOTE(review): partial extraction -- interior lines are missing (the embedded
 * original numbering jumps); recover the omitted declarations and loop/branch
 * structure from upstream before compiling. Drains one fs event per pass,
 * dispatches the user callback, and rearms (event ports are oneshot). */
448 unsigned int revents) {
449 uv_fs_event_t *handle = NULL;
462 * Note that our use of port_getn() here (and not port_get()) is deliberate:
463 * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
464 * causes port_get() to return success instead of ETIME when there aren't
465 * actually any events (!); by using port_getn() in lieu of port_get(),
466 * we can at least workaround the bug by checking for zero returned events
467 * and treating it as we would ETIME.
/* Zeroed timeout = non-blocking drain of a single event, retried on EINTR. */
470 memset(&timeout, 0, sizeof timeout);
471 r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
473 while (r == -1 && errno == EINTR);
475 if ((r == -1 && errno == ETIME) || n == 0)
478 handle = (uv_fs_event_t*) pe.portev_user;
479 assert((r == 0) && "unexpected port_get() error");
/* Handle was closed while its event sat in the port: complete the close that
 * uv__fs_event_close() deferred (see that function's comment). */
481 if (uv__is_closing(handle)) {
482 uv__handle_stop(handle);
483 uv__make_close_pending((uv_handle_t*) handle);
/* Presumably maps FILE_ATTRIB/FILE_MODIFIED to UV_CHANGE and anything else to
 * UV_RENAME -- the assignment lines are among the omitted ones; confirm. */
488 if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
490 if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
493 handle->fd = PORT_FIRED;
494 handle->cb(handle, NULL, events, 0);
/* Rearm unless the callback stopped the watcher; report rearm failure through
 * the user callback as an error event. */
496 if (handle->fd != PORT_DELETED) {
497 r = uv__fs_event_rearm(handle);
499 handle->cb(handle, NULL, 0, r);
502 while (handle->fd != PORT_DELETED);
506 int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
507 uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
512 int uv_fs_event_start(uv_fs_event_t* handle,
515 unsigned int flags) {
520 if (uv__is_active(handle))
524 if (handle->loop->fs_fd == -1) {
525 portfd = port_create();
527 return UV__ERR(errno);
528 handle->loop->fs_fd = portfd;
532 uv__handle_start(handle);
533 handle->path = uv__strdup(path);
534 handle->fd = PORT_UNUSED;
537 memset(&handle->fo, 0, sizeof handle->fo);
538 handle->fo.fo_name = handle->path;
539 err = uv__fs_event_rearm(handle);
541 uv_fs_event_stop(handle);
546 uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
547 uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN);
554 static int uv__fs_event_stop(uv_fs_event_t* handle) {
557 if (!uv__is_active(handle))
560 if (handle->fd == PORT_LOADED) {
561 ret = port_dissociate(handle->loop->fs_fd,
563 (uintptr_t) &handle->fo);
566 handle->fd = PORT_DELETED;
567 uv__free(handle->path);
569 handle->fo.fo_name = NULL;
571 uv__handle_stop(handle);
576 int uv_fs_event_stop(uv_fs_event_t* handle) {
577 (void) uv__fs_event_stop(handle);
581 void uv__fs_event_close(uv_fs_event_t* handle) {
583 * If we were unable to dissociate the port here, then it is most likely
584 * that there is a pending queued event. When this happens, we don't want
585 * to complete the close as it will free the underlying memory for the
586 * handle, causing a use-after-free problem when the event is processed.
587 * We defer the final cleanup until after the event is consumed in
588 * uv__fs_event_read().
590 if (uv__fs_event_stop(handle) == 0)
591 uv__make_close_pending((uv_handle_t*) handle);
594 #else /* !defined(PORT_SOURCE_FILE) */
596 int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
601 int uv_fs_event_start(uv_fs_event_t* handle,
603 const char* filename,
604 unsigned int flags) {
609 int uv_fs_event_stop(uv_fs_event_t* handle) {
614 void uv__fs_event_close(uv_fs_event_t* handle) {
618 #endif /* defined(PORT_SOURCE_FILE) */
621 int uv_resident_set_memory(size_t* rss) {
626 fd = open("/proc/self/psinfo", O_RDONLY);
628 return UV__ERR(errno);
630 /* FIXME(bnoordhuis) Handle EINTR. */
632 if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
633 *rss = (size_t)psinfo.pr_rssize * 1024;
642 int uv_uptime(double* uptime) {
647 long hz = sysconf(_SC_CLK_TCK);
653 ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
654 if (kstat_read(kc, ksp, NULL) == -1) {
657 knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr");
658 *uptime = knp->value.ul / hz;
666 int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
/* NOTE(review): partial extraction -- declarations, error handling, loop
 * increments and cleanup are among the omitted lines (original numbering
 * jumps); recover them from upstream before compiling. Three kstat passes:
 * count CPUs, fill model/speed, then fill per-CPU tick counters. */
671 uv_cpu_info_t* cpu_info;
677 /* Get count of cpus */
679 while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
683 *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos));
689 *count = lookup_instance;
/* Pass 2: model string and clock speed from each cpu_info kstat. */
691 cpu_info = *cpu_infos;
693 while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
694 if (kstat_read(kc, ksp, NULL) == -1) {
696 cpu_info->model = NULL;
698 knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
699 assert(knp->data_type == KSTAT_DATA_INT32 ||
700 knp->data_type == KSTAT_DATA_INT64);
701 cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
704 knp = kstat_data_lookup(ksp, (char*) "brand");
705 assert(knp->data_type == KSTAT_DATA_STRING);
706 cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp));
/* Pass 3: cumulative tick counters from each cpu:sys kstat; zeroed when a
 * CPU's kstat cannot be read. */
713 cpu_info = *cpu_infos;
716 ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");
721 if (kstat_read(kc, ksp, NULL) == -1) {
722 cpu_info->cpu_times.user = 0;
723 cpu_info->cpu_times.nice = 0;
724 cpu_info->cpu_times.sys = 0;
725 cpu_info->cpu_times.idle = 0;
726 cpu_info->cpu_times.irq = 0;
728 knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
729 assert(knp->data_type == KSTAT_DATA_UINT64);
730 cpu_info->cpu_times.user = knp->value.ui64;
732 knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
733 assert(knp->data_type == KSTAT_DATA_UINT64);
734 cpu_info->cpu_times.sys = knp->value.ui64;
736 knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
737 assert(knp->data_type == KSTAT_DATA_UINT64);
738 cpu_info->cpu_times.idle = knp->value.ui64;
740 knp = kstat_data_lookup(ksp, (char*) "intr");
741 assert(knp->data_type == KSTAT_DATA_UINT64);
742 cpu_info->cpu_times.irq = knp->value.ui64;
/* SunOS keeps no separate "nice" accounting; always reported as 0. */
743 cpu_info->cpu_times.nice = 0;
756 #ifdef SUNOS_NO_IFADDRS
757 int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
762 #else /* SUNOS_NO_IFADDRS */
765 * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris
766 * http://www.pauliesworld.org/project/getmac.c
/* NOTE(review): partial extraction -- return statements, loop bodies and the
 * socket close are among the omitted lines (original numbering jumps);
 * recover them from upstream before compiling. Fills address->phys_addr from
 * the link-layer sockaddr, falling back to a SIOCGARP ioctl when the
 * link-layer bytes are all zero. */
768 static int uv__set_phys_addr(uv_interface_address_t* address,
769 struct ifaddrs* ent) {
771 struct sockaddr_dl* sa_addr;
774 struct arpreq arpreq;
776 /* This appears to only work as root */
777 sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
778 memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
779 for (i = 0; i < sizeof(address->phys_addr); i++) {
780 /* Check that all bytes of phys_addr are zero. */
781 if (address->phys_addr[i] != 0)
/* All-zero link-layer address: fall back to an ARP lookup keyed by the
 * interface's IPv4/IPv6 address. */
784 memset(&arpreq, 0, sizeof(arpreq));
785 if (address->address.address4.sin_family == AF_INET) {
786 struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa);
787 sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr;
788 } else if (address->address.address4.sin_family == AF_INET6) {
789 struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa);
790 memcpy(sin->sin6_addr.s6_addr,
791 address->address.address6.sin6_addr.s6_addr,
792 sizeof(address->address.address6.sin6_addr.s6_addr));
/* Throwaway datagram socket just to issue the SIOCGARP ioctl. */
797 sockfd = socket(AF_INET, SOCK_DGRAM, 0);
799 return UV__ERR(errno);
801 if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) {
803 return UV__ERR(errno);
805 memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr));
811 static int uv__ifaddr_exclude(struct ifaddrs *ent) {
812 if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
814 if (ent->ifa_addr == NULL)
816 if (ent->ifa_addr->sa_family != AF_INET &&
817 ent->ifa_addr->sa_family != AF_INET6)
822 int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
/* NOTE(review): partial extraction -- declarations, the count increment,
 * allocation-failure handling, `continue` statements, `address++` and the
 * freeifaddrs()/return epilogue are among the omitted lines (original
 * numbering jumps); recover them from upstream before compiling. Two passes
 * over the getifaddrs() list: count, then fill. */
823 uv_interface_address_t* address;
824 struct ifaddrs* addrs;
830 if (getifaddrs(&addrs))
831 return UV__ERR(errno);
833 /* Count the number of interfaces */
834 for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
835 if (uv__ifaddr_exclude(ent))
845 *addresses = uv__malloc(*count * sizeof(**addresses));
/* Second pass: populate one entry per non-excluded interface. */
851 address = *addresses;
853 for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
854 if (uv__ifaddr_exclude(ent))
857 address->name = uv__strdup(ent->ifa_name);
859 if (ent->ifa_addr->sa_family == AF_INET6) {
860 address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
862 address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
865 if (ent->ifa_netmask->sa_family == AF_INET6) {
866 address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
868 address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
871 address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
872 (ent->ifa_flags & IFF_LOOPBACK));
/* Best effort: uv__set_phys_addr() failures are deliberately ignored. */
874 uv__set_phys_addr(address, ent);
882 #endif /* SUNOS_NO_IFADDRS */
884 void uv_free_interface_addresses(uv_interface_address_t* addresses,
888 for (i = 0; i < count; i++) {
889 uv__free(addresses[i].name);
896 #if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
897 size_t strnlen(const char* s, size_t maxlen) {
899 end = memchr(s, '\0', maxlen);