/* Copyright libuv project contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "internal.h"
#include <sys/ioctl.h>
#include <net/if.h>
#include <utmpx.h>
#include <unistd.h>
#include <sys/ps.h>
#include <builtins.h>
#include <termios.h>
#include <sys/msg.h>
#include <sys/resource.h>
#if defined(__clang__)
#include "csrsic.h"
#else
#include "//'SYS1.SAMPLIB(CSRSIC)'"
#endif

#define CVT_PTR           0x10
#define PSA_PTR           0x00
#define CSD_OFFSET        0x294
/* Long-term average CPU service used by this logical partition,
 * in millions of service units per hour. If this value is above
 * the partition's defined capacity, the partition will be capped.
 * It is calculated using the physical CPU adjustment factor
 * (RCTPCPUA) so it may not match other measures of service which
 * are based on the logical CPU adjustment factor. It is available
 * if the hardware supports LPAR cluster.
 */
#define RCTLACS_OFFSET    0xC4
/* 32-bit count of alive CPUs. This includes both CPs and IFAs */
#define CSD_NUMBER_ONLINE_CPUS        0xD4

/* Address of system resources manager (SRM) control table */
#define CVTOPCTP_OFFSET   0x25C

/* Address of the RCT table */
#define RMCTRCT_OFFSET    0xE4

/* Address of the rsm control and enumeration area. */
#define CVTRCEP_OFFSET    0x490

/* Total number of frames currently on all available frame queues. */
#define RCEAFC_OFFSET     0x088

/* CPC model length from the CSRSI Service. */
#define CPCMODEL_LENGTH   16
/* Pointer to the home (current) ASCB. */
#define PSAAOLD           0x224

/* Pointer to rsm address space block extension. */
#define ASCBRSME          0x16C
/* NUMBER OF FRAMES CURRENTLY IN USE BY THIS ADDRESS SPACE.
 * It does not include 2G frames.
 */
#define RAXFMCT           0x2C
/* Thread Entry constants */
#define PGTH_CURRENT  1
#define PGTH_LEN      26
#define PGTHAPATH     0x20
#pragma linkage(BPX4GTH, OS)
#pragma linkage(BPX1GTH, OS)
/* TOD Clock resolution in nanoseconds */
#define TOD_RES 4.096

typedef unsigned data_area_ptr_assign_type;

typedef union {
  struct {
#if defined(_LP64)
    data_area_ptr_assign_type lower;
#endif
    data_area_ptr_assign_type assign;
  };
  char* deref;
} data_area_ptr;
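
/* A minimal sketch of how data_area_ptr is used below, assuming 31-bit
 * control-block addresses: storing a 32-bit address in `assign` fills the
 * low word of the pointer (z/Architecture is big-endian, so under _LP64 the
 * `lower` member pads the high word with zero), after which `deref` is
 * usable as a char* to the control block:
 *
 *   data_area_ptr cvt = {0};
 *   cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
 *   char* p = cvt.deref;  /+ byte-addressable view of the CVT +/
 */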

void uv_loadavg(double avg[3]) {
  /* TODO: implement the following */
  avg[0] = 0;
  avg[1] = 0;
  avg[2] = 0;
}

int uv__platform_loop_init(uv_loop_t* loop) {
  uv__os390_epoll* ep;

  ep = epoll_create1(0);
  loop->ep = ep;
  if (ep == NULL)
    return UV__ERR(errno);

  return 0;
}

void uv__platform_loop_delete(uv_loop_t* loop) {
  if (loop->ep != NULL) {
    epoll_queue_close(loop->ep);
    loop->ep = NULL;
  }
}
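
/* Note (assumption): STCKF stores a 64-bit TOD clock value in which bit 51
 * ticks once per microsecond, i.e. 4096 TOD units per microsecond or 4.096
 * units per nanosecond; dividing the raw value by TOD_RES therefore yields
 * nanoseconds.
 */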
uint64_t uv__hrtime(uv_clocktype_t type) {
  unsigned long long timestamp;
  __stckf(&timestamp);
  /* Convert to nanoseconds */
  return timestamp / TOD_RES;
}

static int getexe(char* buf, size_t len) {
  return uv__strscpy(buf, __getargv()[0], len);
}

/*
 * We could use a static buffer for the path manipulations that we need outside
 * of the function, but this function could be called by multiple consumers and
 * we don't want to potentially create a race condition in the use of snprintf.
 * There is no direct way of getting the exe path in z/OS - either through
 * /procfs or through some libc APIs. The approach below is to parse argv[0]'s
 * pattern and use it in conjunction with the PATH environment variable to
 * craft one.
 */
int uv_exepath(char* buffer, size_t* size) {
  int res;
  char args[PATH_MAX];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  res = getexe(args, sizeof(args));
  if (res < 0)
    return UV_EINVAL;

  return uv__search_path(args, buffer, size);
}

uint64_t uv_get_free_memory(void) {
  uint64_t freeram;

  data_area_ptr cvt = {0};
  data_area_ptr rcep = {0};
  cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
  rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
  freeram = (uint64_t)*((uint32_t*)(rcep.deref + RCEAFC_OFFSET)) * 4096;
  return freeram;
}
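
/* Note: the double subscript below is hand-rolled control-block chasing:
 * [4] reads the 32-bit word at address 4 * 4 = 0x10 (the CVT pointer) and
 * [214] reads the word at offset 214 * 4 = 856 (0x358) into the CVT, which
 * holds the CVTRLSTG field named in the comment.
 */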
uint64_t uv_get_total_memory(void) {
  /* Use CVTRLSTG to get the size of actual real storage online at IPL in K. */
  return (uint64_t)((int)((char *__ptr32 *__ptr32 *)0)[4][214]) * 1024;
}

uint64_t uv_get_constrained_memory(void) {
  struct rlimit rl;

  /* RLIMIT_MEMLIMIT return value is in megabytes rather than bytes. */
  if (getrlimit(RLIMIT_MEMLIMIT, &rl) == 0)
    return rl.rlim_cur * 1024 * 1024;

  return 0;  /* There is no memory limit set. */
}
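
/* Note: the resident set is computed from MVS control blocks, assuming the
 * documented offsets above: PSAAOLD gives the current ASCB, ASCBRSME the RSM
 * address-space extension (RAX), and RAXFMCT the count of in-use real-storage
 * frames; the frame count times the page size gives bytes.
 */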
int uv_resident_set_memory(size_t* rss) {
  char* ascb;
  char* rax;
  size_t nframes;

  ascb = *(char* __ptr32 *)(PSA_PTR + PSAAOLD);
  rax = *(char* __ptr32 *)(ascb + ASCBRSME);
  nframes = *(unsigned int*)(rax + RAXFMCT);

  *rss = nframes * sysconf(_SC_PAGESIZE);
  return 0;
}
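
/* Note: uptime is derived from the utmpx BOOT_TIME record: getutxid() looks
 * up the boot entry and difftime64() measures the seconds elapsed from boot
 * to now.
 */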
int uv_uptime(double* uptime) {
  struct utmpx u;
  struct utmpx* v;
  time64_t t;

  u.ut_type = BOOT_TIME;
  v = getutxid(&u);
  if (v == NULL)
    return -1;
  *uptime = difftime64(time64(&t), v->ut_tv.tv_sec);
  return 0;
}

int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  uv_cpu_info_t* cpu_info;
  int idx;
  siv1v2 info;
  data_area_ptr cvt = {0};
  data_area_ptr csd = {0};
  data_area_ptr rmctrct = {0};
  data_area_ptr cvtopctp = {0};
  int cpu_usage_avg;

  cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);

  csd.assign = *((data_area_ptr_assign_type *) (cvt.deref + CSD_OFFSET));
  cvtopctp.assign = *((data_area_ptr_assign_type *) (cvt.deref + CVTOPCTP_OFFSET));
  rmctrct.assign = *((data_area_ptr_assign_type *) (cvtopctp.deref + RMCTRCT_OFFSET));

  *count = *((int*) (csd.deref + CSD_NUMBER_ONLINE_CPUS));
  cpu_usage_avg = *((unsigned short int*) (rmctrct.deref + RCTLACS_OFFSET));

  *cpu_infos = uv__malloc(*count * sizeof(uv_cpu_info_t));
  if (!*cpu_infos)
    return UV_ENOMEM;

  cpu_info = *cpu_infos;
  idx = 0;
  while (idx < *count) {
    cpu_info->speed = *(int*)(info.siv1v2si22v1.si22v1cpucapability);
    cpu_info->model = uv__malloc(CPCMODEL_LENGTH + 1);
    memset(cpu_info->model, '\0', CPCMODEL_LENGTH + 1);
    memcpy(cpu_info->model, info.siv1v2si11v1.si11v1cpcmodel, CPCMODEL_LENGTH);
    cpu_info->cpu_times.user = cpu_usage_avg;
    /* TODO: implement the following */
    cpu_info->cpu_times.sys = 0;
    cpu_info->cpu_times.idle = 0;
    cpu_info->cpu_times.irq = 0;
    cpu_info->cpu_times.nice = 0;
    ++cpu_info;
    ++idx;
  }

  return 0;
}

static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
                                      int* count) {
  uv_interface_address_t* address;
  int sockfd;
  int maxsize;
  __net_ifconf6header_t ifc;
  __net_ifconf6entry_t* ifr;
  __net_ifconf6entry_t* p;
  unsigned int i;
  int count_names;
  unsigned char netmask[16] = {0};

  *count = 0;
  /* Assume maximum buffer size allowable */
  maxsize = 16384;

  if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP)))
    return UV__ERR(errno);

  ifc.__nif6h_buffer = uv__calloc(1, maxsize);

  if (ifc.__nif6h_buffer == NULL) {
    uv__close(sockfd);
    return UV_ENOMEM;
  }

  ifc.__nif6h_version = 1;
  ifc.__nif6h_buflen = maxsize;

  if (ioctl(sockfd, SIOCGIFCONF6, &ifc) == -1) {
    /* This will error on a system that does not support IPv6. However, we want
     * to treat this as there being 0 interfaces so we can continue to get IPv4
     * interfaces in uv_interface_addresses(). So return 0 instead of the error.
     */
    uv__free(ifc.__nif6h_buffer);
    uv__close(sockfd);
    return 0;
  }

  ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
  while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
    p = ifr;
    ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);

    if (!(p->__nif6e_addr.sin6_family == AF_INET6))
      continue;

    if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
      continue;

    ++(*count);
  }

  if ((*count) == 0) {
    uv__free(ifc.__nif6h_buffer);
    uv__close(sockfd);
    return 0;
  }

  /* Alloc the return interface structs */
  *addresses = uv__calloc(1, *count * sizeof(uv_interface_address_t));
  if (!(*addresses)) {
    uv__free(ifc.__nif6h_buffer);
    uv__close(sockfd);
    return UV_ENOMEM;
  }
  address = *addresses;

  count_names = 0;
  ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
  while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
    p = ifr;
    ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);

    if (!(p->__nif6e_addr.sin6_family == AF_INET6))
      continue;

    if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
      continue;

    /* All conditions above must match count loop */

    i = 0;
    /* Ignore EBCDIC space (0x40) padding in name */
    while (i < ARRAY_SIZE(p->__nif6e_name) &&
           p->__nif6e_name[i] != 0x40 &&
           p->__nif6e_name[i] != 0)
      ++i;
    address->name = uv__malloc(i + 1);
    if (address->name == NULL) {
      uv_free_interface_addresses(*addresses, count_names);
      uv__free(ifc.__nif6h_buffer);
      uv__close(sockfd);
      return UV_ENOMEM;
    }
    memcpy(address->name, p->__nif6e_name, i);
    address->name[i] = '\0';
    __e2a_s(address->name);
    count_names++;

    address->address.address6 = *((struct sockaddr_in6*) &p->__nif6e_addr);
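
    /* Note: the loop below builds the 128-bit netmask from the prefix
     * length: whole 0xFF bytes first, then one partial byte. For example, a
     * (hypothetical) /20 prefix yields 0xFF 0xFF 0xF0 followed by zeros.
     */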
    for (i = 0; i < (p->__nif6e_prefixlen / 8); i++)
      netmask[i] = 0xFF;

    if (p->__nif6e_prefixlen % 8)
      netmask[i] = 0xFF << (8 - (p->__nif6e_prefixlen % 8));

    address->netmask.netmask6.sin6_len = p->__nif6e_prefixlen;
    memcpy(&(address->netmask.netmask6.sin6_addr), netmask, 16);
    address->netmask.netmask6.sin6_family = AF_INET6;

    address->is_internal = p->__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
    address++;
  }

  uv__free(ifc.__nif6h_buffer);
  uv__close(sockfd);
  return 0;
}

int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  int sockfd;
  int maxsize;
  struct ifconf ifc;
  struct ifreq flg;
  struct ifreq* ifr;
  struct ifreq* p;
  uv_interface_address_t* addresses_v6;
  int count_v6;
  unsigned int i;
  int rc;
  int count_names;

  *count = 0;
  *addresses = NULL;

  /* get the ipv6 addresses first */
  if ((rc = uv__interface_addresses_v6(&addresses_v6, &count_v6)) != 0)
    return rc;

  /* now get the ipv4 addresses */

  /* Assume maximum buffer size allowable */
  maxsize = 16384;

  sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
  if (0 > sockfd) {
    uv_free_interface_addresses(addresses_v6, count_v6);
    return UV__ERR(errno);
  }

  ifc.ifc_req = uv__calloc(1, maxsize);

  if (ifc.ifc_req == NULL) {
    uv__close(sockfd);
    uv_free_interface_addresses(addresses_v6, count_v6);
    return UV_ENOMEM;
  }

  ifc.ifc_len = maxsize;

  if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
    uv__close(sockfd);
    uv_free_interface_addresses(addresses_v6, count_v6);
    uv__free(ifc.ifc_req);
    return UV__ERR(errno);
  }

#define MAX(a,b) (((a)>(b))?(a):(b))
#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))
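
  /* Note: entries returned by SIOCGIFCONF are variable length: sa_len may
   * exceed sizeof(struct sockaddr) (IPv6 addresses, for instance) or the
   * fixed storage may be larger than sa_len, so ADDR_SIZE takes the larger
   * of the two when stepping to the next entry.
   */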

  /* Count all up and running ipv4/ipv6 addresses */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
      uv__close(sockfd);
      uv_free_interface_addresses(addresses_v6, count_v6);
      uv__free(ifc.ifc_req);
      return UV__ERR(errno);
    }

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    (*count)++;
  }

  if (*count == 0 && count_v6 == 0) {
    uv__free(ifc.ifc_req);
    uv__close(sockfd);
    return 0;
  }

  /* Alloc the return interface structs */
  *addresses = uv__calloc(1, (*count + count_v6) *
                          sizeof(uv_interface_address_t));

  if (!(*addresses)) {
    uv__close(sockfd);
    uv_free_interface_addresses(addresses_v6, count_v6);
    uv__free(ifc.ifc_req);
    return UV_ENOMEM;
  }
  address = *addresses;

  /* copy over the ipv6 addresses if any are found */
  if (count_v6) {
    memcpy(address, addresses_v6, count_v6 * sizeof(uv_interface_address_t));
    address += count_v6;
    *count += count_v6;
    /* free ipv6 addresses, but keep address names */
    uv__free(addresses_v6);
  }

  count_names = *count;
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
      uv__close(sockfd);
      uv_free_interface_addresses(*addresses, count_names);
      uv__free(ifc.ifc_req);
      return UV__ERR(errno);
    }

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    /* All conditions above must match count loop */

    i = 0;
    /* Ignore EBCDIC space (0x40) padding in name */
    while (i < ARRAY_SIZE(p->ifr_name) &&
           p->ifr_name[i] != 0x40 &&
           p->ifr_name[i] != 0)
      ++i;
    address->name = uv__malloc(i + 1);
    if (address->name == NULL) {
      uv__close(sockfd);
      uv_free_interface_addresses(*addresses, count_names);
      uv__free(ifc.ifc_req);
      return UV_ENOMEM;
    }
    memcpy(address->name, p->ifr_name, i);
    address->name[i] = '\0';
    __e2a_s(address->name);
    count_names++;

    address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);

    if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1) {
      uv__close(sockfd);
      uv_free_interface_addresses(*addresses, count_names);
      uv__free(ifc.ifc_req);
      return UV__ERR(errno);
    }

    address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr);
    address->netmask.netmask4.sin_family = AF_INET;
    address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
    address++;
  }

#undef ADDR_SIZE
#undef MAX

  uv__free(ifc.ifc_req);
  uv__close(sockfd);
  return 0;
}

void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int i;
  for (i = 0; i < count; ++i)
    uv__free(addresses[i].name);
  uv__free(addresses);
}
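
/* Note: uv__io_poll() stashes its in-flight epoll_event array and its length
 * in loop->watchers[loop->nwatchers] and loop->watchers[loop->nwatchers + 1]
 * while dispatching; the code below rewrites that same array so an fd closed
 * by a callback is not handed to another callback later in the batch.
 */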
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct epoll_event* events;
  struct epoll_event dummy;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct epoll_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events != NULL)
    /* Invalidate events with same file descriptor */
    for (i = 0; i < nfds; i++)
      if ((int) events[i].fd == fd)
        events[i].fd = -1;

  /* Remove the file descriptor from the epoll. */
  if (loop->ep != NULL)
    epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, &dummy);
}

int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct pollfd p[1];
  int rv;

  p[0].fd = fd;
  p[0].events = POLLIN;

  do
    rv = poll(p, 1, 0);
  while (rv == -1 && errno == EINTR);

  if (rv == -1)
    abort();

  if (p[0].revents & POLLNVAL)
    return UV_EINVAL;

  return 0;
}

int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}
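
/* Register interest in `path` with the kernel's file-interest service. The
 * uv_fs_event_t pointer is stored in the registration's user token
 * (__rfis_utok) so os390_message_queue_handler() can map a notification back
 * to its handle, and the returned registration token is kept for
 * deregistration in uv__fs_event_stop().
 */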
static int os390_regfileint(uv_fs_event_t* handle, char* path) {
  uv__os390_epoll* ep;
  _RFIS reg_struct;
  int rc;

  ep = handle->loop->ep;
  assert(ep->msg_queue != -1);

  reg_struct.__rfis_cmd = _RFIS_REG;
  reg_struct.__rfis_qid = ep->msg_queue;
  reg_struct.__rfis_type = 1;
  memcpy(reg_struct.__rfis_utok, &handle, sizeof(handle));

  rc = __w_pioctl(path, _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
  if (rc != 0)
    return UV__ERR(errno);

  memcpy(handle->rfis_rftok, reg_struct.__rfis_rftok,
         sizeof(handle->rfis_rftok));

  return 0;
}

int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
                      const char* filename, unsigned int flags) {
  char* path;
  int rc;

  if (uv__is_active(handle))
    return UV_EINVAL;

  path = uv__strdup(filename);
  if (path == NULL)
    return UV_ENOMEM;

  rc = os390_regfileint(handle, path);
  if (rc != 0) {
    uv__free(path);
    return rc;
  }

  uv__handle_start(handle);
  handle->path = path;
  handle->cb = cb;

  return 0;
}

int uv__fs_event_stop(uv_fs_event_t* handle) {
  uv__os390_epoll* ep;
  _RFIS reg_struct;
  int rc;

  if (!uv__is_active(handle))
    return 0;

  ep = handle->loop->ep;
  assert(ep->msg_queue != -1);

  reg_struct.__rfis_cmd = _RFIS_UNREG;
  reg_struct.__rfis_qid = ep->msg_queue;
  reg_struct.__rfis_type = 1;
  memcpy(reg_struct.__rfis_rftok, handle->rfis_rftok,
         sizeof(handle->rfis_rftok));

  /*
   * This call will take "/" as the path argument in case we
   * don't care to supply the correct path. The system will simply
   * ignore it.
   */
  rc = __w_pioctl("/", _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
  if (rc != 0 && errno != EALREADY && errno != ENOENT)
    abort();

  if (handle->path != NULL) {
    uv__free(handle->path);
    handle->path = NULL;
  }

  if (rc != 0 && errno == EALREADY)
    return -1;

  uv__handle_stop(handle);

  return 0;
}

int uv_fs_event_stop(uv_fs_event_t* handle) {
  uv__fs_event_stop(handle);
  return 0;
}

void uv__fs_event_close(uv_fs_event_t* handle) {
  /*
   * If we were unable to unregister file interest here, then it is most likely
   * that there is a pending queued change notification. When this happens, we
   * don't want to complete the close as it will free the underlying memory for
   * the handle, causing a use-after-free problem when the event is processed.
   * We defer the final cleanup until after the event is consumed in
   * os390_message_queue_handler().
   */
  if (uv__fs_event_stop(handle) == 0)
    uv__make_close_pending((uv_handle_t*) handle);
}

static int os390_message_queue_handler(uv__os390_epoll* ep) {
  uv_fs_event_t* handle;
  int msglen;
  int events;
  _RFIM msg;

  if (ep->msg_queue == -1)
    return 0;

  msglen = msgrcv(ep->msg_queue, &msg, sizeof(msg), 0, IPC_NOWAIT);

  if (msglen == -1 && errno == ENOMSG)
    return 0;

  if (msglen == -1)
    abort();

  if (msg.__rfim_event == _RFIM_ATTR || msg.__rfim_event == _RFIM_WRITE)
    events = UV_CHANGE;
  else if (msg.__rfim_event == _RFIM_RENAME || msg.__rfim_event == _RFIM_UNLINK)
    events = UV_RENAME;
  else if (msg.__rfim_event == 156)
    /* TODO(gabylb): zos - this event should not happen, need to investigate.
     *
     * This event seems to occur when the watched file is [re]moved, or an
     * editor (like vim) renames then creates the file on save (for vim, that's
     * when backupcopy=no|auto).
     */
    events = UV_RENAME;
  else
    /* Some event that we are not interested in. */
    return 0;

  /* `__rfim_utok` is treated as text when it should be treated as binary while
   * running in ASCII mode, resulting in an unwanted autoconversion.
   */
  __a2e_l(msg.__rfim_utok, sizeof(msg.__rfim_utok));
  handle = *(uv_fs_event_t**)(msg.__rfim_utok);
  assert(handle != NULL);

  assert((handle->flags & UV_HANDLE_CLOSED) == 0);
  if (uv__is_closing(handle)) {
    uv__handle_stop(handle);
    uv__make_close_pending((uv_handle_t*) handle);
    return 0;
  } else if (handle->path == NULL) {
    /* _RFIS_UNREG returned EALREADY. */
    uv__handle_stop(handle);
    return 0;
  }

  /* The file is implicitly unregistered when the change notification is
   * sent, only one notification is sent per registration. So we need to
   * re-register interest in a file after each change notification we
   * receive.
   */
  assert(handle->path != NULL);
  os390_regfileint(handle, handle->path);
  handle->cb(handle, uv__basename_r(handle->path), events, 0);
  return 1;
}

void uv__io_poll(uv_loop_t* loop, int timeout) {
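  /* Note (assumption): max_safe_timeout below appears to be inherited from
   * libuv's Linux backend, where 1789569 ms (about 30 minutes, roughly
   * INT32_MAX / 1200) is the largest timeout older kernels handle without
   * treating it as infinite; here it simply clamps very large timeouts
   * before epoll_wait().
   */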
  static const int max_safe_timeout = 1789569;
  struct epoll_event events[1024];
  struct epoll_event* pe;
  struct epoll_event e;
  uv__os390_epoll* ep;
  int real_timeout;
  QUEUE* q;
  uv__io_t* w;
  uint64_t base;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;
  int nevents;
  int have_signals;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    uv_stream_t* stream;

    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, watcher_queue);

    assert(w->pevents != 0);
    assert(w->fd >= 0);

    stream = container_of(w, uv_stream_t, io_watcher);

    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.fd = w->fd;

    if (w->events == 0)
      op = EPOLL_CTL_ADD;
    else
      op = EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (epoll_ctl(loop->ep, op, w->fd, &e)) {
      if (errno != EEXIST)
        abort();

      assert(op == EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (epoll_ctl(loop->ep, EPOLL_CTL_MOD, w->fd, &e))
        abort();
    }

    w->events = w->pevents;
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  real_timeout = timeout;

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
      timeout = max_safe_timeout;

    nfds = epoll_wait(loop->ep, events,
                      ARRAY_SIZE(events), timeout);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* We may have been inside the system call for longer than |timeout|
       * milliseconds so we need to update the timestamp to avoid drift.
       */
      goto update_timeout;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      ep = loop->ep;
      if (pe->is_msg) {
        os390_message_queue_handler(ep);
        nevents++;
        continue;
      }

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when previous callback invocation in this loop has stopped
       * the current watcher. Also, filters out events that users have not
       * requested us to watch.
       */
      pe->events &= w->pevents | POLLERR | POLLHUP;
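
      /* Note: if epoll reports only POLLERR/POLLHUP, fold the watcher's
       * requested read/write bits back in so the callback runs its normal
       * read or write path and observes the error or EOF there.
       */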
      if (pe->events == POLLERR || pe->events == POLLHUP)
        pe->events |= w->pevents & (POLLIN | POLLOUT);

      if (pe->events != 0) {
        /* Run signal watchers last. This also affects child process watchers
         * because those are implemented in terms of signal watchers.
         */
        if (w == &loop->signal_io_watcher) {
          have_signals = 1;
        } else {
          uv__metrics_update_idle_time(loop);
          w->cb(loop, w, pe->events);
        }

        nevents++;
      }
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    real_timeout -= (loop->time - base);
    if (real_timeout <= 0)
      return;

    timeout = real_timeout;
  }
}

int uv__io_fork(uv_loop_t* loop) {
  /*
   * Nullify the msg queue but don't close it because
   * it is still being used by the parent.
   */
  loop->ep = NULL;

  return uv__platform_loop_init(loop);
}