/* Copyright libuv project contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "internal.h"

#include <sys/ioctl.h>
#include <net/if.h>
#include <utmpx.h>
#include <unistd.h>
#include <sys/ps.h>
#include <builtins.h>
#include <termios.h>
#include <sys/msg.h>
#include <sys/resource.h>

#if defined(__clang__)
#include "csrsic.h"
#else
#include "//'SYS1.SAMPLIB(CSRSIC)'"
#endif

/* Address of the communication vector table (CVT), anchored in the PSA. */
#define CVT_PTR           0x10
#define PSA_PTR           0x00
#define CSD_OFFSET        0x294

/*
  Long-term average CPU service used by this logical partition,
  in millions of service units per hour. If this value is above
  the partition's defined capacity, the partition will be capped.
  It is calculated using the physical CPU adjustment factor
  (RCTPCPUA) so it may not match other measures of service which
  are based on the logical CPU adjustment factor. It is available
  if the hardware supports LPAR cluster.
*/
#define RCTLACS_OFFSET    0xC4

/* 32-bit count of alive CPUs. This includes both CPs and IFAs */
#define CSD_NUMBER_ONLINE_CPUS        0xD4

/* Address of system resources manager (SRM) control table */
#define CVTOPCTP_OFFSET   0x25C

/* Address of the RCT table */
#define RMCTRCT_OFFSET    0xE4

/* Address of the rsm control and enumeration area. */
#define CVTRCEP_OFFSET    0x490

/* Total number of frames currently on all available frame queues. */
#define RCEAFC_OFFSET     0x088

/* CPC model length from the CSRSI Service. */
#define CPCMODEL_LENGTH   16

/* Pointer to the home (current) ASCB. */
#define PSAAOLD           0x224

/* Pointer to rsm address space block extension. */
#define ASCBRSME          0x16C

/*
  NUMBER OF FRAMES CURRENTLY IN USE BY THIS ADDRESS SPACE.
  It does not include 2G frames.
*/
#define RAXFMCT           0x2C

/* Thread Entry constants */
#define PGTH_CURRENT  1
#define PGTH_LEN      26
#define PGTHAPATH     0x20
#pragma linkage(BPX4GTH, OS)
#pragma linkage(BPX1GTH, OS)

/* TOD Clock resolution in nanoseconds */
#define TOD_RES 4.096

typedef unsigned data_area_ptr_assign_type;

typedef union {
  struct {
#if defined(_LP64)
    data_area_ptr_assign_type lower;
#endif
    data_area_ptr_assign_type assign;
  };
  char* deref;
} data_area_ptr;
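
/* Layout note (descriptive, added for clarity): z/Architecture is big-endian,
 * so in the 64-bit (_LP64) build the anonymous struct places the zeroed
 * `lower` word in the high half of the union and `assign` in the low half.
 * Writing a 31-bit control-block address to `assign` on a {0}-initialized
 * union therefore makes `deref` a valid zero-extended 64-bit pointer; in the
 * 31-bit build `assign` and `deref` simply overlap.
 */
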
void uv_loadavg(double avg[3]) {
  /* TODO: implement the following */
  avg[0] = 0;
  avg[1] = 0;
  avg[2] = 0;
}

int uv__platform_loop_init(uv_loop_t* loop) {
  uv__os390_epoll* ep;

  ep = epoll_create1(0);
  loop->ep = ep;
  if (ep == NULL)
    return UV__ERR(errno);

  return 0;
}

void uv__platform_loop_delete(uv_loop_t* loop) {
  if (loop->ep != NULL) {
    epoll_queue_close(loop->ep);
    loop->ep = NULL;
  }
}

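/* The STCKF instruction stores the 64-bit TOD clock, whose bit 51 ticks once
 * per microsecond; the raw value is therefore in units of 1/4096 us. Dividing
 * by 4.096 (TOD_RES) converts to nanoseconds: e.g. a raw delta of 4096 is
 * 4096 / 4.096 = 1000 ns = 1 us.
 */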
uint64_t uv__hrtime(uv_clocktype_t type) {
  unsigned long long timestamp;
  __stckf(&timestamp);
  /* Convert to nanoseconds */
  return timestamp / TOD_RES;
}

static int getexe(char* buf, size_t len) {
  return uv__strscpy(buf, __getargv()[0], len);
}

/*
 * We could use a static buffer for the path manipulations that we need outside
 * of the function, but this function could be called by multiple consumers and
 * we don't want to potentially create a race condition in the use of snprintf.
 * There is no direct way of getting the exe path on z/OS, either through
 * /procfs or through some libc API. The approach here is to parse argv[0]'s
 * pattern and use it in conjunction with the PATH environment variable to
 * craft one.
 */
int uv_exepath(char* buffer, size_t* size) {
  int res;
  char args[PATH_MAX];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  res = getexe(args, sizeof(args));
  if (res < 0)
    return UV_EINVAL;

  return uv__search_path(args, buffer, size);
}

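/* Free memory is read straight from MVS control blocks: the CVT is anchored
 * at absolute address 0x10, CVTRCEP points to the RSM control and enumeration
 * area, and RCEAFC there counts the frames on all available frame queues.
 * Each frame is a 4 KiB page, hence the multiply by 4096 below.
 */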
uint64_t uv_get_free_memory(void) {
  uint64_t freeram;

  data_area_ptr cvt = {0};
  data_area_ptr rcep = {0};
  cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
  rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
  freeram = (uint64_t)*((uint32_t*)(rcep.deref + RCEAFC_OFFSET)) * 4096;
  return freeram;
}

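/* The double index below is pointer arithmetic in disguise: with 4-byte
 * __ptr32 pointers, [4] reads the CVT address from absolute offset 16 (0x10)
 * and [214] reads the word at CVT offset 856 (0x358), i.e. CVTRLSTG, the
 * real storage online at IPL in kilobytes, hence the multiply by 1024.
 */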
uint64_t uv_get_total_memory(void) {
  /* Use CVTRLSTG to get the size of actual real storage online at IPL in K. */
  return (uint64_t)((int)((char* __ptr32* __ptr32*)0)[4][214]) * 1024;
}

uint64_t uv_get_constrained_memory(void) {
  struct rlimit rl;

  /* RLIMIT_MEMLIMIT return value is in megabytes rather than bytes. */
  if (getrlimit(RLIMIT_MEMLIMIT, &rl) == 0)
    return rl.rlim_cur * 1024 * 1024;

  return 0; /* There is no memory limit set. */
}

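/* The resident set is derived from another control-block walk: PSAAOLD in the
 * PSA points at the home ASCB, ASCBRSME points at the RSM address-space
 * extension (RAX), and RAXFMCT there is the count of in-use frames for this
 * address space (2G frames excluded, per the define above).
 */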
int uv_resident_set_memory(size_t* rss) {
  char* ascb;
  char* rax;
  size_t nframes;

  ascb = *(char* __ptr32 *)(PSA_PTR + PSAAOLD);
  rax = *(char* __ptr32 *)(ascb + ASCBRSME);
  nframes = *(unsigned int*)(rax + RAXFMCT);

  *rss = nframes * sysconf(_SC_PAGESIZE);
  return 0;
}

int uv_uptime(double* uptime) {
  struct utmpx u;
  struct utmpx* v;
  time64_t t;

  u.ut_type = BOOT_TIME;
  v = getutxid(&u);
  if (v == NULL)
    return -1;
  *uptime = difftime64(time64(&t), v->ut_tv.tv_sec);
  return 0;
}

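/* uv_cpu_info() reads the online-CPU count from the CSD and the long-term
 * average CPU service (RCTLACS) from the RCT. Note that `info`, a CSRSI
 * service area carrying the CPC model and CPU capability, is read in the
 * loop below but is not populated in this extract; the CSRSI service from
 * the CSRSIC header included above is the presumed source.
 */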
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  uv_cpu_info_t* cpu_info;
  int idx;
  siv1v2 info;
  data_area_ptr cvt = {0};
  data_area_ptr csd = {0};
  data_area_ptr rmctrct = {0};
  data_area_ptr cvtopctp = {0};
  int cpu_usage_avg;

  cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);

  csd.assign = *((data_area_ptr_assign_type *) (cvt.deref + CSD_OFFSET));
  cvtopctp.assign = *((data_area_ptr_assign_type *) (cvt.deref + CVTOPCTP_OFFSET));
  rmctrct.assign = *((data_area_ptr_assign_type *) (cvtopctp.deref + RMCTRCT_OFFSET));

  *count = *((int*) (csd.deref + CSD_NUMBER_ONLINE_CPUS));
  cpu_usage_avg = *((unsigned short int*) (rmctrct.deref + RCTLACS_OFFSET));

  *cpu_infos = uv__malloc(*count * sizeof(uv_cpu_info_t));
  if (!*cpu_infos)
    return UV_ENOMEM;

  cpu_info = *cpu_infos;
  idx = 0;
  while (idx < *count) {
    cpu_info->speed = *(int*)(info.siv1v2si22v1.si22v1cpucapability);
    cpu_info->model = uv__malloc(CPCMODEL_LENGTH + 1);
    memset(cpu_info->model, '\0', CPCMODEL_LENGTH + 1);
    memcpy(cpu_info->model, info.siv1v2si11v1.si11v1cpcmodel, CPCMODEL_LENGTH);
    cpu_info->cpu_times.user = cpu_usage_avg;
    /* TODO: implement the following */
    cpu_info->cpu_times.sys = 0;
    cpu_info->cpu_times.idle = 0;
    cpu_info->cpu_times.irq = 0;
    cpu_info->cpu_times.nice = 0;
    ++cpu_info;
    ++idx;
  }

  return 0;
}

static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
                                      int* count) {
  uv_interface_address_t* address;
  int sockfd;
  int maxsize;
  __net_ifconf6header_t ifc;
  __net_ifconf6entry_t* ifr;
  __net_ifconf6entry_t* p;

  *count = 0;
  /* Assume maximum buffer size allowable */
  maxsize = 16384;

  if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP)))
    return UV__ERR(errno);

  ifc.__nif6h_version = 1;
  ifc.__nif6h_buflen = maxsize;
  ifc.__nif6h_buffer = uv__calloc(1, maxsize);
  if (ifc.__nif6h_buffer == NULL) {
    uv__close(sockfd);
    return UV_ENOMEM;
  }

  if (ioctl(sockfd, SIOCGIFCONF6, &ifc) == -1) {
    uv__free(ifc.__nif6h_buffer);
    uv__close(sockfd);
    return UV__ERR(errno);
  }

  ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
  while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
    p = ifr;
    ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);

    if (!(p->__nif6e_addr.sin6_family == AF_INET6 ||
          p->__nif6e_addr.sin6_family == AF_INET))
      continue;

    if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
      continue;

    (*count)++;
  }

  /* Alloc the return interface structs */
  *addresses = uv__malloc(*count * sizeof(uv_interface_address_t));
  if (!(*addresses)) {
    uv__free(ifc.__nif6h_buffer);
    uv__close(sockfd);
    return UV_ENOMEM;
  }
  address = *addresses;

  ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
  while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
    p = ifr;
    ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);

    if (!(p->__nif6e_addr.sin6_family == AF_INET6 ||
          p->__nif6e_addr.sin6_family == AF_INET))
      continue;

    if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
      continue;

    /* All conditions above must match count loop */

    address->name = uv__strdup(p->__nif6e_name);

    if (p->__nif6e_addr.sin6_family == AF_INET6)
      address->address.address6 = *((struct sockaddr_in6*) &p->__nif6e_addr);
    else
      address->address.address4 = *((struct sockaddr_in*) &p->__nif6e_addr);

    /* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */

    /* Use the current entry's own flags; `p` is the record being examined. */
    address->is_internal = p->__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
    memset(address->phys_addr, 0, sizeof(address->phys_addr));
    address++;
  }

  uv__free(ifc.__nif6h_buffer);
  uv__close(sockfd);
  return 0;
}

int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  int sockfd;
  int maxsize;
  int r;
  struct ifconf ifc;
  struct ifreq flg;
  struct ifreq* ifr;
  struct ifreq* p;
  int count_v6;

  *count = 0;
  *addresses = NULL;

  /* get the ipv6 addresses first */
  uv_interface_address_t* addresses_v6;
  r = uv__interface_addresses_v6(&addresses_v6, &count_v6);
  if (r != 0)
    return r;

  /* now get the ipv4 addresses */

  /* Assume maximum buffer size allowable */
  maxsize = 16384;

  sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
  if (0 > sockfd)
    return UV__ERR(errno);

  ifc.ifc_req = uv__calloc(1, maxsize);
  ifc.ifc_len = maxsize;
  if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
    uv__close(sockfd);
    return UV__ERR(errno);
  }

#define MAX(a,b) (((a)>(b))?(a):(b))
#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))

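/* SIOCGIFCONF returns variable-length records: each entry is the fixed-size
 * interface name followed by a socket address that may be longer than the
 * declared sockaddr field (e.g. an AF_INET6 address). ADDR_SIZE picks the
 * larger of sa_len and sizeof(struct sockaddr) so the walks below step over
 * the real record size; for a plain AF_INET entry with sa_len == 16 the
 * stride is sizeof(ifr->ifr_name) + 16 bytes.
 */
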
  /* Count all up and running ipv4/ipv6 addresses */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
      uv__close(sockfd);
      return UV__ERR(errno);
    }

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    (*count)++;
  }

  if (*count == 0 && count_v6 == 0) {
    uv__free(ifc.ifc_req);
    uv__close(sockfd);
    return 0;
  }

  /* Alloc the return interface structs */
  *addresses = uv__malloc((*count + count_v6) *
                          sizeof(uv_interface_address_t));
  if (!(*addresses)) {
    uv__close(sockfd);
    return UV_ENOMEM;
  }
  address = *addresses;

  /* copy over the ipv6 addresses */
  memcpy(address, addresses_v6, count_v6 * sizeof(uv_interface_address_t));
  address += count_v6;
  *count += count_v6;
  uv__free(addresses_v6);

  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
      uv__close(sockfd);
      return UV__ERR(errno);
    }

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    /* All conditions above must match count loop */

    address->name = uv__strdup(p->ifr_name);

    if (p->ifr_addr.sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
    }

    address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
    memset(address->phys_addr, 0, sizeof(address->phys_addr));
    address++;
  }

#undef ADDR_SIZE
#undef MAX

  uv__free(ifc.ifc_req);
  uv__close(sockfd);
  return 0;
}

void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int i;

  for (i = 0; i < count; ++i)
    uv__free(addresses[i].name);

  uv__free(addresses);
}

void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct epoll_event* events;
  struct epoll_event dummy;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct epoll_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events != NULL)
    /* Invalidate events with same file descriptor */
    for (i = 0; i < nfds; i++)
      if ((int) events[i].fd == fd)
        events[i].fd = -1;

  /* Remove the file descriptor from the epoll. */
  if (loop->ep != NULL)
    epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, &dummy);
}

int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct pollfd p[1];
  int rv;

  p[0].fd = fd;
  p[0].events = POLLIN;

  do
    rv = poll(p, 1, 0);
  while (rv == -1 && errno == EINTR);

  if (rv == -1)
    abort();

  if (p[0].revents & POLLNVAL)
    return UV_EINVAL;

  return 0;
}

void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}

int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}

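/* File watching on z/OS uses the register-file-interest (RFIS) w_pioctl:
 * uv_fs_event_start() registers the watched path with _RFIS_REG and points
 * notifications at the loop's message queue; the handle pointer rides along
 * in the __rfis_utok user token and comes back in each message. A typical
 * caller-side sketch (illustrative only, not part of this file):
 *
 *   uv_fs_event_t watcher;
 *   uv_fs_event_init(loop, &watcher);
 *   uv_fs_event_start(&watcher, on_fs_event, "/some/path", 0);
 */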
int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
                      const char* filename, unsigned int flags) {
  uv__os390_epoll* ep;
  _RFIS reg_struct;
  char* path;
  int rc;

  if (uv__is_active(handle))
    return UV_EINVAL;

  ep = handle->loop->ep;
  assert(ep->msg_queue != -1);

  reg_struct.__rfis_cmd = _RFIS_REG;
  reg_struct.__rfis_qid = ep->msg_queue;
  reg_struct.__rfis_type = 1;
  memcpy(reg_struct.__rfis_utok, &handle, sizeof(handle));

  path = uv__strdup(filename);
  if (path == NULL)
    return UV_ENOMEM;

  rc = __w_pioctl(path, _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
  if (rc != 0)
    return UV__ERR(errno);

  uv__handle_start(handle);
  handle->path = path;
  handle->cb = cb;
  memcpy(handle->rfis_rftok, reg_struct.__rfis_rftok,
         sizeof(handle->rfis_rftok));

  return 0;
}

int uv_fs_event_stop(uv_fs_event_t* handle) {
  uv__os390_epoll* ep;
  _RFIS reg_struct;
  int rc;

  if (!uv__is_active(handle))
    return 0;

  ep = handle->loop->ep;
  assert(ep->msg_queue != -1);

  reg_struct.__rfis_cmd = _RFIS_UNREG;
  reg_struct.__rfis_qid = ep->msg_queue;
  reg_struct.__rfis_type = 1;
  memcpy(reg_struct.__rfis_rftok, handle->rfis_rftok,
         sizeof(handle->rfis_rftok));

  /*
   * This call will take "/" as the path argument in case we
   * don't care to supply the correct path. The system will simply
   * ignore it.
   */
  rc = __w_pioctl("/", _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
  if (rc != 0 && errno != EALREADY && errno != ENOENT)
    abort();

  uv__handle_stop(handle);

  return 0;
}

static int os390_message_queue_handler(uv__os390_epoll* ep) {
  uv_fs_event_t* handle;
  int msglen;
  int events;
  _RFIM msg;

  if (ep->msg_queue == -1)
    return 0;

  msglen = msgrcv(ep->msg_queue, &msg, sizeof(msg), 0, IPC_NOWAIT);

  if (msglen == -1 && errno == ENOMSG)
    return 0;

  if (msglen == -1)
    abort();

  events = 0;
  if (msg.__rfim_event == _RFIM_ATTR || msg.__rfim_event == _RFIM_WRITE)
    events = UV_CHANGE;
  else if (msg.__rfim_event == _RFIM_RENAME)
    events = UV_RENAME;
  else
    /* Some event that we are not interested in. */
    return 0;

  /* `__rfim_utok` is treated as text when it should be treated as binary while
   * running in ASCII mode, resulting in an unwanted autoconversion.
   */
  __a2e_l(msg.__rfim_utok, sizeof(msg.__rfim_utok));
  handle = *(uv_fs_event_t**)(msg.__rfim_utok);
  handle->cb(handle, uv__basename_r(handle->path), events, 0);
  return 1;
}

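/* uv__io_poll() below multiplexes two event sources through the emulated
 * epoll: ordinary file descriptors, and the loop's message queue carrying
 * RFIS file-interest messages. Entries that the os390 epoll shim flags as
 * queue events (its `is_msg` field) are routed to
 * os390_message_queue_handler() instead of an fd watcher. The
 * max_safe_timeout clamp mirrors the Linux backend's workaround for 32-bit
 * timeout overflow.
 */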
void uv__io_poll(uv_loop_t* loop, int timeout) {
  static const int max_safe_timeout = 1789569;
  struct epoll_event events[1024];
  struct epoll_event* pe;
  struct epoll_event e;
  uv__os390_epoll* ep;
  int real_timeout;
  QUEUE* q;
  uv__io_t* w;
  uint64_t base;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;
  int nevents;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    uv_stream_t* stream;

    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, watcher_queue);

    assert(w->pevents != 0);
    assert(w->fd >= 0);

    stream = container_of(w, uv_stream_t, io_watcher);

    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.fd = w->fd;

    if (w->events == 0)
      op = EPOLL_CTL_ADD;
    else
      op = EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (epoll_ctl(loop->ep, op, w->fd, &e)) {
      if (errno != EEXIST)
        abort();

      assert(op == EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (epoll_ctl(loop->ep, EPOLL_CTL_MOD, w->fd, &e))
        abort();
    }

    w->events = w->pevents;
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  real_timeout = timeout;
  nevents = 0;

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
    user_timeout = 0;
  }

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
      timeout = max_safe_timeout;

    nfds = epoll_wait(loop->ep, events,
                      ARRAY_SIZE(events), timeout);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* We may have been inside the system call for longer than |timeout|
       * milliseconds so we need to update the timestamp to avoid drift.
       */
      goto update_timeout;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      ep = loop->ep;
      if (pe->is_msg) {
        os390_message_queue_handler(ep);
        nevents++;
        continue;
      }

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when previous callback invocation in this loop has stopped
       * the current watcher. Also, filters out events that users have not
       * requested us to watch.
       */
      pe->events &= w->pevents | POLLERR | POLLHUP;

      if (pe->events == POLLERR || pe->events == POLLHUP)
        pe->events |= w->pevents & (POLLIN | POLLOUT);

      if (pe->events != 0) {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, pe->events);
        nevents++;
      }
    }
    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    real_timeout -= (loop->time - base);
    if (real_timeout <= 0)
      return;

    timeout = real_timeout;
  }
}

int uv__io_fork(uv_loop_t* loop) {
  /*
    Nullify the msg queue but don't close it because
    it is still being used by the parent.
  */
  loop->ep->msg_queue = -1;

  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}