1 /* Copyright libuv project contributors. All rights reserved.
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 #include "os390-syscalls.h"
/* NOTE(review): this file is an extracted, sampled listing -- each line keeps
 * a fused original line number and many intervening lines are absent.
 * Code text below is preserved verbatim; only comments are added. */
/* Queue of every live uv__os390_epoll instance (linked in epoll_create1,
 * drained in child_fork, walked in epoll_file_close). */
30 static QUEUE global_epoll_queue;
/* Protects global_epoll_queue; also held across fork() via the
 * before_fork/after_fork handlers below. */
31 static uv_mutex_t global_epoll_lock;
/* One-shot guard so epoll_init() runs exactly once per process. */
32 static uv_once_t once = UV_ONCE_INIT;
/* scandir() replacement for z/OS, whose libc lacks one.
 * Reads directory 'maindir', keeps the entries accepted by 'filter' (a NULL
 * filter keeps everything), sorts the kept entries with 'compar' via qsort,
 * and hands the heap-allocated array back through *namelist.
 * NOTE(review): this extract is missing the entry loop header, the error
 * paths and the return statements, so surrounding behavior is hedged. */
34 int scandir(const char* maindir, struct dirent*** namelist,
35 int (*filter)(const struct dirent*),
36 int (*compar)(const struct dirent**,
37 const struct dirent **)) {
/* 'nl' (declared on a line not visible here) presumably accumulates the
 * kept entries; nl_copy holds each realloc result so nl stays valid if a
 * grow fails. */
39 struct dirent** nl_copy;
40 struct dirent* dirent;
48 mdir = opendir(maindir);
/* Presumably executed in a loop until readdir() returns NULL -- the loop
 * header is on a missing line. */
53 dirent = readdir(mdir);
/* NULL filter accepts every entry. */
56 if (!filter || filter(dirent)) {
/* Copy the entry to the heap so it outlives the DIR stream. */
58 copy = uv__malloc(sizeof(*copy));
61 memcpy(copy, dirent, sizeof(*copy));
/* Grow-by-one realloc; checked through nl_copy before replacing nl. */
63 nl_copy = uv__realloc(nl, sizeof(*copy) * (count + 1));
64 if (nl_copy == NULL) {
/* Sort the collected entries with the caller-supplied comparator. */
74 qsort(nl, count, sizeof(struct dirent *),
75 (int (*)(const void *, const void *)) compar);
/* Rounds 'val' up to a power of two; used by maybe_resize() as the growth
 * policy for the pollfd array.
 * NOTE(review): the body is on lines missing from this extract -- TODO
 * confirm boundary behavior (val == 0, values near UINT_MAX) upstream. */
94 static unsigned int next_power_of_two(unsigned int val) {
/* Grows lst->items so it can hold at least 'len' pollfd slots. The final
 * slot is reserved for the instance's message-queue pollfd, so that entry
 * is saved before the realloc and restored into the new last slot after.
 * NOTE(review): declarations of 'event'/'i', the realloc-failure path and
 * the final size/pointer assignments are on lines missing from this
 * extract. */
106 static void maybe_resize(uv__os390_epoll* lst, unsigned int len) {
107 unsigned int newsize;
109 struct pollfd* newlst;
/* Already large enough -- nothing to do. */
112 if (len <= lst->size)
118 /* Extract the message queue at the end. */
119 event = lst->items[lst->size - 1];
120 lst->items[lst->size - 1].fd = -1;
/* Growth policy: round the requested length up to a power of two. */
123 newsize = next_power_of_two(len);
124 newlst = uv__reallocf(lst->items, newsize * sizeof(lst->items[0]));
/* Presumably marks the newly added slots unused (loop body not visible). */
128 for (i = lst->size; i < newsize; ++i)
131 /* Restore the message queue at the end */
132 newlst[newsize - 1] = event;
/* Process-teardown hook: removes the SysV message queue whose id doubles as
 * the default loop's backend fd (set up via init_message_queue /
 * epoll_create1). */
139 void uv__os390_cleanup(void) {
140 msgctl(uv_backend_fd(uv_default_loop()), IPC_RMID, NULL);
/* Creates the private SysV message queue used to wake a blocked
 * epoll_wait(), then sends and receives one dummy message so the queue
 * becomes affiliated with this process (z/OS requirement, see comment
 * below).
 * NOTE(review): the 'msg' declaration and the failure handling (presumably
 * abort()) are on lines missing from this extract. */
144 static void init_message_queue(uv__os390_epoll* lst) {
150 /* initialize message queue */
151 lst->msg_queue = msgget(IPC_PRIVATE, 0600 | IPC_CREAT);
152 if (lst->msg_queue == -1)
156 On z/OS, the message queue will be affiliated with the process only
157 when a send is performed on it. Once this is done, the system
158 can be queried for all message queues belonging to our process id.
161 if (msgsnd(lst->msg_queue, &msg, sizeof(msg.body), 0) != 0)
164 /* Clean up the dummy message sent above */
165 if (msgrcv(lst->msg_queue, &msg, sizeof(msg.body), 0, 0) != sizeof(msg.body))
/* pthread_atfork 'prepare' handler: hold the global epoll lock across
 * fork() so the child never inherits it mid-mutation. */
170 static void before_fork(void) {
171 uv_mutex_lock(&global_epoll_lock);
/* pthread_atfork 'parent' handler: release the lock taken in
 * before_fork(). */
175 static void after_fork(void) {
176 uv_mutex_unlock(&global_epoll_lock);
/* pthread_atfork 'child' handler: resets the once-guard so epoll_init()
 * can run again in the child, frees every inherited epoll item list, then
 * releases and destroys the inherited (locked) global mutex. */
180 static void child_fork(void) {
182 uv_once_t child_once = UV_ONCE_INIT;
/* memcpy rather than assignment: uv_once_t is opaque and may not be
 * directly assignable. */
185 memcpy(&once, &child_once, sizeof(child_once));
187 /* reset epoll list */
188 while (!QUEUE_EMPTY(&global_epoll_queue)) {
189 uv__os390_epoll* lst;
190 q = QUEUE_HEAD(&global_epoll_queue);
/* NOTE(review): the QUEUE_REMOVE(q) presumably sits on the line missing
 * between these two -- otherwise this loop would not terminate; confirm
 * upstream. */
192 lst = QUEUE_DATA(q, uv__os390_epoll, member);
193 uv__free(lst->items);
198 uv_mutex_unlock(&global_epoll_lock);
199 uv_mutex_destroy(&global_epoll_lock);
/* One-time process setup (run via uv_once): initializes the global queue
 * and lock, and registers the fork handlers above.
 * NOTE(review): the failure branches of both 'if's (presumably abort()) are
 * on lines missing from this extract. */
203 static void epoll_init(void) {
204 QUEUE_INIT(&global_epoll_queue);
205 if (uv_mutex_init(&global_epoll_lock))
208 if (pthread_atfork(&before_fork, &after_fork, &child_fork))
/* epoll_create1() emulation: allocates a uv__os390_epoll whose pollfd list
 * always keeps its last slot watching the instance's wakeup message queue,
 * then links the instance into the global queue.
 * NOTE(review): 'flags' handling, the malloc-failure path, the field
 * initialization around line 218 and the return statement are on lines
 * missing from this extract. */
213 uv__os390_epoll* epoll_create1(int flags) {
214 uv__os390_epoll* lst;
216 lst = uv__malloc(sizeof(*lst));
218 /* initialize list */
221 init_message_queue(lst);
/* Reserve at least one slot; maybe_resize() keeps the last index for the
 * message queue. */
222 maybe_resize(lst, 1);
223 lst->items[lst->size - 1].fd = lst->msg_queue;
224 lst->items[lst->size - 1].events = POLLIN;
225 lst->items[lst->size - 1].revents = 0;
226 uv_once(&once, epoll_init);
227 uv_mutex_lock(&global_epoll_lock);
228 QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member);
229 uv_mutex_unlock(&global_epoll_lock);
/* epoll_ctl() emulation. The fd itself indexes lst->items, so ADD resizes
 * to fd + 2 to keep the trailing message-queue slot reserved (see comment
 * at line 251 below).
 * NOTE(review): the 'op'/'fd' parameters, errno assignments and return
 * statements are on lines missing from this extract -- presumably 0 on
 * success and -1 with errno set on the unlock-and-bail paths. */
236 int epoll_ctl(uv__os390_epoll* lst,
239 struct epoll_event *event) {
240 uv_mutex_lock(&global_epoll_lock);
242 if (op == EPOLL_CTL_DEL) {
/* Deleting an fd that was never added (or already deleted) fails. */
243 if (fd >= lst->size || lst->items[fd].fd == -1) {
244 uv_mutex_unlock(&global_epoll_lock);
/* Mark the slot unused rather than compacting the array. */
248 lst->items[fd].fd = -1;
249 } else if (op == EPOLL_CTL_ADD) {
251 /* Resizing to 'fd + 1' would expand the list to contain at least
252 * 'fd'. But we need to guarantee that the last index on the list
253 * is reserved for the message queue. So specify 'fd + 2' instead.
255 maybe_resize(lst, fd + 2);
/* Adding an fd that is already present fails (EEXIST presumably set on a
 * missing line). */
256 if (lst->items[fd].fd != -1) {
257 uv_mutex_unlock(&global_epoll_lock);
261 lst->items[fd].fd = fd;
262 lst->items[fd].events = event->events;
263 lst->items[fd].revents = 0;
264 } else if (op == EPOLL_CTL_MOD) {
/* 'size - 1' keeps MOD away from the reserved message-queue slot. */
265 if (fd >= lst->size - 1 || lst->items[fd].fd == -1) {
266 uv_mutex_unlock(&global_epoll_lock);
270 lst->items[fd].events = event->events;
271 lst->items[fd].revents = 0;
275 uv_mutex_unlock(&global_epoll_lock);
/* Upper bounds used by epoll_wait() to reject sizes whose byte counts
 * would overflow when multiplied by the element size. */
279 #define EP_MAX_PFDS (ULONG_MAX / sizeof(struct pollfd))
280 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
/* epoll_wait() emulation over z/OS poll(), which can wait on SysV message
 * queues as well as fds (_SET_FDS_MSGS / _NFDS / _NMSGS). Translates pollfd
 * revents into epoll_events and flags the entry that fired via the wakeup
 * message queue with is_msg.
 * NOTE(review): the declarations of 'pfds', 'size', 'i', 'pfd', 'nevents',
 * 'reventcount', the errno assignments, and the return statements are on
 * lines missing from this extract; comments are hedged accordingly. */
282 int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
283 int maxevents, int timeout) {
289 struct pollfd msg_fd;
/* Defensive argument validation (errno presumably set on missing lines). */
292 if (!lst || !lst->items || !events) {
297 if (lst->size > EP_MAX_PFDS) {
302 if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) {
/* Tell poll() the last entry is a message queue, not a file descriptor;
 * the second form presumably covers the no-message-queue case. */
308 _SET_FDS_MSGS(size, 1, lst->size - 1);
310 _SET_FDS_MSGS(size, 0, 0);
312 pollret = poll(pfds, size, timeout);
316 assert(lst->size > 0);
/* z/OS poll() reports fd and message counts separately; combine them into
 * a single ready count. */
318 pollret = _NFDS(pollret) + _NMSGS(pollret);
/* Snapshot the message-queue slot before scanning the array. */
322 msg_fd = pfds[lst->size - 1];
324 i < lst->size && i < maxevents && reventcount < pollret; ++i) {
325 struct epoll_event ev;
/* Skip unused slots and slots with nothing pending. */
329 if (pfd->fd == -1 || pfd->revents == 0)
333 ev.events = pfd->revents;
/* NOTE(review): both branch bodies are on missing lines -- they appear to
 * special-case combined POLLIN|POLLOUT reporting; confirm upstream. */
335 if (pfd->revents & POLLIN && pfd->revents & POLLOUT)
337 else if (pfd->revents & (POLLIN | POLLOUT))
341 events[nevents++] = ev;
/* Mark the wakeup that came from the message queue so the caller can
 * drain it. */
344 if (msg_fd.revents != 0 && msg_fd.fd != -1)
346 events[nevents - 1].is_msg = 1;
/* Invoked when 'fd' is closed anywhere in the process: marks the fd unused
 * in every registered epoll instance so a recycled descriptor number is not
 * spuriously polled. Return value visible nowhere in this extract --
 * presumably 0. */
352 int epoll_file_close(int fd) {
/* Ensure the global queue and lock exist even if no epoll instance has
 * been created yet. */
355 uv_once(&once, epoll_init);
356 uv_mutex_lock(&global_epoll_lock);
357 QUEUE_FOREACH(q, &global_epoll_queue) {
358 uv__os390_epoll* lst;
360 lst = QUEUE_DATA(q, uv__os390_epoll, member);
361 if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1)
362 lst->items[fd].fd = -1;
365 uv_mutex_unlock(&global_epoll_lock);
/* Tears down one epoll instance: unlinks it from the global queue, removes
 * its wakeup message queue, and frees the pollfd array.
 * NOTE(review): any clearing/freeing of 'lst' itself happens on lines not
 * visible in this extract. */
369 void epoll_queue_close(uv__os390_epoll* lst) {
370 /* Remove epoll instance from global queue */
371 uv_mutex_lock(&global_epoll_lock);
372 QUEUE_REMOVE(&lst->member);
373 uv_mutex_unlock(&global_epoll_lock);
/* Destroy the SysV message queue created by init_message_queue(). */
376 msgctl(lst->msg_queue, IPC_RMID, NULL);
378 uv__free(lst->items);
/* mkdtemp() replacement for z/OS: overwrites the trailing "XXXXXX" of
 * 'path' with characters drawn from /dev/urandom and mkdir()s the result,
 * retrying on name collisions (the retry loop header, 'len'/'ep'/'cp'/'v'
 * declarations, fd cleanup and return statements are on lines missing from
 * this extract -- presumably returns 'path' on success, NULL with errno set
 * on failure). */
383 char* mkdtemp(char* path) {
384 static const char* tempchars =
385 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
386 static const size_t num_chars = 62;
387 static const size_t num_x = 6;
389 unsigned int tries, i;
/* Template must end in exactly num_x 'X' characters. */
398 if (len < num_x || strncmp(ep - num_x, "XXXXXX", num_x)) {
/* Randomness source for the generated suffix. */
403 fd = open("/dev/urandom", O_RDONLY);
410 if (read(fd, &v, sizeof(v)) != sizeof(v))
/* Map the random value onto the 62-character alphabet, one X at a time
 * (v is presumably shifted/divided on a missing line). */
414 for (i = 0; i < num_x; i++) {
415 *cp++ = tempchars[v % num_chars];
419 if (mkdir(path, S_IRWXU) == 0) {
/* Only an existing-name collision is retryable; any other mkdir error
 * ends the attempt. */
423 else if (errno != EEXIST)
/* readlink() wrapper that understands z/OS "/$SYSNAME/..."-style link
 * targets: a target beginning with "/$" names a system variable whose real
 * path must be resolved with realpath() and spliced onto the remainder.
 * NOTE(review): the declarations of 'tmpbuf', 'rlen', 'delimiter',
 * 'old_delim', 'plen', 'vlen', the readlink-failure path, the frees of
 * tmpbuf and the return statements are on lines missing from this extract
 * -- presumably returns the byte count written to 'buf' (not
 * NUL-terminated, matching readlink) or -1 with errno set. */
443 ssize_t os390_readlink(const char* path, char* buf, size_t len) {
450 char realpathstr[PATH_MAX + 1];
/* One extra byte so the temporary copy can be NUL-terminated for the
 * str* calls below. */
452 tmpbuf = uv__malloc(len + 1);
453 if (tmpbuf == NULL) {
458 rlen = readlink(path, tmpbuf, len);
/* Targets shorter than 3 bytes or not starting with "/$" need no variable
 * interpretation. */
464 if (rlen < 3 || strncmp("/$", tmpbuf, 2) != 0) {
465 /* Straightforward readlink. */
466 memcpy(buf, tmpbuf, rlen);
472 * There is a parmlib variable at the beginning
473 * which needs interpretation.
/* Isolate the variable component: cut at the first '/' after "/$", or at
 * the terminator when the link is only the variable. */
476 delimiter = strchr(tmpbuf + 2, '/');
477 if (delimiter == NULL)
478 /* No slash at the end */
479 delimiter = strchr(tmpbuf + 2, '\0');
481 /* Read real path of the variable. */
/* Save the byte at the delimiter; it is temporarily replaced (on a
 * missing line, presumably with '\0') so realpath sees only the variable. */
482 old_delim = *delimiter;
484 if (realpath(tmpbuf, realpathstr) == NULL) {
489 /* realpathstr is not guaranteed to end with null byte.*/
490 realpathstr[PATH_MAX] = '\0';
492 /* Reset the delimiter and fill up the buffer. */
493 *delimiter = old_delim;
494 plen = strlen(delimiter);
495 vlen = strlen(realpathstr);
/* Combined resolved-prefix + remainder must fit in the caller's buffer
 * (the guarding comparison is on a missing line). */
499 errno = ENAMETOOLONG;
502 memcpy(buf, realpathstr, vlen);
503 memcpy(buf + vlen, delimiter, plen);
505 /* Done using temporary buffer. */
/* POSIX semaphore shims for z/OS, operating on UV_PLATFORM_SEM_T handles.
 * NOTE(review): all four bodies are on lines missing from this extract --
 * presumably implemented on top of SysV semaphores; confirm upstream. */
512 int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value) {
517 int sem_destroy(UV_PLATFORM_SEM_T* semid) {
522 int sem_post(UV_PLATFORM_SEM_T* semid) {
527 int sem_trywait(UV_PLATFORM_SEM_T* semid) {
532 int sem_wait(UV_PLATFORM_SEM_T* semid) {