 * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu>
 * Copyright (C) 2004 Wim Taymans <wim.taymans@gmail.com>
 * Copyright (C) 2007 Peter Kjellerstedt <pkj@axis.com>
 * Copyright (C) 2008 Ole André Vadla Ravnås <ole.andre.ravnas@tandberg.com>
 *
 * gstpoll.c: File descriptor set
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 * @short_description: Keep track of file descriptors and make it possible
 * to wait on them in a cancellable way
 *
 * A #GstPoll keeps track of file descriptors much like fd_set (used with
 * select ()) or a struct pollfd array (used with poll ()). Once created with
 * gst_poll_new(), the set can be used to wait for file descriptors to be
 * readable and/or writable. The wait can be made controllable by specifying
 * %TRUE for the @controllable flag when creating the set (or by calling
 * gst_poll_set_controllable() later).
 *
 * New file descriptors are added to the set using gst_poll_add_fd(), and
 * removed using gst_poll_remove_fd(). Controlling which file descriptors
 * should be waited for to become readable and/or writable is done using
 * gst_poll_fd_ctl_read(), gst_poll_fd_ctl_write() and gst_poll_fd_ctl_pri().
 *
 * Use gst_poll_wait() to wait for the file descriptors to actually become
 * readable and/or writable, or to time out if no file descriptor is available
 * in time. The wait can be controlled by calling gst_poll_restart() and
 * gst_poll_set_flushing().
 *
 * Once the file descriptor set has been waited for, one can use
 * gst_poll_fd_has_closed() to see if the file descriptor has been closed,
 * gst_poll_fd_has_error() to see if it has generated an error,
 * gst_poll_fd_can_read() to see if it is possible to read from the file
 * descriptor, and gst_poll_fd_can_write() to see if it is possible to
 * write to it.
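 *
 * A minimal usage sketch (illustrative only; error handling is omitted and
 * the descriptor my_fd is assumed to come from elsewhere):
 * |[<!-- language="C" -->
 * GstPollFD fd = GST_POLL_FD_INIT;
 * GstPoll *set = gst_poll_new (TRUE);
 *
 * fd.fd = my_fd;
 * gst_poll_add_fd (set, &fd);
 * gst_poll_fd_ctl_read (set, &fd, TRUE);
 *
 * // wait up to one second for the descriptor to become readable
 * if (gst_poll_wait (set, GST_SECOND) > 0 && gst_poll_fd_can_read (set, &fd)) {
 *   // read from fd.fd here
 * }
 *
 * gst_poll_remove_fd (set, &fd);
 * gst_poll_free (set);
 * ]|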
#include "gst_private.h"
#include "glib-compat-private.h"

#include <sys/types.h>

#ifdef HAVE_SYS_POLL_H

#include <sys/socket.h>

# define EWOULDBLOCK EAGAIN     /* This is just to placate gcc */

#endif /* G_OS_WIN32 */
/* OS/X needs this because of bad headers */

/* The poll() emulation on OS/X doesn't handle fds=NULL, nfds=0,
 * so we prefer our own poll emulation.
 */

#if defined(BROKEN_POLL)

#define GST_CAT_DEFAULT GST_CAT_POLL
typedef struct _WinsockFd WinsockFd;

  WSANETWORKEVENTS events;
  glong ignored_event_mask;

  GST_POLL_MODE_SELECT,
  GST_POLL_MODE_PSELECT,

  GST_POLL_MODE_WINDOWS
  /* array of fds, always written to and read from with lock */

  /* array of active fds, only written to from the waiting thread with the
   * lock and read from with the lock or without the lock from the waiting
   * thread. */

  GstPollFD control_read_fd;
  GstPollFD control_write_fd;

  GArray *active_fds_ignored;

  GArray *active_events;

  gboolean controllable;

  gint control_pending;
static gboolean gst_poll_fd_ctl_read_unlocked (GstPoll * set, GstPollFD * fd,
    gboolean active);

static gboolean gst_poll_add_fd_unlocked (GstPoll * set, GstPollFD * fd);

#define IS_FLUSHING(s)      (g_atomic_int_get(&(s)->flushing))
#define SET_FLUSHING(s,val) (g_atomic_int_set(&(s)->flushing, (val)))

#define INC_WAITING(s)      (g_atomic_int_add(&(s)->waiting, 1))
#define DEC_WAITING(s)      (g_atomic_int_add(&(s)->waiting, -1))
#define GET_WAITING(s)      (g_atomic_int_get(&(s)->waiting))

#define TEST_REBUILD(s)     (g_atomic_int_compare_and_exchange(&(s)->rebuild, 1, 0))
#define MARK_REBUILD(s)     (g_atomic_int_set(&(s)->rebuild, 1))
wake_event (GstPoll * set)

  while ((num_written = write (set->control_write_fd.fd, "W", 1)) != 1) {
    if (num_written == -1 && errno != EAGAIN && errno != EINTR) {
      g_critical ("%p: failed to wake event: %s", set, strerror (errno));

release_event (GstPoll * set)

  gchar buf[1] = { '\0' };

  while ((num_read = read (set->control_read_fd.fd, buf, 1)) != 1) {
    if (num_read == -1 && errno != EAGAIN && errno != EINTR) {
      g_critical ("%p: failed to release event: %s", set, strerror (errno));
format_last_error (gchar * buf, size_t buf_len)

  DWORD flags = FORMAT_MESSAGE_FROM_SYSTEM;

  id = GetLastError ();
  FormatMessage (flags, src, id, lang, buf, (DWORD) buf_len, NULL);

wake_event (GstPoll * set)

  if (!SetEvent (set->wakeup_event)) {
    gchar msg[1024] = "<unknown>";
    format_last_error (msg, sizeof (msg));
    g_critical ("%p: failed to set wakeup_event: %s", set, msg);

release_event (GstPoll * set)

  status = WaitForSingleObject (set->wakeup_event, INFINITE);

    const gchar *reason = "unknown";
    gchar msg[1024] = "<unknown>";

        reason = "WAIT_ABANDONED";

        reason = "WAIT_TIMEOUT";

        format_last_error (msg, sizeof (msg));

    g_critical ("%p: failed to block on wakeup_event: %s", set, reason);

  if (!ResetEvent (set->wakeup_event)) {
    gchar msg[1024] = "<unknown>";
    format_last_error (msg, sizeof (msg));
    g_critical ("%p: failed to reset wakeup_event: %s", set, msg);
/* the poll/select call is also performed on a control socket, that way
 * we can send special commands to control it */
static inline gboolean
raise_wakeup (GstPoll * set)

  gboolean result = TRUE;

  /* makes testing control_pending and wake_event() atomic. */
  g_mutex_lock (&set->lock);

  if (set->control_pending == 0) {
    /* raise when nothing pending */
    GST_LOG ("%p: raise", set);
    result = wake_event (set);

  set->control_pending++;

  g_mutex_unlock (&set->lock);
static inline gboolean
release_wakeup (GstPoll * set)

  gboolean result = FALSE;

  /* makes testing/modifying control_pending and release_event() atomic. */
  g_mutex_lock (&set->lock);

  if (set->control_pending > 0) {
    /* release, only if this was the last pending. */
    if (set->control_pending == 1) {
      GST_LOG ("%p: release", set);
      result = release_event (set);

    set->control_pending--;

  g_mutex_unlock (&set->lock);
release_all_wakeup (GstPoll * set)

  /* makes testing control_pending and release_event() atomic. */
  g_mutex_lock (&set->lock);

  if ((old = set->control_pending) > 0) {
    GST_LOG ("%p: releasing %d", set, old);
    if (release_event (set)) {
      set->control_pending = 0;

  g_mutex_unlock (&set->lock);
find_index (GArray * array, GstPollFD * fd)

  /* start by assuming the index found in the fd is still valid */
  if (fd->idx >= 0 && fd->idx < array->len) {
    ifd = &g_array_index (array, struct pollfd, fd->idx);

    ifd = &g_array_index (array, WinsockFd, fd->idx);

    if (ifd->fd == fd->fd) {

  /* the pollfd array has changed and we need to look up the fd again */
  for (i = 0; i < array->len; i++) {
    ifd = &g_array_index (array, struct pollfd, i);

    ifd = &g_array_index (array, WinsockFd, i);

    if (ifd->fd == fd->fd) {
#if !defined(HAVE_PPOLL) && defined(HAVE_POLL)
/* check if all file descriptors will fit in an fd_set */
selectable_fds (GstPoll * set)

  g_mutex_lock (&set->lock);
  for (i = 0; i < set->fds->len; i++) {
    struct pollfd *pfd = &g_array_index (set->fds, struct pollfd, i);

    if (pfd->fd >= FD_SETSIZE)

  g_mutex_unlock (&set->lock);

  g_mutex_unlock (&set->lock);
/* check if the timeout will convert to a timeout value used for poll()
 * without a loss of precision
 */
pollable_timeout (GstClockTime timeout)

  if (timeout == GST_CLOCK_TIME_NONE)

  /* not a nice multiple of milliseconds */
  if (timeout % 1000000)
choose_mode (GstPoll * set, GstClockTime timeout)

  if (set->mode == GST_POLL_MODE_AUTO) {
    mode = GST_POLL_MODE_PPOLL;
#elif defined(HAVE_POLL)
    if (!selectable_fds (set) || pollable_timeout (timeout)) {
      mode = GST_POLL_MODE_POLL;

      mode = GST_POLL_MODE_PSELECT;

      mode = GST_POLL_MODE_SELECT;

#elif defined(HAVE_PSELECT)
    mode = GST_POLL_MODE_PSELECT;

    mode = GST_POLL_MODE_SELECT;
pollfd_to_fd_set (GstPoll * set, fd_set * readfds, fd_set * writefds,
    fd_set * errorfds)

  g_mutex_lock (&set->lock);

  for (i = 0; i < set->active_fds->len; i++) {
    struct pollfd *pfd = &g_array_index (set->active_fds, struct pollfd, i);

    if (pfd->fd < FD_SETSIZE) {
      if (pfd->events & POLLIN)
        FD_SET (pfd->fd, readfds);
      if (pfd->events & POLLOUT)
        FD_SET (pfd->fd, writefds);

        FD_SET (pfd->fd, errorfds);
      if (pfd->fd > max_fd && (pfd->events & (POLLIN | POLLOUT)))

  g_mutex_unlock (&set->lock);
fd_set_to_pollfd (GstPoll * set, fd_set * readfds, fd_set * writefds,
    fd_set * errorfds)

  g_mutex_lock (&set->lock);

  for (i = 0; i < set->active_fds->len; i++) {
    struct pollfd *pfd = &g_array_index (set->active_fds, struct pollfd, i);

    if (pfd->fd < FD_SETSIZE) {
      if (FD_ISSET (pfd->fd, readfds))
        pfd->revents |= POLLIN;
      if (FD_ISSET (pfd->fd, writefds))
        pfd->revents |= POLLOUT;
      if (FD_ISSET (pfd->fd, errorfds))
        pfd->revents |= POLLERR;

  g_mutex_unlock (&set->lock);
#else /* G_OS_WIN32 */
/*
 * Translate errors thrown by the Winsock API used by GstPoll:
 * WSAEventSelect, WSAWaitForMultipleEvents and WSAEnumNetworkEvents
 */
gst_poll_winsock_error_to_errno (DWORD last_error)

  switch (last_error) {
    case WSA_INVALID_HANDLE:

    case WSA_NOT_ENOUGH_MEMORY:

      /* Anything else, including:
       * WSA_INVALID_PARAMETER, WSAEFAULT, WSAEINPROGRESS, WSAENETDOWN,
gst_poll_free_winsock_event (GstPoll * set, gint idx)

  WinsockFd *wfd = &g_array_index (set->fds, WinsockFd, idx);
  HANDLE event = g_array_index (set->events, HANDLE, idx);

  WSAEventSelect (wfd->fd, event, 0);

gst_poll_update_winsock_event_mask (GstPoll * set, gint idx, glong flags,
    gboolean active)

  wfd = &g_array_index (set->fds, WinsockFd, idx);

    wfd->event_mask |= flags;

    wfd->event_mask &= ~flags;

  /* reset ignored state if the new mask doesn't overlap at all */
  if ((wfd->ignored_event_mask & wfd->event_mask) == 0)
    wfd->ignored_event_mask = 0;
gst_poll_prepare_winsock_active_sets (GstPoll * set)

  g_array_set_size (set->active_fds, 0);
  g_array_set_size (set->active_fds_ignored, 0);
  g_array_set_size (set->active_events, 0);
  g_array_append_val (set->active_events, set->wakeup_event);

  for (i = 0; i < set->fds->len; i++) {
    WinsockFd *wfd = &g_array_index (set->fds, WinsockFd, i);
    HANDLE event = g_array_index (set->events, HANDLE, i);

    if (wfd->ignored_event_mask == 0) {

      g_array_append_val (set->active_fds, *wfd);
      g_array_append_val (set->active_events, event);

      ret = WSAEventSelect (wfd->fd, event, wfd->event_mask);
      if (G_UNLIKELY (ret != 0)) {
        errno = gst_poll_winsock_error_to_errno (WSAGetLastError ());

      g_array_append_val (set->active_fds_ignored, wfd);
gst_poll_collect_winsock_events (GstPoll * set)

  /*
   * We need to check which events are signaled, and call
   * WSAEnumNetworkEvents for those that are, which resets
   * the event and clears the internal network event records.
   */
  for (i = 0; i < set->active_fds->len; i++) {
    WinsockFd *wfd = &g_array_index (set->active_fds, WinsockFd, i);
    HANDLE event = g_array_index (set->active_events, HANDLE, i + 1);

    wait_ret = WaitForSingleObject (event, 0);
    if (wait_ret == WAIT_OBJECT_0) {
      gint enum_ret = WSAEnumNetworkEvents (wfd->fd, event, &wfd->events);

      if (G_UNLIKELY (enum_ret != 0)) {

        errno = gst_poll_winsock_error_to_errno (WSAGetLastError ());

      /* clear any previously stored result */
      memset (&wfd->events, 0, sizeof (wfd->events));

  /* If all went well we also need to reset the ignored fds. */
    res += set->active_fds_ignored->len;

    for (i = 0; i < set->active_fds_ignored->len; i++) {
      WinsockFd *wfd = g_array_index (set->active_fds_ignored, WinsockFd *, i);

      wfd->ignored_event_mask = 0;

    g_array_set_size (set->active_fds_ignored, 0);
 * gst_poll_new: (skip)
 * @controllable: whether it should be possible to control a wait.
 *
 * Create a new file descriptor set. If @controllable, it
 * is possible to restart or flush a call to gst_poll_wait() with
 * gst_poll_restart() and gst_poll_set_flushing() respectively.
 *
 * Free-function: gst_poll_free
 *
 * Returns: (transfer full) (nullable): a new #GstPoll, or %NULL in
 * case of an error. Free with gst_poll_free().
gst_poll_new (gboolean controllable)

  nset = g_slice_new0 (GstPoll);
  GST_DEBUG ("%p: new controllable : %d", nset, controllable);
  g_mutex_init (&nset->lock);

  nset->mode = GST_POLL_MODE_AUTO;
  nset->fds = g_array_new (FALSE, FALSE, sizeof (struct pollfd));
  nset->active_fds = g_array_new (FALSE, FALSE, sizeof (struct pollfd));
  nset->control_read_fd.fd = -1;
  nset->control_write_fd.fd = -1;

    gint control_sock[2];

    if (socketpair (PF_UNIX, SOCK_STREAM, 0, control_sock) < 0)

    nset->control_read_fd.fd = control_sock[0];
    nset->control_write_fd.fd = control_sock[1];

    gst_poll_add_fd_unlocked (nset, &nset->control_read_fd);
    gst_poll_fd_ctl_read_unlocked (nset, &nset->control_read_fd, TRUE);

  nset->mode = GST_POLL_MODE_WINDOWS;
  nset->fds = g_array_new (FALSE, FALSE, sizeof (WinsockFd));
  nset->active_fds = g_array_new (FALSE, FALSE, sizeof (WinsockFd));
  nset->active_fds_ignored = g_array_new (FALSE, FALSE, sizeof (WinsockFd *));
  nset->events = g_array_new (FALSE, FALSE, sizeof (HANDLE));
  nset->active_events = g_array_new (FALSE, FALSE, sizeof (HANDLE));

  nset->wakeup_event = CreateEvent (NULL, TRUE, FALSE, NULL);

  /* ensure (re)build, though already sneakily set in non-windows case */

  nset->controllable = controllable;
  nset->control_pending = 0;

  GST_WARNING ("%p: can't create socket pair !", nset);
  gst_poll_free (nset);
 * gst_poll_new_timer: (skip)
 *
 * Create a new poll object that can be used for scheduling cancellable
 * timeouts.
 *
 * A timeout is performed with gst_poll_wait(). Multiple timeouts can be
 * performed from different threads.
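 *
 * A minimal sketch of how such a timer poll might be driven (illustrative
 * only; the 100ms deadline is an arbitrary assumption):
 * |[<!-- language="C" -->
 * GstPoll *timer = gst_poll_new_timer ();
 *
 * // waiting thread: sleep for at most 100ms, or until woken up
 * if (gst_poll_wait (timer, 100 * GST_MSECOND) > 0)
 *   gst_poll_read_control (timer);   // consume the wakeup byte
 *
 * // another thread: cancel the wait early
 * gst_poll_write_control (timer);
 * ]|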
 *
 * Free-function: gst_poll_free
 *
 * Returns: (transfer full) (nullable): a new #GstPoll, or %NULL in
 * case of an error. Free with gst_poll_free().
gst_poll_new_timer (void)

  /* make a new controllable poll set */
  if (!(poll = gst_poll_new (TRUE)))
 * @set: (transfer full): a file descriptor set.
 *
 * Free a file descriptor set.

gst_poll_free (GstPoll * set)

  g_return_if_fail (set != NULL);

  GST_DEBUG ("%p: freeing", set);

  if (set->control_write_fd.fd >= 0)
    close (set->control_write_fd.fd);
  if (set->control_read_fd.fd >= 0)
    close (set->control_read_fd.fd);

  CloseHandle (set->wakeup_event);

  for (i = 0; i < set->events->len; i++)
    gst_poll_free_winsock_event (set, i);

  g_array_free (set->active_events, TRUE);
  g_array_free (set->events, TRUE);
  g_array_free (set->active_fds_ignored, TRUE);

  g_array_free (set->active_fds, TRUE);
  g_array_free (set->fds, TRUE);
  g_mutex_clear (&set->lock);
  g_slice_free (GstPoll, set);
 * gst_poll_get_read_gpollfd:
 *
 * Get a #GPollFD for the reading part of the control socket. This is useful
 * when integrating with a GSource and GMainLoop.
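 *
 * An illustrative sketch (my_source is an assumed, already created custom
 * #GSource):
 * |[<!-- language="C" -->
 * GPollFD pfd;
 *
 * gst_poll_get_read_gpollfd (set, &pfd);
 * g_source_add_poll (my_source, &pfd);
 * ]|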
gst_poll_get_read_gpollfd (GstPoll * set, GPollFD * fd)

  g_return_if_fail (set != NULL);
  g_return_if_fail (fd != NULL);

  fd->fd = set->control_read_fd.fd;

#if GLIB_SIZEOF_VOID_P == 8
  fd->fd = (gint64) set->wakeup_event;

  fd->fd = (gint) set->wakeup_event;

  fd->events = G_IO_IN | G_IO_HUP | G_IO_ERR;
 * Initializes @fd. Alternatively you can initialize it with
 * #GST_POLL_FD_INIT.

gst_poll_fd_init (GstPollFD * fd)

  g_return_if_fail (fd != NULL);
gst_poll_add_fd_unlocked (GstPoll * set, GstPollFD * fd)

  GST_DEBUG ("%p: fd (fd:%d, idx:%d)", set, fd->fd, fd->idx);

  idx = find_index (set->fds, fd);

    nfd.events = POLLERR | POLLNVAL | POLLHUP;

    g_array_append_val (set->fds, nfd);

    fd->idx = set->fds->len - 1;

    wfd.event_mask = FD_CLOSE;
    memset (&wfd.events, 0, sizeof (wfd.events));
    wfd.ignored_event_mask = 0;
    event = WSACreateEvent ();

    g_array_append_val (set->fds, wfd);
    g_array_append_val (set->events, event);

    fd->idx = set->fds->len - 1;

    GST_WARNING ("%p: fd already added !", set);
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 *
 * Add a file descriptor to the file descriptor set.
 *
 * Returns: %TRUE if the file descriptor was successfully added to the set.

gst_poll_add_fd (GstPoll * set, GstPollFD * fd)

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  g_mutex_lock (&set->lock);

  ret = gst_poll_add_fd_unlocked (set, fd);

  g_mutex_unlock (&set->lock);
 * gst_poll_remove_fd:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 *
 * Remove a file descriptor from the file descriptor set.
 *
 * Returns: %TRUE if the file descriptor was successfully removed from the set.

gst_poll_remove_fd (GstPoll * set, GstPollFD * fd)

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  GST_DEBUG ("%p: fd (fd:%d, idx:%d)", set, fd->fd, fd->idx);

  g_mutex_lock (&set->lock);

  /* get the index, -1 is an fd that is not added */
  idx = find_index (set->fds, fd);

    gst_poll_free_winsock_event (set, idx);
    g_array_remove_index_fast (set->events, idx);

    /* remove the fd at index; we use _remove_index_fast, which copies the
     * last element of the array to the freed index */
    g_array_remove_index_fast (set->fds, idx);

    /* mark fd as removed by setting the index to -1 */

    GST_WARNING ("%p: couldn't find fd !", set);

  g_mutex_unlock (&set->lock);
 * gst_poll_fd_ctl_write:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 * @active: a new status.
 *
 * Control whether the descriptor @fd in @set will be monitored for
 * writability.
 *
 * Returns: %TRUE if the descriptor was successfully updated.

gst_poll_fd_ctl_write (GstPoll * set, GstPollFD * fd, gboolean active)

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  GST_DEBUG ("%p: fd (fd:%d, idx:%d), active : %d", set,
      fd->fd, fd->idx, active);

  g_mutex_lock (&set->lock);

  idx = find_index (set->fds, fd);

    struct pollfd *pfd = &g_array_index (set->fds, struct pollfd, idx);

      pfd->events |= POLLOUT;

      pfd->events &= ~POLLOUT;

    GST_LOG ("%p: pfd->events now %d (POLLOUT:%d)", set, pfd->events, POLLOUT);

    gst_poll_update_winsock_event_mask (set, idx, FD_WRITE | FD_CONNECT,
        active);

    GST_WARNING ("%p: couldn't find fd !", set);

  g_mutex_unlock (&set->lock);
gst_poll_fd_ctl_read_unlocked (GstPoll * set, GstPollFD * fd, gboolean active)

  GST_DEBUG ("%p: fd (fd:%d, idx:%d), active : %d", set,
      fd->fd, fd->idx, active);

  idx = find_index (set->fds, fd);

    struct pollfd *pfd = &g_array_index (set->fds, struct pollfd, idx);

      pfd->events |= POLLIN;

      pfd->events &= ~POLLIN;

    gst_poll_update_winsock_event_mask (set, idx, FD_READ | FD_ACCEPT, active);

    GST_WARNING ("%p: couldn't find fd !", set);
 * gst_poll_fd_ctl_read:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 * @active: a new status.
 *
 * Control whether the descriptor @fd in @set will be monitored for
 * readability.
 *
 * Returns: %TRUE if the descriptor was successfully updated.

gst_poll_fd_ctl_read (GstPoll * set, GstPollFD * fd, gboolean active)

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  g_mutex_lock (&set->lock);

  ret = gst_poll_fd_ctl_read_unlocked (set, fd, active);

  g_mutex_unlock (&set->lock);
 * gst_poll_fd_ctl_pri:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 * @active: a new status.
 *
 * Control whether the descriptor @fd in @set will be monitored for
 * exceptional conditions (POLLPRI).
 *
 * Not implemented on Windows (will just return %FALSE there).
 *
 * Returns: %TRUE if the descriptor was successfully updated.

gst_poll_fd_ctl_pri (GstPoll * set, GstPollFD * fd, gboolean active)

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  GST_DEBUG ("%p: fd (fd:%d, idx:%d), active : %d", set,
      fd->fd, fd->idx, active);

  g_mutex_lock (&set->lock);

  idx = find_index (set->fds, fd);

    struct pollfd *pfd = &g_array_index (set->fds, struct pollfd, idx);

      pfd->events |= POLLPRI;

      pfd->events &= ~POLLPRI;

    GST_LOG ("%p: pfd->events now %d (POLLPRI:%d)", set, pfd->events, POLLPRI);

    GST_WARNING ("%p: couldn't find fd !", set);

  g_mutex_unlock (&set->lock);
 * gst_poll_fd_ignored:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 *
 * Mark @fd as ignored so that the next call to gst_poll_wait() will yield
 * the same result for @fd as last time. This function must be called if no
 * operation (read/write/recv/send/etc.) will be performed on @fd before
 * the next call to gst_poll_wait().
 *
 * This is needed because the underlying implementation might not allow
 * querying the fd more than once between calls to one of the re-enabling
 * operations.
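 *
 * A sketch of the intended call pattern (illustrative only; ready_to_consume
 * is an assumed application flag):
 * |[<!-- language="C" -->
 * if (gst_poll_wait (set, GST_CLOCK_TIME_NONE) > 0) {
 *   if (gst_poll_fd_can_read (set, &fd) && !ready_to_consume)
 *     // not reading from fd.fd right now; keep the result for the next wait
 *     gst_poll_fd_ignored (set, &fd);
 * }
 * ]|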
gst_poll_fd_ignored (GstPoll * set, GstPollFD * fd)

  g_return_if_fail (set != NULL);
  g_return_if_fail (fd != NULL);
  g_return_if_fail (fd->fd >= 0);

  g_mutex_lock (&set->lock);

  idx = find_index (set->fds, fd);

    WinsockFd *wfd = &g_array_index (set->fds, WinsockFd, idx);

    wfd->ignored_event_mask = wfd->event_mask & (FD_READ | FD_WRITE);

  g_mutex_unlock (&set->lock);
 * gst_poll_fd_has_closed:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 *
 * Check if @fd in @set has closed the connection.
 *
 * Returns: %TRUE if the connection was closed.

gst_poll_fd_has_closed (const GstPoll * set, GstPollFD * fd)

  gboolean res = FALSE;

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  g_mutex_lock (&((GstPoll *) set)->lock);

  idx = find_index (set->active_fds, fd);

    struct pollfd *pfd = &g_array_index (set->active_fds, struct pollfd, idx);

    res = (pfd->revents & POLLHUP) != 0;

    WinsockFd *wfd = &g_array_index (set->active_fds, WinsockFd, idx);

    res = (wfd->events.lNetworkEvents & FD_CLOSE) != 0;

    GST_WARNING ("%p: couldn't find fd !", set);

  g_mutex_unlock (&((GstPoll *) set)->lock);

  GST_DEBUG ("%p: fd (fd:%d, idx:%d) %d", set, fd->fd, fd->idx, res);
 * gst_poll_fd_has_error:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 *
 * Check if @fd in @set has an error.
 *
 * Returns: %TRUE if the descriptor has an error.

gst_poll_fd_has_error (const GstPoll * set, GstPollFD * fd)

  gboolean res = FALSE;

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  g_mutex_lock (&((GstPoll *) set)->lock);

  idx = find_index (set->active_fds, fd);

    struct pollfd *pfd = &g_array_index (set->active_fds, struct pollfd, idx);

    res = (pfd->revents & (POLLERR | POLLNVAL)) != 0;

    WinsockFd *wfd = &g_array_index (set->active_fds, WinsockFd, idx);

    res = (wfd->events.iErrorCode[FD_CLOSE_BIT] != 0) ||
        (wfd->events.iErrorCode[FD_READ_BIT] != 0) ||
        (wfd->events.iErrorCode[FD_WRITE_BIT] != 0) ||
        (wfd->events.iErrorCode[FD_ACCEPT_BIT] != 0) ||
        (wfd->events.iErrorCode[FD_CONNECT_BIT] != 0);

    GST_WARNING ("%p: couldn't find fd !", set);

  g_mutex_unlock (&((GstPoll *) set)->lock);

  GST_DEBUG ("%p: fd (fd:%d, idx:%d) %d", set, fd->fd, fd->idx, res);
gst_poll_fd_can_read_unlocked (const GstPoll * set, GstPollFD * fd)

  gboolean res = FALSE;

  idx = find_index (set->active_fds, fd);

    struct pollfd *pfd = &g_array_index (set->active_fds, struct pollfd, idx);

    res = (pfd->revents & POLLIN) != 0;

    WinsockFd *wfd = &g_array_index (set->active_fds, WinsockFd, idx);

    res = (wfd->events.lNetworkEvents & (FD_READ | FD_ACCEPT)) != 0;

    GST_WARNING ("%p: couldn't find fd !", set);

  GST_DEBUG ("%p: fd (fd:%d, idx:%d) %d", set, fd->fd, fd->idx, res);
 * gst_poll_fd_can_read:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 *
 * Check if @fd in @set has data to be read.
 *
 * Returns: %TRUE if the descriptor has data to be read.

gst_poll_fd_can_read (const GstPoll * set, GstPollFD * fd)

  gboolean res = FALSE;

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  g_mutex_lock (&((GstPoll *) set)->lock);

  res = gst_poll_fd_can_read_unlocked (set, fd);

  g_mutex_unlock (&((GstPoll *) set)->lock);
 * gst_poll_fd_can_write:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 *
 * Check if @fd in @set can be used for writing.
 *
 * Returns: %TRUE if the descriptor can be used for writing.

gst_poll_fd_can_write (const GstPoll * set, GstPollFD * fd)

  gboolean res = FALSE;

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  g_mutex_lock (&((GstPoll *) set)->lock);

  idx = find_index (set->active_fds, fd);

    struct pollfd *pfd = &g_array_index (set->active_fds, struct pollfd, idx);

    res = (pfd->revents & POLLOUT) != 0;

    WinsockFd *wfd = &g_array_index (set->active_fds, WinsockFd, idx);

    res = (wfd->events.lNetworkEvents & FD_WRITE) != 0;

    GST_WARNING ("%p: couldn't find fd !", set);

  g_mutex_unlock (&((GstPoll *) set)->lock);

  GST_DEBUG ("%p: fd (fd:%d, idx:%d) %d", set, fd->fd, fd->idx, res);
 * gst_poll_fd_has_pri:
 * @set: a file descriptor set.
 * @fd: a file descriptor.
 *
 * Check if @fd in @set has an exceptional condition (POLLPRI).
 *
 * Not implemented on Windows (will just return %FALSE there).
 *
 * Returns: %TRUE if the descriptor has an exceptional condition.

gst_poll_fd_has_pri (const GstPoll * set, GstPollFD * fd)

  gboolean res = FALSE;

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (fd != NULL, FALSE);
  g_return_val_if_fail (fd->fd >= 0, FALSE);

  g_mutex_lock (&((GstPoll *) set)->lock);

  idx = find_index (set->active_fds, fd);

    struct pollfd *pfd = &g_array_index (set->active_fds, struct pollfd, idx);

    res = (pfd->revents & POLLPRI) != 0;

    GST_WARNING ("%p: couldn't find fd !", set);

  g_mutex_unlock (&((GstPoll *) set)->lock);

  GST_DEBUG ("%p: fd (fd:%d, idx:%d) %d", set, fd->fd, fd->idx, res);
 * @timeout: a timeout in nanoseconds.
 *
 * Wait for activity on the file descriptors in @set. This function waits up to
 * the specified @timeout. A timeout of #GST_CLOCK_TIME_NONE waits forever.
 *
 * For #GstPoll objects created with gst_poll_new(), this function can only be
 * called from a single thread at a time. If called from multiple threads,
 * -1 will be returned with errno set to EPERM.
 *
 * This is not true for timer #GstPoll objects created with
 * gst_poll_new_timer(), where it is allowed to have multiple threads waiting
 * simultaneously.
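 *
 * A typical wait loop iteration might look like this (sketch only; the
 * GstPollFD fd, the 25ms deadline and the handle_data() helper are
 * assumptions):
 * |[<!-- language="C" -->
 * gint res = gst_poll_wait (set, 25 * GST_MSECOND);
 *
 * if (res < 0 && errno == EBUSY) {
 *   // the set was set to flushing, stop waiting
 * } else if (res > 0 && gst_poll_fd_can_read (set, &fd)) {
 *   handle_data (fd.fd);
 * }
 * ]|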
 *
 * Returns: The number of #GstPollFD in @set that have activity or 0 when no
 * activity was detected after @timeout. If an error occurs, -1 is returned
 * and errno is set.
gst_poll_wait (GstPoll * set, GstClockTime timeout)

  gboolean restarting;

  g_return_val_if_fail (set != NULL, -1);

  GST_DEBUG ("%p: timeout :%" GST_TIME_FORMAT, set, GST_TIME_ARGS (timeout));

  is_timer = set->timer;

  /* add one more waiter */
  old_waiting = INC_WAITING (set);

  /* we cannot wait from multiple threads unless we are a timer */
  if (G_UNLIKELY (old_waiting > 0 && !is_timer))
    goto already_waiting;

  /* flushing, exit immediately */
  if (G_UNLIKELY (IS_FLUSHING (set)))

    mode = choose_mode (set, timeout);

    if (TEST_REBUILD (set)) {
      g_mutex_lock (&set->lock);

      g_array_set_size (set->active_fds, set->fds->len);
      memcpy (set->active_fds->data, set->fds->data,
          set->fds->len * sizeof (struct pollfd));

      if (!gst_poll_prepare_winsock_active_sets (set))

      g_mutex_unlock (&set->lock);
      case GST_POLL_MODE_AUTO:
        g_assert_not_reached ();

      case GST_POLL_MODE_PPOLL:

        struct timespec *tsptr;

        if (timeout != GST_CLOCK_TIME_NONE) {
          GST_TIME_TO_TIMESPEC (timeout, ts);

            ppoll ((struct pollfd *) set->active_fds->data,
            set->active_fds->len, tsptr, NULL);

        g_assert_not_reached ();

      case GST_POLL_MODE_POLL:

        if (timeout != GST_CLOCK_TIME_NONE) {
          t = GST_TIME_AS_MSECONDS (timeout);

            poll ((struct pollfd *) set->active_fds->data,
            set->active_fds->len, t);

        g_assert_not_reached ();

      case GST_POLL_MODE_PSELECT:
#ifndef HAVE_PSELECT

        g_assert_not_reached ();

      case GST_POLL_MODE_SELECT:

        max_fd = pollfd_to_fd_set (set, &readfds, &writefds, &errorfds);

        if (mode == GST_POLL_MODE_SELECT) {
          struct timeval *tvptr;

          if (timeout != GST_CLOCK_TIME_NONE) {
            GST_TIME_TO_TIMEVAL (timeout, tv);

          GST_DEBUG ("%p: Calling select", set);
          res = select (max_fd + 1, &readfds, &writefds, &errorfds, tvptr);
          GST_DEBUG ("%p: After select, res:%d", set, res);

          struct timespec *tsptr;

          if (timeout != GST_CLOCK_TIME_NONE) {
            GST_TIME_TO_TIMESPEC (timeout, ts);

          GST_DEBUG ("%p: Calling pselect", set);
              pselect (max_fd + 1, &readfds, &writefds, &errorfds, tsptr, NULL);
          GST_DEBUG ("%p: After pselect, res:%d", set, res);

        fd_set_to_pollfd (set, &readfds, &writefds, &errorfds);
#else /* G_OS_WIN32 */
        g_assert_not_reached ();
      case GST_POLL_MODE_WINDOWS:

        gint ignore_count = set->active_fds_ignored->len;

        if (G_LIKELY (ignore_count == 0)) {
          if (timeout != GST_CLOCK_TIME_NONE)
            t = GST_TIME_AS_MSECONDS (timeout);

          /* already one or more ignored fds, so we quickly sweep the others */

        if (set->active_events->len != 0) {
          wait_ret = WSAWaitForMultipleEvents (set->active_events->len,
              (HANDLE *) set->active_events->data, FALSE, t, FALSE);

          wait_ret = WSA_WAIT_FAILED;
          WSASetLastError (WSA_INVALID_PARAMETER);

        if (ignore_count == 0 && wait_ret == WSA_WAIT_TIMEOUT) {

        } else if (wait_ret == WSA_WAIT_FAILED) {

          errno = gst_poll_winsock_error_to_errno (WSAGetLastError ());

          /* the first entry is the wakeup event */
          if (wait_ret - WSA_WAIT_EVENT_0 >= 1) {
            res = gst_poll_collect_winsock_events (set);

            res = 1;            /* wakeup event */

        g_assert_not_reached ();
    /* Applications need to clear the control socket themselves for timer
     * polls.
     * For other polls, we need to clear the control socket. If there was only
     * one socket with activity and it was the control socket, we need to
     * restart */
    if (release_all_wakeup (set) > 0 && res == 1)

    /* we got woken up and we are flushing, we need to stop */
    if (G_UNLIKELY (IS_FLUSHING (set)))

  } while (G_UNLIKELY (restarting));
    GST_LOG ("%p: we are already waiting", set);

    GST_LOG ("%p: we are flushing", set);

    GST_LOG ("%p: winsock error", set);
    g_mutex_unlock (&set->lock);
 * gst_poll_set_controllable:
 * @controllable: new controllable state.
 *
 * When @controllable is %TRUE, this function ensures that future calls to
 * gst_poll_wait() will be affected by gst_poll_restart() and
 * gst_poll_set_flushing().
 *
 * This function only works for non-timer #GstPoll objects created with
 * gst_poll_new().
 *
 * Returns: %TRUE if the controllability of @set could be updated.

gst_poll_set_controllable (GstPoll * set, gboolean controllable)

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (!set->timer, FALSE);

  GST_LOG ("%p: controllable : %d", set, controllable);

  set->controllable = controllable;
 * Restart any gst_poll_wait() that is in progress. This function is typically
 * used after adding descriptors to or removing them from @set.
 *
 * If @set is not controllable, then this call will have no effect.
 *
 * This function only works for non-timer #GstPoll objects created with
 * gst_poll_new().

gst_poll_restart (GstPoll * set)

  g_return_if_fail (set != NULL);
  g_return_if_fail (!set->timer);

  if (set->controllable && GET_WAITING (set) > 0) {
    /* we are controllable and waiting, wake up the waiter. The socket will be
     * cleared by the _wait() thread and the poll will be restarted */
 * gst_poll_set_flushing:
 * @flushing: new flushing state.
 *
 * When @flushing is %TRUE, this function ensures that current and future calls
 * to gst_poll_wait() will return -1, with errno set to EBUSY.
 *
 * Unsetting the flushing state will restore normal operation of @set.
 *
 * This function only works for non-timer #GstPoll objects created with
 * gst_poll_new().
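 *
 * A shutdown sketch (illustrative only; the waiting thread is assumed to
 * treat an EBUSY result from gst_poll_wait() as a request to stop):
 * |[<!-- language="C" -->
 * // from another thread: abort any blocking gst_poll_wait() on the set
 * gst_poll_set_flushing (set, TRUE);
 *
 * // ... join the waiting thread, then resume normal operation if desired
 * gst_poll_set_flushing (set, FALSE);
 * ]|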
gst_poll_set_flushing (GstPoll * set, gboolean flushing)

  g_return_if_fail (set != NULL);
  g_return_if_fail (!set->timer);

  GST_LOG ("%p: flushing: %d", set, flushing);

  /* update the new state first */
  SET_FLUSHING (set, flushing);

  if (flushing && set->controllable && GET_WAITING (set) > 0) {
    /* we are flushing, controllable and waiting, wake up the waiter. When we
     * stop the flushing operation we don't clear the wakeup fd here; this will
     * happen in the _wait() thread. */
 * gst_poll_write_control:
 *
 * Write a byte to the control socket of the controllable @set.
 * This function is mostly useful for timer #GstPoll objects created with
 * gst_poll_new_timer().
 *
 * It will make any current and future gst_poll_wait() calls return with
 * 1, meaning the control socket is set. After an equal number of calls to
 * gst_poll_read_control() have been performed, calls to gst_poll_wait() will
 * block again until their timeout expires.
 *
 * This function only works for timer #GstPoll objects created with
 * gst_poll_new_timer().
 *
 * Returns: %TRUE on success. %FALSE when the byte could not be written.
 * errno contains the detailed error code but will never be EAGAIN, EINTR or
 * EWOULDBLOCK. %FALSE always signals a critical error.
gst_poll_write_control (GstPoll * set)

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (set->timer, FALSE);

  res = raise_wakeup (set);
 * gst_poll_read_control:
 *
 * Read a byte from the control socket of the controllable @set.
 *
 * This function only works for timer #GstPoll objects created with
 * gst_poll_new_timer().
 *
 * Returns: %TRUE on success. %FALSE when there was no byte to read or
 * reading the byte failed. If there was no byte to read, and only then, errno
 * will contain EWOULDBLOCK or EAGAIN. For all other values of errno this
 * always signals a critical error.
gst_poll_read_control (GstPoll * set)

  g_return_val_if_fail (set != NULL, FALSE);
  g_return_val_if_fail (set->timer, FALSE);

  res = release_wakeup (set);