2 * Copyright (C) 2006 Edward Hervey <edward@fluendo.com>
3 * Copyright (C) 2007 Jan Schmidt <jan@fluendo.com>
4 * Copyright (C) 2007 Wim Taymans <wim@fluendo.com>
5 * Copyright (C) 2011 Sebastian Dröge <sebastian.droege@collabora.co.uk>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:element-multiqueue
27 * @see_also: #GstQueue
31 * Multiqueue is similar to a normal #GstQueue with the following additional
35 * <itemizedlist><title>Multiple streamhandling</title>
37 * The element handles queueing data on more than one stream at once. To
38 * achieve such a feature it has request sink pads (sink%u) and
39 * 'sometimes' src pads (src%u).
41 * When requesting a given sinkpad with gst_element_request_pad(),
42 * the associated srcpad for that stream will be created.
43 * Example: requesting sink1 will generate src1.
48 * <itemizedlist><title>Non-starvation on multiple streams</title>
50 * If more than one stream is used with the element, the streams' queues
51 * will be dynamically grown (up to a limit), in order to ensure that no
52 * stream is risking data starvation. This guarantees that at any given
53 * time there are at least N bytes queued and available for each individual
56 * If an EOS event comes through a srcpad, the associated queue will be
57 * considered as 'not-empty' in the queue-size-growing algorithm.
62 * <itemizedlist><title>Non-linked srcpads graceful handling</title>
64 * In order to better support dynamic switching between streams, the multiqueue
65 * (unlike the current GStreamer queue) continues to push buffers on non-linked
66 * pads rather than shutting down.
68 * In addition, to prevent a non-linked stream from very quickly consuming all
69 * available buffers and thus 'racing ahead' of the other streams, the element
70 * must ensure that buffers and inlined events for a non-linked stream are pushed
71 * in the same order as they were received, relative to the other streams
72 * controlled by the element. This means that a buffer cannot be pushed to a
73 * non-linked pad any sooner than buffers in any other stream which were received
81 * Data is queued until one of the limits specified by the
82 * #GstMultiQueue:max-size-buffers, #GstMultiQueue:max-size-bytes and/or
83 * #GstMultiQueue:max-size-time properties has been reached. Any attempt to push
84 * more buffers into the queue will block the pushing thread until more space
85 * becomes available. #GstMultiQueue:extra-size-buffers,
88 * #GstMultiQueue:extra-size-bytes and #GstMultiQueue:extra-size-time are
92 * The default queue size limits are 5 buffers, 10MB of data, or
93 * two seconds' worth of data, whichever is reached first. Note that the number
94 * of buffers will dynamically grow depending on the fill level of
98 * The #GstMultiQueue::underrun signal is emitted when all of the queues
99 * are empty. The #GstMultiQueue::overrun signal is emitted when one of the
101 * Both signals are emitted from the context of the streaming thread.
112 #include "gstmultiqueue.h"
113 #include <gst/glib-compat-private.h>
117 * @sinkpad: associated sink #GstPad
118 * @srcpad: associated source #GstPad
120 * Structure containing all information and properties about
/* Per-stream state: one GstSingleQueue exists for each requested sink/src pad
 * pair of the multiqueue.  Unless a field says otherwise, access is protected
 * by the owning multiqueue's global lock. */
123 typedef struct _GstSingleQueue GstSingleQueue;
125 struct _GstSingleQueue
127 /* unique identifier of the queue */
/* back-pointer to the owning multiqueue element */
130 GstMultiQueue *mqueue;
135 /* flowreturn of previous srcpad push */
136 GstFlowReturn srcresult;
137 /* If something was actually pushed on
138 * this pad after flushing/pad activation
139 * and the srcresult corresponds to something
/* segments seen on the sink and src sides, used to compute running times */
145 GstSegment sink_segment;
146 GstSegment src_segment;
147 gboolean has_src_segment; /* preferred over initializing the src_segment to
148 * UNDEFINED as this doesn't require adding ifs
149 * in every segment usage */
151 /* position of src/sink */
152 GstClockTimeDiff sinktime, srctime;
153 /* cached input value, used for interleave */
154 GstClockTimeDiff cached_sinktime;
155 /* TRUE if either position needs to be recalculated */
156 gboolean sink_tainted, src_tainted;
/* per-queue limits and the amount of time currently queued */
160 GstDataQueueSize max_size, extra_size;
161 GstClockTime cur_time;
167 /* Protected by global lock */
168 guint32 nextid; /* ID of the next object waiting to be pushed */
169 guint32 oldid; /* ID of the last object pushed (last in a series) */
170 guint32 last_oldid; /* Previously observed old_id, reset to MAXUINT32 on flush */
171 GstClockTimeDiff next_time; /* End running time of next buffer to be pushed */
172 GstClockTimeDiff last_time; /* Start running time of last pushed buffer */
173 GCond turn; /* SingleQueue turn waiting conditional */
175 /* for serialized queries */
178 GstQuery *last_handled_query;
180 /* For interleave calculation */
185 /* Extension of GstDataQueueItem structure for our usage */
186 typedef struct _GstMultiQueueItem GstMultiQueueItem;
188 struct _GstMultiQueueItem
/* the queued miniobject (buffer, event or query) */
190 GstMiniObject *object;
/* called to dispose of this item when it leaves the queue */
195 GDestroyNotify destroy;
201 static GstSingleQueue *gst_single_queue_new (GstMultiQueue * mqueue, guint id);
202 static void gst_single_queue_free (GstSingleQueue * squeue);
204 static void wake_up_next_non_linked (GstMultiQueue * mq);
205 static void compute_high_id (GstMultiQueue * mq);
206 static void compute_high_time (GstMultiQueue * mq);
207 static void single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
208 static void single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
210 static void update_buffering (GstMultiQueue * mq, GstSingleQueue * sq);
211 static void gst_multi_queue_post_buffering (GstMultiQueue * mq);
212 static void recheck_buffering_status (GstMultiQueue * mq);
214 static void gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full);
216 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink_%u",
219 GST_STATIC_CAPS_ANY);
221 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src_%u",
224 GST_STATIC_CAPS_ANY);
226 GST_DEBUG_CATEGORY_STATIC (multi_queue_debug);
227 #define GST_CAT_DEFAULT (multi_queue_debug)
229 /* Signals and args */
/* Default limits: we try to keep up to 2 seconds of data and, if there is no
 * time information, up to 10 MB. The number of buffers is dynamically scaled
 * to make sure there is data in the queues. Normally, the byte and time
 * limits are not hit in these conditions. */
/* NOTE: replacement lists are parenthesized so the macros expand safely in
 * larger expressions (e.g. `x / DEFAULT_MAX_SIZE_BYTES`); the unparenthesized
 * forms would bind with the surrounding operators (CERT PRE01-C). */
#define DEFAULT_MAX_SIZE_BYTES (10 * 1024 * 1024)       /* 10 MB */
#define DEFAULT_MAX_SIZE_BUFFERS 5
#define DEFAULT_MAX_SIZE_TIME (2 * GST_SECOND)

/* Secondary limits. When we hit one of the above limits we are probably
 * dealing with a badly muxed file and we scale the limits to these emergency
 * values. This is currently not yet implemented.
 * Since we dynamically scale the queue buffer size up to the limits but avoid
 * going above max-size-buffers when we can, we don't really need this
 * additional extra size. */
#define DEFAULT_EXTRA_SIZE_BYTES (10 * 1024 * 1024)     /* 10 MB */
#define DEFAULT_EXTRA_SIZE_BUFFERS 5
#define DEFAULT_EXTRA_SIZE_TIME (3 * GST_SECOND)

#define DEFAULT_USE_BUFFERING FALSE
#define DEFAULT_LOW_PERCENT 10
#define DEFAULT_HIGH_PERCENT 99
#define DEFAULT_SYNC_BY_RUNNING_TIME FALSE
#define DEFAULT_USE_INTERLEAVE FALSE
#define DEFAULT_UNLINKED_CACHE_TIME (250 * GST_MSECOND)
265 PROP_EXTRA_SIZE_BYTES,
266 PROP_EXTRA_SIZE_BUFFERS,
267 PROP_EXTRA_SIZE_TIME,
269 PROP_MAX_SIZE_BUFFERS,
274 PROP_SYNC_BY_RUNNING_TIME,
276 PROP_UNLINKED_CACHE_TIME,
280 #define GST_MULTI_QUEUE_MUTEX_LOCK(q) G_STMT_START { \
281 g_mutex_lock (&q->qlock); \
284 #define GST_MULTI_QUEUE_MUTEX_UNLOCK(q) G_STMT_START { \
285 g_mutex_unlock (&q->qlock); \
288 #define SET_PERCENT(mq, perc) G_STMT_START { \
289 if (perc != mq->percent) { \
290 mq->percent = perc; \
291 mq->percent_changed = TRUE; \
292 GST_DEBUG_OBJECT (mq, "buffering %d percent", perc); \
296 /* Convenience function */
/* Convert a segment position @val into a signed running time.
 * Returns GST_CLOCK_STIME_NONE when @val is invalid; otherwise the result of
 * gst_segment_to_running_time_full() (sign handling is in elided lines —
 * presumably negative running times map to negated values; confirm against
 * the full source). */
297 static inline GstClockTimeDiff
298 my_segment_to_running_time (GstSegment * segment, GstClockTime val)
300 GstClockTimeDiff res = GST_CLOCK_STIME_NONE;
302 if (GST_CLOCK_TIME_IS_VALID (val)) {
304 gst_segment_to_running_time_full (segment, GST_FORMAT_TIME, val, &val);
313 static void gst_multi_queue_finalize (GObject * object);
314 static void gst_multi_queue_set_property (GObject * object,
315 guint prop_id, const GValue * value, GParamSpec * pspec);
316 static void gst_multi_queue_get_property (GObject * object,
317 guint prop_id, GValue * value, GParamSpec * pspec);
319 static GstPad *gst_multi_queue_request_new_pad (GstElement * element,
320 GstPadTemplate * temp, const gchar * name, const GstCaps * caps);
321 static void gst_multi_queue_release_pad (GstElement * element, GstPad * pad);
322 static GstStateChangeReturn gst_multi_queue_change_state (GstElement *
323 element, GstStateChange transition);
325 static void gst_multi_queue_loop (GstPad * pad);
328 GST_DEBUG_CATEGORY_INIT (multi_queue_debug, "multiqueue", 0, "multiqueue element");
329 #define gst_multi_queue_parent_class parent_class
330 G_DEFINE_TYPE_WITH_CODE (GstMultiQueue, gst_multi_queue, GST_TYPE_ELEMENT,
333 static guint gst_multi_queue_signals[LAST_SIGNAL] = { 0 };
/* Class initialisation: wires up the GObject property vfuncs, the
 * underrun/overrun signals, all installable properties, the finalize vfunc,
 * the element metadata/pad templates and the GstElement vfuncs. */
336 gst_multi_queue_class_init (GstMultiQueueClass * klass)
338 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
339 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
341 gobject_class->set_property = gst_multi_queue_set_property;
342 gobject_class->get_property = gst_multi_queue_get_property;
347 * GstMultiQueue::underrun:
348 * @multiqueue: the multiqueue instance
350 * This signal is emitted from the streaming thread when there is
351 * no data in any of the queues inside the multiqueue instance (underrun).
353 * This indicates either starvation or EOS from the upstream data sources.
355 gst_multi_queue_signals[SIGNAL_UNDERRUN] =
356 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
357 G_STRUCT_OFFSET (GstMultiQueueClass, underrun), NULL, NULL,
358 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
361 * GstMultiQueue::overrun:
362 * @multiqueue: the multiqueue instance
364 * Reports that one of the queues in the multiqueue is full (overrun).
365 * A queue is full if the total amount of data inside it (num-buffers, time,
366 * size) is higher than the boundary values which can be set through the
367 * GObject properties.
369 * This can be used as an indicator of pre-roll.
371 gst_multi_queue_signals[SIGNAL_OVERRUN] =
372 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
373 G_STRUCT_OFFSET (GstMultiQueueClass, overrun), NULL, NULL,
374 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
/* NOTE(review): the nick below reads "(kB)" but the blurb and the value are
 * in bytes — looks like a long-standing copy/paste quirk; the nick is a
 * user-visible string, so confirm upstream before changing it. */
378 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BYTES,
379 g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
380 "Max. amount of data in the queue (bytes, 0=disable)",
381 0, G_MAXUINT, DEFAULT_MAX_SIZE_BYTES,
382 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
383 G_PARAM_STATIC_STRINGS));
384 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BUFFERS,
385 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
386 "Max. number of buffers in the queue (0=disable)", 0, G_MAXUINT,
387 DEFAULT_MAX_SIZE_BUFFERS,
388 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
389 G_PARAM_STATIC_STRINGS));
390 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_TIME,
391 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
392 "Max. amount of data in the queue (in ns, 0=disable)", 0, G_MAXUINT64,
393 DEFAULT_MAX_SIZE_TIME, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
394 G_PARAM_STATIC_STRINGS));
/* The extra-size-* properties are accepted but not implemented (see blurbs) */
396 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BYTES,
397 g_param_spec_uint ("extra-size-bytes", "Extra Size (kB)",
398 "Amount of data the queues can grow if one of them is empty (bytes, 0=disable)"
399 " (NOT IMPLEMENTED)",
400 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BYTES,
401 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
402 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BUFFERS,
403 g_param_spec_uint ("extra-size-buffers", "Extra Size (buffers)",
404 "Amount of buffers the queues can grow if one of them is empty (0=disable)"
405 " (NOT IMPLEMENTED)",
406 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BUFFERS,
407 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
408 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_TIME,
409 g_param_spec_uint64 ("extra-size-time", "Extra Size (ns)",
410 "Amount of time the queues can grow if one of them is empty (in ns, 0=disable)"
411 " (NOT IMPLEMENTED)",
412 0, G_MAXUINT64, DEFAULT_EXTRA_SIZE_TIME,
413 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
416 * GstMultiQueue:use-buffering
418 * Enable the buffering option in multiqueue so that BUFFERING messages are
419 * emitted based on low-/high-percent thresholds.
421 g_object_class_install_property (gobject_class, PROP_USE_BUFFERING,
422 g_param_spec_boolean ("use-buffering", "Use buffering",
423 "Emit GST_MESSAGE_BUFFERING based on low-/high-percent thresholds",
424 DEFAULT_USE_BUFFERING, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
425 G_PARAM_STATIC_STRINGS));
427 * GstMultiQueue:low-percent
429 * Low threshold percent for buffering to start.
431 g_object_class_install_property (gobject_class, PROP_LOW_PERCENT,
432 g_param_spec_int ("low-percent", "Low percent",
433 "Low threshold for buffering to start", 0, 100,
434 DEFAULT_LOW_PERCENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
436 * GstMultiQueue:high-percent
438 * High threshold percent for buffering to finish.
440 g_object_class_install_property (gobject_class, PROP_HIGH_PERCENT,
441 g_param_spec_int ("high-percent", "High percent",
442 "High threshold for buffering to finish", 0, 100,
443 DEFAULT_HIGH_PERCENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
446 * GstMultiQueue:sync-by-running-time
448 * If enabled multiqueue will synchronize deactivated or not-linked streams
449 * to the activated and linked streams by taking the running time.
450 * Otherwise multiqueue will synchronize the deactivated or not-linked
451 * streams by keeping the order in which buffers and events arrived compared
452 * to active and linked streams.
454 g_object_class_install_property (gobject_class, PROP_SYNC_BY_RUNNING_TIME,
455 g_param_spec_boolean ("sync-by-running-time", "Sync By Running Time",
456 "Synchronize deactivated or not-linked streams by running time",
457 DEFAULT_SYNC_BY_RUNNING_TIME,
458 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
460 g_object_class_install_property (gobject_class, PROP_USE_INTERLEAVE,
461 g_param_spec_boolean ("use-interleave", "Use interleave",
462 "Adjust time limits based on input interleave",
463 DEFAULT_USE_INTERLEAVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
465 g_object_class_install_property (gobject_class, PROP_UNLINKED_CACHE_TIME,
466 g_param_spec_uint64 ("unlinked-cache-time", "Unlinked cache time (ns)",
467 "Extra buffering in time for unlinked streams (if 'sync-by-running-time')",
468 0, G_MAXUINT64, DEFAULT_UNLINKED_CACHE_TIME,
469 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
470 G_PARAM_STATIC_STRINGS));
473 gobject_class->finalize = gst_multi_queue_finalize;
475 gst_element_class_set_static_metadata (gstelement_class,
477 "Generic", "Multiple data queue", "Edward Hervey <edward@fluendo.com>");
478 gst_element_class_add_static_pad_template (gstelement_class, &sinktemplate);
479 gst_element_class_add_static_pad_template (gstelement_class, &srctemplate);
481 gstelement_class->request_new_pad =
482 GST_DEBUG_FUNCPTR (gst_multi_queue_request_new_pad);
483 gstelement_class->release_pad =
484 GST_DEBUG_FUNCPTR (gst_multi_queue_release_pad);
485 gstelement_class->change_state =
486 GST_DEBUG_FUNCPTR (gst_multi_queue_change_state);
/* Instance initialisation: start with no single queues, apply the compile-time
 * default limits/thresholds, and create the two mutexes (global queue lock and
 * the lock serialising BUFFERING message posting). */
490 gst_multi_queue_init (GstMultiQueue * mqueue)
492 mqueue->nbqueues = 0;
493 mqueue->queues = NULL;
495 mqueue->max_size.bytes = DEFAULT_MAX_SIZE_BYTES;
496 mqueue->max_size.visible = DEFAULT_MAX_SIZE_BUFFERS;
497 mqueue->max_size.time = DEFAULT_MAX_SIZE_TIME;
499 mqueue->extra_size.bytes = DEFAULT_EXTRA_SIZE_BYTES;
500 mqueue->extra_size.visible = DEFAULT_EXTRA_SIZE_BUFFERS;
501 mqueue->extra_size.time = DEFAULT_EXTRA_SIZE_TIME;
503 mqueue->use_buffering = DEFAULT_USE_BUFFERING;
504 mqueue->low_percent = DEFAULT_LOW_PERCENT;
505 mqueue->high_percent = DEFAULT_HIGH_PERCENT;
507 mqueue->sync_by_running_time = DEFAULT_SYNC_BY_RUNNING_TIME;
508 mqueue->use_interleave = DEFAULT_USE_INTERLEAVE;
509 mqueue->unlinked_cache_time = DEFAULT_UNLINKED_CACHE_TIME;
/* no high running-time known yet; recomputed once data flows */
513 mqueue->high_time = GST_CLOCK_STIME_NONE;
515 g_mutex_init (&mqueue->qlock);
516 g_mutex_init (&mqueue->buffering_post_lock);
/* GObject finalize: free every remaining single queue, the list holding them,
 * and the instance mutexes, then chain up to the parent class. */
520 gst_multi_queue_finalize (GObject * object)
522 GstMultiQueue *mqueue = GST_MULTI_QUEUE (object);
524 g_list_foreach (mqueue->queues, (GFunc) gst_single_queue_free, NULL);
525 g_list_free (mqueue->queues);
526 mqueue->queues = NULL;
/* bump the cookie so any concurrent iteration notices the list changed */
527 mqueue->queues_cookie++;
529 /* free/unref instance data */
530 g_mutex_clear (&mqueue->qlock);
531 g_mutex_clear (&mqueue->buffering_post_lock);
533 G_OBJECT_CLASS (parent_class)->finalize (object);
536 #define SET_CHILD_PROPERTY(mq,format) G_STMT_START { \
537 GList * tmp = mq->queues; \
539 GstSingleQueue *q = (GstSingleQueue*)tmp->data; \
540 q->max_size.format = mq->max_size.format; \
541 update_buffering (mq, q); \
542 gst_data_queue_limits_changed (q->queue); \
543 tmp = g_list_next(tmp); \
/* GObject set_property vfunc.  Size-limit changes are propagated to every
 * child single queue under the global lock (via SET_CHILD_PROPERTY or the
 * explicit loop for the buffer count) and followed by a buffering
 * re-evaluation/message post outside the lock. */
548 gst_multi_queue_set_property (GObject * object, guint prop_id,
549 const GValue * value, GParamSpec * pspec)
551 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
554 case PROP_MAX_SIZE_BYTES:
555 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
556 mq->max_size.bytes = g_value_get_uint (value);
557 SET_CHILD_PROPERTY (mq, bytes);
558 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
559 gst_multi_queue_post_buffering (mq);
561 case PROP_MAX_SIZE_BUFFERS:
564 gint new_size = g_value_get_uint (value);
566 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
568 mq->max_size.visible = new_size;
/* walk all single queues and apply the new visible limit, taking care not
 * to shrink below the current fill level of a queue that auto-grew */
572 GstDataQueueSize size;
573 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
574 gst_data_queue_get_level (q->queue, &size);
576 GST_DEBUG_OBJECT (mq, "Queue %d: Requested buffers size: %d,"
577 " current: %d, current max %d", q->id, new_size, size.visible,
578 q->max_size.visible);
580 /* do not reduce max size below current level if the single queue
581 * has grown because of empty queue */
583 q->max_size.visible = new_size;
584 } else if (q->max_size.visible == 0) {
585 q->max_size.visible = MAX (new_size, size.visible);
586 } else if (new_size > size.visible) {
587 q->max_size.visible = new_size;
589 update_buffering (mq, q);
590 gst_data_queue_limits_changed (q->queue);
591 tmp = g_list_next (tmp);
594 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
595 gst_multi_queue_post_buffering (mq);
599 case PROP_MAX_SIZE_TIME:
600 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
601 mq->max_size.time = g_value_get_uint64 (value);
602 SET_CHILD_PROPERTY (mq, time);
603 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
604 gst_multi_queue_post_buffering (mq);
/* extra-size-* are stored but not otherwise acted upon (NOT IMPLEMENTED) */
606 case PROP_EXTRA_SIZE_BYTES:
607 mq->extra_size.bytes = g_value_get_uint (value);
609 case PROP_EXTRA_SIZE_BUFFERS:
610 mq->extra_size.visible = g_value_get_uint (value);
612 case PROP_EXTRA_SIZE_TIME:
613 mq->extra_size.time = g_value_get_uint64 (value);
615 case PROP_USE_BUFFERING:
616 mq->use_buffering = g_value_get_boolean (value);
617 recheck_buffering_status (mq);
619 case PROP_LOW_PERCENT:
620 mq->low_percent = g_value_get_int (value);
621 /* Recheck buffering status - the new low-percent value might
622 * be above the current fill level. If the old low-percent one
623 * was below the current level, this means that mq->buffering is
624 * disabled and needs to be re-enabled. */
625 recheck_buffering_status (mq);
627 case PROP_HIGH_PERCENT:
628 mq->high_percent = g_value_get_int (value);
629 recheck_buffering_status (mq);
631 case PROP_SYNC_BY_RUNNING_TIME:
632 mq->sync_by_running_time = g_value_get_boolean (value);
634 case PROP_USE_INTERLEAVE:
635 mq->use_interleave = g_value_get_boolean (value);
637 case PROP_UNLINKED_CACHE_TIME:
638 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
639 mq->unlinked_cache_time = g_value_get_uint64 (value);
640 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
641 gst_multi_queue_post_buffering (mq);
644 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* GObject get_property vfunc: read the requested field back into @value.
 * The whole switch runs under the global lock so reads are consistent with
 * concurrent set_property calls. */
650 gst_multi_queue_get_property (GObject * object, guint prop_id,
651 GValue * value, GParamSpec * pspec)
653 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
655 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
658 case PROP_EXTRA_SIZE_BYTES:
659 g_value_set_uint (value, mq->extra_size.bytes);
661 case PROP_EXTRA_SIZE_BUFFERS:
662 g_value_set_uint (value, mq->extra_size.visible);
664 case PROP_EXTRA_SIZE_TIME:
665 g_value_set_uint64 (value, mq->extra_size.time);
667 case PROP_MAX_SIZE_BYTES:
668 g_value_set_uint (value, mq->max_size.bytes);
670 case PROP_MAX_SIZE_BUFFERS:
671 g_value_set_uint (value, mq->max_size.visible);
673 case PROP_MAX_SIZE_TIME:
674 g_value_set_uint64 (value, mq->max_size.time);
676 case PROP_USE_BUFFERING:
677 g_value_set_boolean (value, mq->use_buffering);
679 case PROP_LOW_PERCENT:
680 g_value_set_int (value, mq->low_percent);
682 case PROP_HIGH_PERCENT:
683 g_value_set_int (value, mq->high_percent);
685 case PROP_SYNC_BY_RUNNING_TIME:
686 g_value_set_boolean (value, mq->sync_by_running_time);
688 case PROP_USE_INTERLEAVE:
689 g_value_set_boolean (value, mq->use_interleave);
691 case PROP_UNLINKED_CACHE_TIME:
692 g_value_set_uint64 (value, mq->unlinked_cache_time);
695 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
699 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Pad iterate_internal_links implementation: for a given pad, return a
 * single-element iterator containing the opposite pad (sink<->src) of the
 * same single queue.  The single queue is looked up via the pad's element
 * private data, under the global lock. */
703 gst_multi_queue_iterate_internal_links (GstPad * pad, GstObject * parent)
705 GstIterator *it = NULL;
707 GstSingleQueue *squeue;
708 GstMultiQueue *mq = GST_MULTI_QUEUE (parent);
711 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
712 squeue = gst_pad_get_element_private (pad);
716 if (squeue->sinkpad == pad)
717 opad = gst_object_ref (squeue->srcpad);
718 else if (squeue->srcpad == pad)
719 opad = gst_object_ref (squeue->sinkpad);
723 g_value_init (&val, GST_TYPE_PAD);
724 g_value_set_object (&val, opad);
725 it = gst_iterator_new_single (GST_TYPE_PAD, &val);
726 g_value_unset (&val);
/* gst_iterator_new_single took its own reference via the GValue */
728 gst_object_unref (opad);
731 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* GstElement request_new_pad vfunc: parse the requested id out of "sink_%u",
 * create a new single queue (which also creates the matching src pad) and
 * return the new sink pad, or NULL on failure.
 * NOTE(review): the visible code scans `name + 4` directly — handling for a
 * NULL/auto-generated name presumably lives in the elided lines; confirm
 * against the full source. */
742 gst_multi_queue_request_new_pad (GstElement * element, GstPadTemplate * temp,
743 const gchar * name, const GstCaps * caps)
745 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
746 GstSingleQueue *squeue;
751 sscanf (name + 4, "_%u", &temp_id);
752 GST_LOG_OBJECT (element, "name : %s (id %d)", GST_STR_NULL (name), temp_id);
755 /* Create a new single queue, add the sink and source pad and return the sink pad */
756 squeue = gst_single_queue_new (mqueue, temp_id);
758 new_pad = squeue ? squeue->sinkpad : NULL;
760 GST_DEBUG_OBJECT (mqueue, "Returning pad %" GST_PTR_FORMAT, new_pad);
/* GstElement release_pad vfunc: find the single queue owning the given sink
 * pad, unlink it from the element (list removal under the lock, then pad
 * deactivation/removal outside the lock) and free it. */
766 gst_multi_queue_release_pad (GstElement * element, GstPad * pad)
768 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
769 GstSingleQueue *sq = NULL;
772 GST_LOG_OBJECT (element, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
774 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
775 /* Find which single queue it belongs to, knowing that it should be a sinkpad */
776 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
777 sq = (GstSingleQueue *) tmp->data;
779 if (sq->sinkpad == pad)
/* not found: warn and bail out without touching anything */
784 GST_WARNING_OBJECT (mqueue, "That pad doesn't belong to this element ???");
785 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
789 /* FIXME: The removal of the singlequeue should probably not happen until it
790 * finishes draining */
792 /* remove it from the list */
793 mqueue->queues = g_list_delete_link (mqueue->queues, tmp);
794 mqueue->queues_cookie++;
796 /* FIXME : recompute next-non-linked */
797 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
799 /* delete SingleQueue */
800 gst_data_queue_set_flushing (sq->queue, TRUE);
802 gst_pad_set_active (sq->srcpad, FALSE);
803 gst_pad_set_active (sq->sinkpad, FALSE);
804 gst_pad_set_element_private (sq->srcpad, NULL);
805 gst_pad_set_element_private (sq->sinkpad, NULL);
806 gst_element_remove_pad (element, sq->srcpad);
807 gst_element_remove_pad (element, sq->sinkpad);
808 gst_single_queue_free (sq);
/* GstElement change_state vfunc.  READY->PAUSED clears the per-queue flushing
 * flags and re-applies the visible limit; PAUSED->READY wakes up every queue
 * waiting on its turn or on a serialized query.  Then chains up to the parent
 * class before handling downward-transition cleanup. */
811 static GstStateChangeReturn
812 gst_multi_queue_change_state (GstElement * element, GstStateChange transition)
814 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
815 GstSingleQueue *sq = NULL;
816 GstStateChangeReturn result;
818 switch (transition) {
819 case GST_STATE_CHANGE_READY_TO_PAUSED:{
822 /* Set all pads to non-flushing */
823 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
824 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
825 sq = (GstSingleQueue *) tmp->data;
826 sq->flushing = FALSE;
829 /* the visible limit might not have been set on single queues that have grown because other queues were empty */
830 SET_CHILD_PROPERTY (mqueue, visible);
832 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
833 gst_multi_queue_post_buffering (mqueue);
837 case GST_STATE_CHANGE_PAUSED_TO_READY:{
840 /* Un-wait all waiting pads */
841 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
842 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
843 sq = (GstSingleQueue *) tmp->data;
845 g_cond_signal (&sq->turn);
/* fail any pending serialized query so its poster can return */
847 sq->last_query = FALSE;
848 g_cond_signal (&sq->query_handled);
850 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
857 result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
859 switch (transition) {
/* Start or stop flushing a single queue.
 * flush-start: mark the srcpad result FLUSHING, set the data queue flushing,
 * wake up any task waiting for its turn or a query answer, then pause the
 * srcpad task.  flush-stop: drop queued data (fully if @full), reset segments,
 * ids and cached running times, clear the flushing state and restart the
 * srcpad streaming task. */
868 gst_single_queue_flush (GstMultiQueue * mq, GstSingleQueue * sq, gboolean flush,
873 GST_DEBUG_OBJECT (mq, "flush %s queue %d", (flush ? "start" : "stop"),
877 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
878 sq->srcresult = GST_FLOW_FLUSHING;
879 gst_data_queue_set_flushing (sq->queue, TRUE);
883 /* wake up non-linked task */
884 GST_LOG_OBJECT (mq, "SingleQueue %d : waking up eventually waiting task",
886 g_cond_signal (&sq->turn);
887 sq->last_query = FALSE;
888 g_cond_signal (&sq->query_handled);
889 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
891 GST_LOG_OBJECT (mq, "SingleQueue %d : pausing task", sq->id);
892 result = gst_pad_pause_task (sq->srcpad);
/* both running-time positions must be recomputed after the flush */
893 sq->sink_tainted = sq->src_tainted = TRUE;
895 gst_single_queue_flush_queue (sq, full);
897 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
898 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
899 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
900 sq->has_src_segment = FALSE;
901 /* All pads start off not-linked for a smooth kick-off */
902 sq->srcresult = GST_FLOW_OK;
905 sq->max_size.visible = mq->max_size.visible;
909 sq->last_oldid = G_MAXUINT32;
910 sq->next_time = GST_CLOCK_STIME_NONE;
911 sq->last_time = GST_CLOCK_STIME_NONE;
912 sq->cached_sinktime = GST_CLOCK_STIME_NONE;
913 gst_data_queue_set_flushing (sq->queue, FALSE);
915 /* Reset high time to be recomputed next */
916 mq->high_time = GST_CLOCK_STIME_NONE;
918 sq->flushing = FALSE;
919 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
921 GST_LOG_OBJECT (mq, "SingleQueue %d : starting task", sq->id);
923 gst_pad_start_task (sq->srcpad, (GstTaskFunction) gst_multi_queue_loop,
929 /* WITH LOCK TAKEN */
931 get_percentage (GstSingleQueue * sq)
933 GstDataQueueSize size;
936 gst_data_queue_get_level (sq->queue, &size);
938 GST_DEBUG_OBJECT (sq->mqueue,
939 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
940 G_GUINT64_FORMAT, sq->id, size.visible, sq->max_size.visible,
941 size.bytes, sq->max_size.bytes, sq->cur_time, sq->max_size.time);
943 /* get bytes and time percentages and take the max */
944 if (sq->is_eos || sq->srcresult == GST_FLOW_NOT_LINKED || sq->is_sparse) {
948 if (sq->max_size.time > 0) {
949 tmp = (sq->cur_time * 100) / sq->max_size.time;
950 percent = MAX (percent, tmp);
952 if (sq->max_size.bytes > 0) {
953 tmp = (size.bytes * 100) / sq->max_size.bytes;
954 percent = MAX (percent, tmp);
961 /* WITH LOCK TAKEN */
/* Re-evaluate the buffering state after @sq changed: leave buffering once any
 * queue reaches high-percent (percentage only ever increases while leaving),
 * and (re)enter buffering when no queue is at high-percent and @sq dropped
 * below low-percent. */
963 update_buffering (GstMultiQueue * mq, GstSingleQueue * sq)
967 /* nothing to do when we are not in buffering mode */
968 if (!mq->use_buffering)
971 percent = get_percentage (sq);
974 if (percent >= mq->high_percent) {
975 mq->buffering = FALSE;
977 /* make sure it increases */
978 percent = MAX (mq->percent, percent);
980 SET_PERCENT (mq, percent);
983 gboolean is_buffering = TRUE;
/* only start buffering if no other queue is already at/above high-percent */
985 for (iter = mq->queues; iter; iter = g_list_next (iter)) {
986 GstSingleQueue *oq = (GstSingleQueue *) iter->data;
988 if (get_percentage (oq) >= mq->high_percent) {
989 is_buffering = FALSE;
995 if (is_buffering && percent < mq->low_percent) {
996 mq->buffering = TRUE;
997 SET_PERCENT (mq, percent);
/* Post a BUFFERING message on the bus if the cached percentage changed since
 * the last post.  buffering_post_lock serialises posters so messages go out
 * in the order the percentages were computed; the message itself is posted
 * outside the global queue lock.  The raw percentage is rescaled so that
 * high-percent maps to 100%. */
1003 gst_multi_queue_post_buffering (GstMultiQueue * mq)
1005 GstMessage *msg = NULL;
1007 g_mutex_lock (&mq->buffering_post_lock);
1008 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1009 if (mq->percent_changed) {
1010 gint percent = mq->percent;
1012 mq->percent_changed = FALSE;
1014 percent = percent * 100 / mq->high_percent;
1019 GST_DEBUG_OBJECT (mq, "Going to post buffering: %d%%", percent);
1020 msg = gst_message_new_buffering (GST_OBJECT_CAST (mq), percent);
1022 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1025 gst_element_post_message (GST_ELEMENT_CAST (mq), msg);
1027 g_mutex_unlock (&mq->buffering_post_lock);
/* Recompute the buffering state after use-buffering or a threshold property
 * changed.  If buffering was just disabled while still active, force 100% so
 * downstream resumes; otherwise re-run update_buffering() for every single
 * queue and post any resulting BUFFERING message. */
1031 recheck_buffering_status (GstMultiQueue * mq)
1033 if (!mq->use_buffering && mq->buffering) {
1034 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1035 mq->buffering = FALSE;
1036 GST_DEBUG_OBJECT (mq,
1037 "Buffering property disabled, but queue was still buffering; setting percentage to 100%%");
1038 SET_PERCENT (mq, 100);
1039 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1042 if (mq->use_buffering) {
1046 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1048 /* force fill level percentage to be recalculated */
1049 old_perc = mq->percent;
1054 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
1055 update_buffering (mq, q);
1056 gst_data_queue_limits_changed (q->queue);
1057 tmp = g_list_next (tmp);
1060 GST_DEBUG_OBJECT (mq, "Recalculated fill level: old: %d%% new: %d%%",
1061 old_perc, mq->percent);
1063 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1066 gst_multi_queue_post_buffering (mq);
/* Recompute the input interleave: the spread between the lowest and highest
 * cached sink running times across all (non-sparse) single queues.  The
 * stored interleave is padded (150% + 250ms), only updated on growth, initial
 * data, or a sustained shrink, and mirrored into max-size-time for all
 * children.  A queue that has not produced any timing yet forces a large 5s
 * interleave so it can fill up. */
1070 calculate_interleave (GstMultiQueue * mq)
1072 GstClockTimeDiff low, high;
1073 GstClockTime interleave;
1076 low = high = GST_CLOCK_STIME_NONE;
1077 interleave = mq->interleave;
1078 /* Go over all single queues and calculate lowest/highest value */
1079 for (tmp = mq->queues; tmp; tmp = tmp->next) {
1080 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
1081 /* Ignore sparse streams for interleave calculation */
1084 /* If a stream is not active yet (hasn't received any buffers), set
1085 * a maximum interleave to allow it to receive more data */
1088 "queue %d is not active yet, forcing interleave to 5s", sq->id);
1089 mq->interleave = 5 * GST_SECOND;
1090 /* Update max-size time */
1091 mq->max_size.time = mq->interleave;
1092 SET_CHILD_PROPERTY (mq, time);
1095 if (GST_CLOCK_STIME_IS_VALID (sq->cached_sinktime)) {
1096 if (low == GST_CLOCK_STIME_NONE || sq->cached_sinktime < low)
1097 low = sq->cached_sinktime;
1098 if (high == GST_CLOCK_STIME_NONE || sq->cached_sinktime > high)
1099 high = sq->cached_sinktime;
1102 "queue %d , sinktime:%" GST_STIME_FORMAT " low:%" GST_STIME_FORMAT
1103 " high:%" GST_STIME_FORMAT, sq->id,
1104 GST_STIME_ARGS (sq->cached_sinktime), GST_STIME_ARGS (low),
1105 GST_STIME_ARGS (high));
1108 if (GST_CLOCK_STIME_IS_VALID (low) && GST_CLOCK_STIME_IS_VALID (high)) {
1109 interleave = high - low;
1110 /* Padding of interleave and minimum value */
1111 /* FIXME : Make the minimum time interleave a property */
1112 interleave = (150 * interleave / 100) + 250 * GST_MSECOND;
1114 /* Update the stored interleave if:
1115 * * No data has arrived yet (high == low)
1116 * * Or it went higher
1117 * * Or it went lower and we've gone past the previous interleave needed */
1118 if (high == low || interleave > mq->interleave ||
1119 ((mq->last_interleave_update + (2 * MIN (GST_SECOND,
1120 mq->interleave)) < low)
1121 && interleave < (mq->interleave * 3 / 4))) {
1122 /* Update the interleave */
1123 mq->interleave = interleave;
1124 mq->last_interleave_update = high;
1125 /* Update max-size time */
1126 mq->max_size.time = mq->interleave;
1127 SET_CHILD_PROPERTY (mq, time);
1132 GST_DEBUG_OBJECT (mq,
1133 "low:%" GST_STIME_FORMAT " high:%" GST_STIME_FORMAT " interleave:%"
1134 GST_TIME_FORMAT " mq->interleave:%" GST_TIME_FORMAT
1135 " last_interleave_update:%" GST_STIME_FORMAT, GST_STIME_ARGS (low),
1136 GST_STIME_ARGS (high), GST_TIME_ARGS (interleave),
1137 GST_TIME_ARGS (mq->interleave),
1138 GST_STIME_ARGS (mq->last_interleave_update));
1142 /* calculate the diff between running time on the sink and src of the queue.
1143 * This is the total amount of time in the queue.
1144 * WITH LOCK TAKEN */
1146 update_time_level (GstMultiQueue * mq, GstSingleQueue * sq)
1148 GstClockTimeDiff sink_time, src_time;
/* Sink side: recompute the running time only if a new buffer/segment
 * "tainted" the cached value since the last computation */
1150 if (sq->sink_tainted) {
1151 sink_time = sq->sinktime = my_segment_to_running_time (&sq->sink_segment,
1152 sq->sink_segment.position);
1154 GST_DEBUG_OBJECT (mq,
1155 "queue %d sink_segment.position:%" GST_TIME_FORMAT ", sink_time:%"
1156 GST_STIME_FORMAT, sq->id, GST_TIME_ARGS (sq->sink_segment.position),
1157 GST_STIME_ARGS (sink_time));
1159 if (G_UNLIKELY (sq->last_time == GST_CLOCK_STIME_NONE)) {
1160 /* If the single queue still doesn't have a last_time set, this means
1161 * that nothing has been pushed out yet.
1162 * In order for the high_time computation to be as efficient as possible,
1163 * we set the last_time */
1164 sq->last_time = sink_time;
1166 if (G_UNLIKELY (sink_time != GST_CLOCK_STIME_NONE)) {
1167 /* if we have a time, we become untainted and use the time */
1168 sq->sink_tainted = FALSE;
1169 if (mq->use_interleave) {
1170 sq->cached_sinktime = sink_time;
1171 calculate_interleave (mq);
/* else: reuse the cached sink running-time */
1175 sink_time = sq->sinktime;
/* Src side: same taint-cache scheme as the sink side above */
1177 if (sq->src_tainted) {
1178 GstSegment *segment;
1181 if (sq->has_src_segment) {
1182 segment = &sq->src_segment;
1183 position = sq->src_segment.position;
1186 * If the src pad had no segment yet, use the sink segment
1187 * to avoid signalling overrun if the received sink segment has a
1188 * a position > max-size-time while the src pad time would be the default=0
1190 * This can happen when switching pads on chained/adaptive streams and the
1191 * new chain has a segment with a much larger position
1193 segment = &sq->sink_segment;
1194 position = sq->sink_segment.position;
1197 src_time = sq->srctime = my_segment_to_running_time (segment, position);
1198 /* if we have a time, we become untainted and use the time */
1199 if (G_UNLIKELY (src_time != GST_CLOCK_STIME_NONE)) {
1200 sq->src_tainted = FALSE;
1203 src_time = sq->srctime;
1205 GST_DEBUG_OBJECT (mq,
1206 "queue %d, sink %" GST_STIME_FORMAT ", src %" GST_STIME_FORMAT, sq->id,
1207 GST_STIME_ARGS (sink_time), GST_STIME_ARGS (src_time));
1209 /* This allows for streams with out of order timestamping - sometimes the
1210 * emerging timestamp is later than the arriving one(s) */
1211 if (G_LIKELY (GST_CLOCK_STIME_IS_VALID (sink_time) &&
1212 GST_CLOCK_STIME_IS_VALID (src_time) && sink_time > src_time))
1213 sq->cur_time = sink_time - src_time;
1217 /* updating the time level can change the buffering state */
1218 update_buffering (mq, sq);
1223 /* take a SEGMENT event and apply the values to segment, updating the time
1224 * level of queue. */
1226 apply_segment (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1227 GstSegment * segment)
1229 gst_event_copy_segment (event, segment);
1231 /* now configure the values, we use these to track timestamps on the
1233 if (segment->format != GST_FORMAT_TIME) {
1234 /* non-time format, pretend the current time segment is closed with a
1235 * 0 start and unknown stop time. */
1236 segment->format = GST_FORMAT_TIME;
1241 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1243 /* Make sure we have a valid initial segment position (and not garbage
/* forward playback starts at 'start', reverse playback at 'stop' */
1245 if (segment->rate > 0.0)
1246 segment->position = segment->start;
1248 segment->position = segment->stop;
/* Mark the corresponding side's time level as needing recomputation */
1249 if (segment == &sq->sink_segment)
1250 sq->sink_tainted = TRUE;
1252 sq->has_src_segment = TRUE;
1253 sq->src_tainted = TRUE;
1256 GST_DEBUG_OBJECT (mq,
1257 "queue %d, configured SEGMENT %" GST_SEGMENT_FORMAT, sq->id, segment);
1259 /* segment can update the time level of the queue */
1260 update_time_level (mq, sq);
1262 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1263 gst_multi_queue_post_buffering (mq);
1266 /* take a buffer and update segment, updating the time level of the queue. */
1268 apply_buffer (GstMultiQueue * mq, GstSingleQueue * sq, GstClockTime timestamp,
1269 GstClockTime duration, GstSegment * segment)
1271 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1273 /* if no timestamp is set, assume it's continuous with the previous
1275 if (timestamp == GST_CLOCK_TIME_NONE)
1276 timestamp = segment->position;
/* advance the position to the end of the buffer when a duration is known */
1279 if (duration != GST_CLOCK_TIME_NONE)
1280 timestamp += duration;
1282 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1283 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1284 GST_TIME_ARGS (timestamp));
1286 segment->position = timestamp;
/* flag the matching side so update_time_level() recomputes it */
1288 if (segment == &sq->sink_segment)
1289 sq->sink_tainted = TRUE;
1291 sq->src_tainted = TRUE;
1293 /* calc diff with other end */
1294 update_time_level (mq, sq);
1295 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1296 gst_multi_queue_post_buffering (mq);
/* Take a GAP event and advance the given segment's position to the end of
 * the gap, updating the time level of the queue (same scheme as
 * apply_buffer() above). */
1300 apply_gap (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1301 GstSegment * segment)
1303 GstClockTime timestamp;
1304 GstClockTime duration;
1306 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
/* FIX: '&timestamp' was mangled to the HTML entity residue '×tamp'
 * ('&times;' + 'tamp') by a bad encoding round-trip; restored the
 * address-of argument so the gap start is parsed into 'timestamp'. */
1308 gst_event_parse_gap (event, &timestamp, &duration);
1310 if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
1312 if (GST_CLOCK_TIME_IS_VALID (duration)) {
1313 timestamp += duration;
1316 segment->position = timestamp;
/* flag the matching side so update_time_level() recomputes it */
1318 if (segment == &sq->sink_segment)
1319 sq->sink_tainted = TRUE;
1321 sq->src_tainted = TRUE;
1323 /* calc diff with other end */
1324 update_time_level (mq, sq);
1327 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1328 gst_multi_queue_post_buffering (mq);
/* Compute the running time of a queued item for time-level bookkeeping.
 * Buffers/buffer-lists use DTS-or-PTS (plus duration when 'end' is TRUE,
 * clamped to the segment stop); SEGMENT events yield the running time of
 * their start position. Items with no usable time (other events, queries,
 * buffers without timestamps) return GST_CLOCK_STIME_NONE. */
1331 static GstClockTimeDiff
1332 get_running_time (GstSegment * segment, GstMiniObject * object, gboolean end)
1334 GstClockTimeDiff time = GST_CLOCK_STIME_NONE;
1336 if (GST_IS_BUFFER (object)) {
1337 GstBuffer *buf = GST_BUFFER_CAST (object);
1338 GstClockTime btime = GST_BUFFER_DTS_OR_PTS (buf);
1340 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1341 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1342 btime += GST_BUFFER_DURATION (buf);
/* never report past the configured segment stop */
1343 if (btime > segment->stop)
1344 btime = segment->stop;
1345 time = my_segment_to_running_time (segment, btime);
1347 } else if (GST_IS_BUFFER_LIST (object)) {
1348 GstBufferList *list = GST_BUFFER_LIST_CAST (object);
/* iterate the whole list; 'time' ends up reflecting the last timestamped
 * buffer in the list */
1352 n = gst_buffer_list_length (list);
1353 for (i = 0; i < n; i++) {
1355 buf = gst_buffer_list_get (list, i);
1356 btime = GST_BUFFER_DTS_OR_PTS (buf);
1357 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1358 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1359 btime += GST_BUFFER_DURATION (buf);
1360 if (btime > segment->stop)
1361 btime = segment->stop;
1362 time = my_segment_to_running_time (segment, btime);
1369 } else if (GST_IS_EVENT (object)) {
1370 GstEvent *event = GST_EVENT_CAST (object);
1372 /* For newsegment events return the running time of the start position */
1373 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
1374 const GstSegment *new_segment;
1376 gst_event_parse_segment (event, &new_segment);
1377 if (new_segment->format == GST_FORMAT_TIME) {
1379 my_segment_to_running_time ((GstSegment *) new_segment,
1380 new_segment->start);
/* Push one dequeued item (buffer, event or serialized query) downstream on
 * the single queue's srcpad. When *allow_drop is TRUE the queue is in
 * EOS-drop mode and items are discarded instead of pushed; SEGMENT events
 * and a fresh EOS terminate/adjust that mode. Returns the flow result to
 * record as srcresult. */
1389 static GstFlowReturn
1390 gst_single_queue_push_one (GstMultiQueue * mq, GstSingleQueue * sq,
1391 GstMiniObject * object, gboolean * allow_drop)
1393 GstFlowReturn result = sq->srcresult;
1395 if (GST_IS_BUFFER (object)) {
1397 GstClockTime timestamp, duration;
1399 buffer = GST_BUFFER_CAST (object);
1400 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
1401 duration = GST_BUFFER_DURATION (buffer);
/* advance the src-side segment position before pushing */
1403 apply_buffer (mq, sq, timestamp, duration, &sq->src_segment);
1405 /* Applying the buffer may have made the queue non-full again, unblock it if needed */
1406 gst_data_queue_limits_changed (sq->queue);
1408 if (G_UNLIKELY (*allow_drop)) {
1409 GST_DEBUG_OBJECT (mq,
1410 "SingleQueue %d : Dropping EOS buffer %p with ts %" GST_TIME_FORMAT,
1411 sq->id, buffer, GST_TIME_ARGS (timestamp));
1412 gst_buffer_unref (buffer);
1414 GST_DEBUG_OBJECT (mq,
1415 "SingleQueue %d : Pushing buffer %p with ts %" GST_TIME_FORMAT,
1416 sq->id, buffer, GST_TIME_ARGS (timestamp));
1417 result = gst_pad_push (sq->srcpad, buffer);
1419 } else if (GST_IS_EVENT (object)) {
1422 event = GST_EVENT_CAST (object);
1424 switch (GST_EVENT_TYPE (event)) {
/* EOS: report it via the flow return; stop any ongoing drop mode */
1426 result = GST_FLOW_EOS;
1427 if (G_UNLIKELY (*allow_drop))
1428 *allow_drop = FALSE;
1430 case GST_EVENT_SEGMENT:
1431 apply_segment (mq, sq, event, &sq->src_segment);
1432 /* Applying the segment may have made the queue non-full again, unblock it if needed */
1433 gst_data_queue_limits_changed (sq->queue);
/* a new segment ends EOS-drop mode and resets the flow to OK */
1434 if (G_UNLIKELY (*allow_drop)) {
1435 result = GST_FLOW_OK;
1436 *allow_drop = FALSE;
1440 apply_gap (mq, sq, event, &sq->src_segment);
1441 /* Applying the gap may have made the queue non-full again, unblock it if needed */
1442 gst_data_queue_limits_changed (sq->queue);
1448 if (G_UNLIKELY (*allow_drop)) {
1449 GST_DEBUG_OBJECT (mq,
1450 "SingleQueue %d : Dropping EOS event %p of type %s",
1451 sq->id, event, GST_EVENT_TYPE_NAME (event));
1452 gst_event_unref (event);
1454 GST_DEBUG_OBJECT (mq,
1455 "SingleQueue %d : Pushing event %p of type %s",
1456 sq->id, event, GST_EVENT_TYPE_NAME (event));
1458 gst_pad_push_event (sq->srcpad, event);
1460 } else if (GST_IS_QUERY (object)) {
1464 query = GST_QUERY_CAST (object);
1466 if (G_UNLIKELY (*allow_drop)) {
1467 GST_DEBUG_OBJECT (mq,
1468 "SingleQueue %d : Dropping EOS query %p", sq->id, query);
1469 gst_query_unref (query);
1472 res = gst_pad_peer_query (sq->srcpad, query);
/* hand the query result back to the sinkpad thread waiting in
 * gst_multi_queue_sink_query() */
1475 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1476 sq->last_query = res;
1477 sq->last_handled_query = query;
1478 g_cond_signal (&sq->query_handled);
1479 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1481 g_warning ("Unexpected object in singlequeue %u (refcounting problem?)",
/* Detach and return the mini-object from an item, leaving the item safe to
 * destroy without unreffing the object (ownership moves to the caller). */
1489 static GstMiniObject *
1490 gst_multi_queue_item_steal_object (GstMultiQueueItem * item)
1495 item->object = NULL;
/* Free a queue item; drops its ref on the contained object unless the item
 * wraps a query (queries are not reffed by the queue) or the object was
 * already stolen. */
1501 gst_multi_queue_item_destroy (GstMultiQueueItem * item)
1503 if (!item->is_query && item->object)
1504 gst_mini_object_unref (item->object);
1505 g_slice_free (GstMultiQueueItem, item);
1508 /* takes ownership of passed mini object! */
/* Wrap a buffer in a queue item that counts towards the visible-buffer and
 * byte/time limits of the data queue. */
1509 static GstMultiQueueItem *
1510 gst_multi_queue_buffer_item_new (GstMiniObject * object, guint32 curid)
1512 GstMultiQueueItem *item;
1514 item = g_slice_new (GstMultiQueueItem);
1515 item->object = object;
1516 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
/* posid is the global incrementing id used for not-linked ordering */
1517 item->posid = curid;
1518 item->is_query = GST_IS_QUERY (object);
1520 item->size = gst_buffer_get_size (GST_BUFFER_CAST (object));
1521 item->duration = GST_BUFFER_DURATION (object);
1522 if (item->duration == GST_CLOCK_TIME_NONE)
1524 item->visible = TRUE;
/* Wrap a non-buffer mini-object (event or query) in a queue item; such
 * items are not counted as visible buffers in the queue limits. Takes
 * ownership of the passed object (see note on the buffer variant above). */
1528 static GstMultiQueueItem *
1529 gst_multi_queue_mo_item_new (GstMiniObject * object, guint32 curid)
1531 GstMultiQueueItem *item;
1533 item = g_slice_new (GstMultiQueueItem);
1534 item->object = object;
1535 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
1536 item->posid = curid;
1537 item->is_query = GST_IS_QUERY (object);
1541 item->visible = FALSE;
1545 /* Each main loop attempts to push buffers until the return value
1546 * is not-linked. not-linked pads are not allowed to push data beyond
1547 * any linked pads, so they don't 'rush ahead of the pack'.
/* Streaming-thread task for one single queue: pop an item, wait if this
 * not-linked pad would run ahead of linked pads (by id or running time
 * depending on sync-by-running-time), push it downstream, then update
 * flow state / buffering and wake any peers that may now proceed. */
1550 gst_multi_queue_loop (GstPad * pad)
1553 GstMultiQueueItem *item;
1554 GstDataQueueItem *sitem;
1556 GstMiniObject *object = NULL;
1558 GstFlowReturn result;
1559 GstClockTimeDiff next_time;
1561 gboolean do_update_buffering = FALSE;
1562 gboolean dropping = FALSE;
1564 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
1568 GST_DEBUG_OBJECT (mq, "SingleQueue %d : trying to pop an object", sq->id);
1573 /* Get something from the queue, blocking until that happens, or we get
1575 if (!(gst_data_queue_pop (sq->queue, &sitem)))
1578 item = (GstMultiQueueItem *) sitem;
1579 newid = item->posid;
1581 /* steal the object and destroy the item */
1582 object = gst_multi_queue_item_steal_object (item);
1583 gst_multi_queue_item_destroy (item);
1585 is_buffer = GST_IS_BUFFER (object);
1587 /* Get running time of the item. Events will have GST_CLOCK_STIME_NONE */
1588 next_time = get_running_time (&sq->src_segment, object, FALSE);
1590 GST_LOG_OBJECT (mq, "SingleQueue %d : newid:%d , oldid:%d",
1591 sq->id, newid, sq->last_oldid);
1593 /* If we're not-linked, we do some extra work because we might need to
1594 * wait before pushing. If we're linked but there's a gap in the IDs,
1595 * or it's the first loop, or we just passed the previous highid,
1596 * we might need to wake some sleeping pad up, so there's extra work
1598 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1599 if (sq->srcresult == GST_FLOW_NOT_LINKED
1600 || (sq->last_oldid == G_MAXUINT32) || (newid != (sq->last_oldid + 1))
1601 || sq->last_oldid > mq->highid) {
1602 GST_LOG_OBJECT (mq, "CHECKING sq->srcresult: %s",
1603 gst_flow_get_name (sq->srcresult));
1605 /* Check again if we're flushing after the lock is taken,
1606 * the flush flag might have been changed in the meantime */
1608 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1612 /* Update the nextid so other threads know when to wake us up */
1614 /* Take into account the extra cache time since we're unlinked */
1615 if (GST_CLOCK_STIME_IS_VALID (next_time))
1616 next_time += mq->unlinked_cache_time;
1617 sq->next_time = next_time;
1619 /* Update the oldid (the last ID we output) for highid tracking */
1620 if (sq->last_oldid != G_MAXUINT32)
1621 sq->oldid = sq->last_oldid;
1623 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
1624 /* Go to sleep until it's time to push this buffer */
1626 /* Recompute the highid */
1627 compute_high_id (mq);
1628 /* Recompute the high time */
1629 compute_high_time (mq);
/* Sleep while this item is still ahead of the pack: either its running
 * time is past the high time (running-time sync) or its id is past the
 * highid (id sync). Loop re-checks after each wakeup. */
1631 while (((mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (next_time)
1632 && (mq->high_time == GST_CLOCK_STIME_NONE
1633 || next_time > mq->high_time))
1634 || (!mq->sync_by_running_time && newid > mq->highid))
1635 && sq->srcresult == GST_FLOW_NOT_LINKED) {
1637 GST_DEBUG_OBJECT (mq,
1638 "queue %d sleeping for not-linked wakeup with "
1639 "newid %u, highid %u, next_time %" GST_STIME_FORMAT
1640 ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
1641 GST_STIME_ARGS (next_time), GST_STIME_ARGS (mq->high_time));
1643 /* Wake up all non-linked pads before we sleep */
1644 wake_up_next_non_linked (mq);
1647 g_cond_wait (&sq->turn, &mq->qlock);
1651 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1655 /* Recompute the high time and ID */
1656 compute_high_time (mq);
1657 compute_high_id (mq);
1659 GST_DEBUG_OBJECT (mq, "queue %d woken from sleeping for not-linked "
1660 "wakeup with newid %u, highid %u, next_time %" GST_STIME_FORMAT
1661 ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
1662 GST_STIME_ARGS (next_time), GST_STIME_ARGS (mq->high_time));
1665 /* Re-compute the high_id in case someone else pushed */
1666 compute_high_id (mq);
1667 compute_high_time (mq);
1669 compute_high_id (mq);
1670 compute_high_time (mq);
1671 /* Wake up all non-linked pads */
1672 wake_up_next_non_linked (mq);
1674 /* We're done waiting, we can clear the nextid and nexttime */
1676 sq->next_time = GST_CLOCK_STIME_NONE;
1678 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1683 GST_LOG_OBJECT (mq, "sq:%d BEFORE PUSHING sq->srcresult: %s", sq->id,
1684 gst_flow_get_name (sq->srcresult));
1686 /* Update time stats */
1687 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
/* second call with end=TRUE: include the item's duration this time */
1688 next_time = get_running_time (&sq->src_segment, object, TRUE);
1689 if (GST_CLOCK_STIME_IS_VALID (next_time)) {
1690 if (sq->last_time == GST_CLOCK_STIME_NONE || sq->last_time < next_time)
1691 sq->last_time = next_time;
1692 if (mq->high_time == GST_CLOCK_STIME_NONE || mq->high_time <= next_time) {
1693 /* Wake up all non-linked pads now that we advanced the high time */
1694 mq->high_time = next_time;
1695 wake_up_next_non_linked (mq);
1698 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1700 /* Try to push out the new object */
1701 result = gst_single_queue_push_one (mq, sq, object, &dropping);
1704 /* Check if we pushed something already and if this is
1705 * now a switch from an active to a non-active stream.
1707 * If it is, we reset all the waiting streams, let them
1708 * push another buffer to see if they're now active again.
1709 * This allows faster switching between streams and prevents
1710 * deadlocks if downstream does any waiting too.
1712 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1713 if (sq->pushed && sq->srcresult == GST_FLOW_OK
1714 && result == GST_FLOW_NOT_LINKED) {
1717 GST_LOG_OBJECT (mq, "SingleQueue %d : Changed from active to non-active",
1720 compute_high_id (mq);
1721 do_update_buffering = TRUE;
1723 /* maybe no-one is waiting */
1724 if (mq->numwaiting > 0) {
1725 /* Else figure out which singlequeue(s) need waking up */
1726 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
1727 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
1729 if (sq2->srcresult == GST_FLOW_NOT_LINKED) {
1730 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq2->id);
1731 sq2->pushed = FALSE;
1732 sq2->srcresult = GST_FLOW_OK;
1733 g_cond_signal (&sq2->turn);
1742 /* now hold on a bit;
1743 * can not simply throw this result to upstream, because
1744 * that might already be onto another segment, so we have to make
1745 * sure we are relaying the correct info wrt proper segment */
1746 if (result == GST_FLOW_EOS && !dropping &&
1747 sq->srcresult != GST_FLOW_NOT_LINKED) {
1748 GST_DEBUG_OBJECT (mq, "starting EOS drop on sq %d", sq->id);
1750 /* pretend we have not seen EOS yet for upstream's sake */
1751 result = sq->srcresult;
1752 } else if (dropping && gst_data_queue_is_empty (sq->queue)) {
1753 /* queue empty, so stop dropping
1754 * we can commit the result we have now,
1755 * which is either OK after a segment, or EOS */
1756 GST_DEBUG_OBJECT (mq, "committed EOS drop on sq %d", sq->id);
1758 result = GST_FLOW_EOS;
1760 sq->srcresult = result;
1761 sq->last_oldid = newid;
1763 if (do_update_buffering)
1764 update_buffering (mq, sq);
1766 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1767 gst_multi_queue_post_buffering (mq);
1769 GST_LOG_OBJECT (mq, "sq:%d AFTER PUSHING sq->srcresult: %s", sq->id,
1770 gst_flow_get_name (sq->srcresult));
1772 /* Need to make sure wake up any sleeping pads when we exit */
1773 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1774 if (mq->numwaiting > 0 && GST_PAD_IS_EOS (sq->srcpad)) {
1775 compute_high_time (mq);
1776 compute_high_id (mq);
1777 wake_up_next_non_linked (mq);
1779 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* error path: anything other than OK / NOT_LINKED / EOS is fatal */
1784 if (result != GST_FLOW_OK && result != GST_FLOW_NOT_LINKED
1785 && result != GST_FLOW_EOS)
1793 gst_mini_object_unref (object);
/* unblock a sinkpad thread waiting on a serialized query */
1795 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1796 sq->last_query = FALSE;
1797 g_cond_signal (&sq->query_handled);
1799 /* Post an error message if we got EOS while downstream
1800 * has returned an error flow return. After EOS there
1801 * will be no further buffer which could propagate the
1803 if (sq->is_eos && sq->srcresult < GST_FLOW_EOS) {
1804 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1805 GST_ELEMENT_ERROR (mq, STREAM, FAILED,
1806 ("Internal data stream error."),
1807 ("streaming stopped, reason %s", gst_flow_get_name (sq->srcresult)));
1809 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1812 /* upstream needs to see fatal result ASAP to shut things down,
1813 * but might be stuck in one of our other full queues;
1814 * so empty this one and trigger dynamic queue growth. At
1815 * this point the srcresult is not OK, NOT_LINKED
1816 * or EOS, i.e. a real failure */
1817 gst_single_queue_flush_queue (sq, FALSE);
1818 single_queue_underrun_cb (sq->queue, sq);
1819 gst_data_queue_set_flushing (sq->queue, TRUE);
1820 gst_pad_pause_task (sq->srcpad);
1821 GST_CAT_LOG_OBJECT (multi_queue_debug, mq,
1822 "SingleQueue[%d] task paused, reason:%s",
1823 sq->id, gst_flow_get_name (sq->srcresult));
1829 * gst_multi_queue_chain:
1831 * This is similar to GstQueue's chain function, except:
1832 * _ we don't have leak behaviours,
1833 * _ we push with a unique id (curid)
1835 static GstFlowReturn
1836 gst_multi_queue_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
1840 GstMultiQueueItem *item;
1842 GstClockTime timestamp, duration;
1844 sq = gst_pad_get_element_private (pad);
1847 /* if eos, we are always full, so avoid hanging incoming indefinitely */
1853 /* Get a unique incrementing id */
1854 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
1856 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
1857 duration = GST_BUFFER_DURATION (buffer);
1860 "SingleQueue %d : about to enqueue buffer %p with id %d (pts:%"
1861 GST_TIME_FORMAT " dts:%" GST_TIME_FORMAT " dur:%" GST_TIME_FORMAT ")",
1862 sq->id, buffer, curid, GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
1863 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), GST_TIME_ARGS (duration));
1865 item = gst_multi_queue_buffer_item_new (GST_MINI_OBJECT_CAST (buffer), curid);
1867 /* Update interleave before pushing data into queue */
1868 if (mq->use_interleave) {
1869 GstClockTime val = timestamp;
1870 GstClockTimeDiff dval;
1872 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
/* fall back to the current sink position when the buffer is untimestamped */
1873 if (val == GST_CLOCK_TIME_NONE)
1874 val = sq->sink_segment.position;
1875 if (duration != GST_CLOCK_TIME_NONE)
1878 dval = my_segment_to_running_time (&sq->sink_segment, val);
1879 if (GST_CLOCK_STIME_IS_VALID (dval)) {
1880 sq->cached_sinktime = dval;
1881 GST_DEBUG_OBJECT (mq,
1882 "Queue %d cached sink time now %" G_GINT64_FORMAT " %"
1883 GST_STIME_FORMAT, sq->id, sq->cached_sinktime,
1884 GST_STIME_ARGS (sq->cached_sinktime));
1885 calculate_interleave (mq);
1887 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* blocks when the queue is full; fails when flushing */
1890 if (!(gst_data_queue_push (sq->queue, (GstDataQueueItem *) item)))
1893 /* update time level, we must do this after pushing the data in the queue so
1894 * that we never end up filling the queue first. */
1895 apply_buffer (mq, sq, timestamp, duration, &sq->sink_segment);
1898 return sq->srcresult;
/* error labels below: push refused (flushing) and EOS drop */
1903 GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
1904 sq->id, gst_flow_get_name (sq->srcresult));
1905 gst_multi_queue_item_destroy (item);
1910 GST_DEBUG_OBJECT (mq, "we are EOS, dropping buffer, return EOS");
1911 gst_buffer_unref (buffer);
1912 return GST_FLOW_EOS;
/* Sinkpad activate-mode handler: on activation mark the single queue
 * linked (FLOW_OK) and un-flush its data queue; on deactivation set
 * FLUSHING, wake any query waiter, flush the queue and wait for the
 * streaming thread to finish via the pad stream lock. */
1917 gst_multi_queue_sink_activate_mode (GstPad * pad, GstObject * parent,
1918 GstPadMode mode, gboolean active)
1924 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
1925 mq = (GstMultiQueue *) gst_pad_get_parent (pad);
1927 /* mq is NULL if the pad is activated/deactivated before being
1928 * added to the multiqueue */
1930 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1933 case GST_PAD_MODE_PUSH:
1935 /* All pads start off linked until they push one buffer */
1936 sq->srcresult = GST_FLOW_OK;
1938 gst_data_queue_set_flushing (sq->queue, FALSE);
1940 sq->srcresult = GST_FLOW_FLUSHING;
1941 sq->last_query = FALSE;
1942 g_cond_signal (&sq->query_handled);
1943 gst_data_queue_set_flushing (sq->queue, TRUE);
1945 /* Wait until streaming thread has finished */
1947 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1948 GST_PAD_STREAM_LOCK (pad);
1950 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1951 gst_data_queue_flush (sq->queue);
1953 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1954 GST_PAD_STREAM_UNLOCK (pad);
1956 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1966 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* balances the gst_pad_get_parent() ref taken above */
1967 gst_object_unref (mq);
/* Sinkpad event handler. Flush events are forwarded immediately and
 * (de)activate the single queue; non-serialized events bypass the queue;
 * serialized events are wrapped in an item and enqueued. SEGMENT and GAP
 * keep an extra ref (sref) so the sink-side segment/time level can be
 * updated after the event is safely in the queue. */
1973 static GstFlowReturn
1974 gst_multi_queue_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
1979 GstMultiQueueItem *item;
1980 gboolean res = TRUE;
1981 GstFlowReturn flowret = GST_FLOW_OK;
1983 GstEvent *sref = NULL;
1985 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
1986 mq = (GstMultiQueue *) parent;
1988 type = GST_EVENT_TYPE (event);
1991 case GST_EVENT_STREAM_START:
1993 if (mq->sync_by_running_time) {
1994 GstStreamFlags stream_flags;
1995 gst_event_parse_stream_flags (event, &stream_flags);
1996 if ((stream_flags & GST_STREAM_FLAG_SPARSE)) {
1997 GST_INFO_OBJECT (mq, "SingleQueue %d is a sparse stream", sq->id);
1998 sq->is_sparse = TRUE;
2000 sq->thread = g_thread_self ();
2003 sq->thread = g_thread_self ();
2005 /* Remove EOS flag */
2009 case GST_EVENT_FLUSH_START:
2010 GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush start event",
2013 res = gst_pad_push_event (sq->srcpad, event);
2015 gst_single_queue_flush (mq, sq, TRUE, FALSE);
2018 case GST_EVENT_FLUSH_STOP:
2019 GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush stop event",
2022 res = gst_pad_push_event (sq->srcpad, event);
2024 gst_single_queue_flush (mq, sq, FALSE, FALSE);
2027 case GST_EVENT_SEGMENT:
2028 sref = gst_event_ref (event);
2031 /* take ref because the queue will take ownership and we need the event
2032 * afterwards to update the segment */
2033 sref = gst_event_ref (event);
2034 if (mq->use_interleave) {
2035 GstClockTime val, dur;
2037 gst_event_parse_gap (event, &val, &dur);
2038 if (GST_CLOCK_TIME_IS_VALID (val)) {
2039 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2040 if (GST_CLOCK_TIME_IS_VALID (dur))
2042 stime = my_segment_to_running_time (&sq->sink_segment, val);
2043 if (GST_CLOCK_STIME_IS_VALID (stime)) {
2044 sq->cached_sinktime = stime;
2045 calculate_interleave (mq);
2047 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* non-serialized events (e.g. upstream OOB) skip the queue entirely */
2053 if (!(GST_EVENT_IS_SERIALIZED (event))) {
2054 res = gst_pad_push_event (sq->srcpad, event);
2060 /* if eos, we are always full, so avoid hanging incoming indefinitely */
2064 /* Get an unique incrementing id. */
2065 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2067 item = gst_multi_queue_mo_item_new ((GstMiniObject *) event, curid);
2069 GST_DEBUG_OBJECT (mq,
2070 "SingleQueue %d : Enqueuing event %p of type %s with id %d",
2071 sq->id, event, GST_EVENT_TYPE_NAME (event), curid);
2073 if (!gst_data_queue_push (sq->queue, (GstDataQueueItem *) item))
2076 /* mark EOS when we received one, we must do that after putting the
2077 * buffer in the queue because EOS marks the buffer as filled. */
2080 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2083 /* Post an error message if we got EOS while downstream
2084 * has returned an error flow return. After EOS there
2085 * will be no further buffer which could propagate the
2087 if (sq->srcresult < GST_FLOW_EOS) {
2088 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2089 GST_ELEMENT_ERROR (mq, STREAM, FAILED,
2090 ("Internal data stream error."),
2091 ("streaming stopped, reason %s",
2092 gst_flow_get_name (sq->srcresult)));
2094 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2097 /* EOS affects the buffering state */
2098 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2099 update_buffering (mq, sq);
2100 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2101 single_queue_overrun_cb (sq->queue, sq);
2102 gst_multi_queue_post_buffering (mq);
2104 case GST_EVENT_SEGMENT:
/* use the extra ref taken above; the queued event now belongs to the queue */
2105 apply_segment (mq, sq, sref, &sq->sink_segment);
2106 gst_event_unref (sref);
2107 /* a new segment allows us to accept more buffers if we got EOS
2108 * from downstream */
2109 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2110 if (sq->srcresult == GST_FLOW_EOS)
2111 sq->srcresult = GST_FLOW_OK;
2112 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2116 apply_gap (mq, sq, sref, &sq->sink_segment);
2117 gst_event_unref (sref);
2124 flowret = GST_FLOW_ERROR;
2125 GST_DEBUG_OBJECT (mq, "SingleQueue %d : returning %s", sq->id,
2126 gst_flow_get_name (flowret));
/* error labels: queue refused the item (flushing) and EOS drop */
2131 GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
2132 sq->id, gst_flow_get_name (sq->srcresult));
2134 gst_event_unref (sref);
2135 gst_multi_queue_item_destroy (item);
2136 return sq->srcresult;
2140 GST_DEBUG_OBJECT (mq, "we are EOS, dropping event, return GST_FLOW_EOS");
2141 gst_event_unref (event);
2142 return GST_FLOW_EOS;
/* Sinkpad query handler. Serialized queries are enqueued like data and the
 * calling thread blocks on 'query_handled' until the streaming thread
 * answers them (see gst_single_queue_push_one); this is refused while
 * buffering with a non-empty queue to avoid deadlock. Everything else
 * falls through to the default handler. */
2147 gst_multi_queue_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
2153 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
2154 mq = (GstMultiQueue *) parent;
2156 switch (GST_QUERY_TYPE (query)) {
2158 if (GST_QUERY_IS_SERIALIZED (query)) {
2160 GstMultiQueueItem *item;
2162 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2163 if (sq->srcresult != GST_FLOW_OK)
2166 /* serialized events go in the queue. We need to be certain that we
2167 * don't cause deadlocks waiting for the query return value. We check if
2168 * the queue is empty (nothing is blocking downstream and the query can
2169 * be pushed for sure) or we are not buffering. If we are buffering,
2170 * the pipeline waits to unblock downstream until our queue fills up
2171 * completely, which can not happen if we block on the query..
2172 * Therefore we only potentially block when we are not buffering. */
2173 if (!mq->use_buffering || gst_data_queue_is_empty (sq->queue)) {
2174 /* Get an unique incrementing id. */
2175 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2177 item = gst_multi_queue_mo_item_new ((GstMiniObject *) query, curid);
2179 GST_DEBUG_OBJECT (mq,
2180 "SingleQueue %d : Enqueuing query %p of type %s with id %d",
2181 sq->id, query, GST_QUERY_TYPE_NAME (query), curid);
/* drop the lock while pushing: the push may block on a full queue */
2182 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2183 res = gst_data_queue_push (sq->queue, (GstDataQueueItem *) item);
2184 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2185 if (!res || sq->flushing)
2187 /* it might be that the query has been taken out of the queue
2188 * while we were unlocked. So, we need to check if the last
2189 * handled query is the same one than the one we just
2190 * pushed. If it is, we don't need to wait for the condition
2191 * variable, otherwise we wait for the condition variable to
2193 if (sq->last_handled_query != query)
2194 g_cond_wait (&sq->query_handled, &mq->qlock);
2195 res = sq->last_query;
2196 sq->last_handled_query = NULL;
2198 GST_DEBUG_OBJECT (mq, "refusing query, we are buffering and the "
2199 "queue is not empty");
2202 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2204 /* default handling */
2205 res = gst_pad_query_default (pad, parent, query);
2213 GST_DEBUG_OBJECT (mq, "Flushing");
2214 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Srcpad activate-mode handler: activation un-flushes and (re)starts the
 * single queue's streaming task via gst_single_queue_flush(); deactivation
 * flushes it and stops the pad task. */
2220 gst_multi_queue_src_activate_mode (GstPad * pad, GstObject * parent,
2221 GstPadMode mode, gboolean active)
2227 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
2230 GST_DEBUG_OBJECT (mq, "SingleQueue %d", sq->id);
2233 case GST_PAD_MODE_PUSH:
2235 result = gst_single_queue_flush (mq, sq, FALSE, TRUE);
2237 result = gst_single_queue_flush (mq, sq, TRUE, TRUE);
2238 /* make sure streaming finishes */
2239 result |= gst_pad_stop_task (pad);
/* Srcpad event handler: RECONFIGURE re-marks a not-linked queue as OK and
 * wakes its streaming thread (downstream may be linked again), then all
 * events are forwarded upstream via the sinkpad. */
2250 gst_multi_queue_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2252 GstSingleQueue *sq = gst_pad_get_element_private (pad);
2253 GstMultiQueue *mq = sq->mqueue;
2256 switch (GST_EVENT_TYPE (event)) {
2257 case GST_EVENT_RECONFIGURE:
2258 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2259 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2260 sq->srcresult = GST_FLOW_OK;
2261 g_cond_signal (&sq->turn);
2263 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2265 ret = gst_pad_push_event (sq->sinkpad, event);
2268 ret = gst_pad_push_event (sq->sinkpad, event);
/* Srcpad query handler: currently everything goes to the default handler. */
2276 gst_multi_queue_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2280 /* FIXME, Handle position offset depending on queue size */
2281 switch (GST_QUERY_TYPE (query)) {
2283 /* default handling */
2284 res = gst_pad_query_default (pad, parent, query);
2291 * Next-non-linked functions
2294 /* WITH LOCK TAKEN */
/* Signal every waiting not-linked single queue whose next item is no
 * longer ahead of the pack: by running time (next_time <= high_time) when
 * sync-by-running-time is enabled, otherwise by id (nextid <= highid). */
2296 wake_up_next_non_linked (GstMultiQueue * mq)
2300 /* maybe no-one is waiting */
2301 if (mq->numwaiting < 1)
2304 if (mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (mq->high_time)) {
2305 /* Else figure out which singlequeue(s) need waking up */
2306 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2307 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2308 if (sq->srcresult == GST_FLOW_NOT_LINKED
2309 && GST_CLOCK_STIME_IS_VALID (sq->next_time)
2310 && sq->next_time <= mq->high_time) {
2311 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
2312 g_cond_signal (&sq->turn);
2316 /* Else figure out which singlequeue(s) need waking up */
2317 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2318 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2319 if (sq->srcresult == GST_FLOW_NOT_LINKED &&
2320 sq->nextid != 0 && sq->nextid <= mq->highid) {
2321 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
2322 g_cond_signal (&sq->turn);
2328 /* WITH LOCK TAKEN */
/* Recompute mq->highid, the item id up to which not-linked streams may
 * output. Scans every single queue once; result is stored in mq->highid
 * and logged below. */
2330 compute_high_id (GstMultiQueue * mq)
2332   /* The high-id is either the highest id among the linked pads, or if all
2333    * pads are not-linked, it's the lowest not-linked pad */
2335   guint32 lowest = G_MAXUINT32;
2336   guint32 highid = G_MAXUINT32;
2338   for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2339     GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2341     GST_LOG_OBJECT (mq, "inspecting sq:%d , nextid:%d, oldid:%d, srcresult:%s",
2342         sq->id, sq->nextid, sq->oldid, gst_flow_get_name (sq->srcresult));
2344     if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2345       /* No need to consider queues which are not waiting */
2346       if (sq->nextid == 0) {
2347         GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
/* track the smallest id a not-linked queue is waiting for */
2351       if (sq->nextid < lowest)
2352         lowest = sq->nextid;
2353     } else if (!GST_PAD_IS_EOS (sq->srcpad)) {
2354       /* If we don't have a global highid, or the global highid is lower than
2355        * this single queue's last outputted id, store the queue's one,
2356        * unless the singlequeue output is at EOS */
2357       if ((highid == G_MAXUINT32) || (sq->oldid > highid))
/* No linked pad produced a usable id (highid unset), or the lowest
 * waiting not-linked id is below it: use the lowest instead. */
2362   if (highid == G_MAXUINT32 || lowest < highid)
2363     mq->highid = lowest;
2365     mq->highid = highid;
2367   GST_LOG_OBJECT (mq, "Highid is now : %u, lowest non-linked %u", mq->highid,
2371 /* WITH LOCK TAKEN */
/* Recompute mq->high_time, the running time up to which not-linked
 * streams may output. Only meaningful in sync-by-running-time mode;
 * returns early otherwise. */
2373 compute_high_time (GstMultiQueue * mq)
2375   /* The high-time is either the highest last time among the linked
2376    * pads, or if all pads are not-linked, it's the lowest next time of
2379   GstClockTimeDiff highest = GST_CLOCK_STIME_NONE;
2380   GstClockTimeDiff lowest = GST_CLOCK_STIME_NONE;
2382   if (!mq->sync_by_running_time)
2385   for (tmp = mq->queues; tmp; tmp = tmp->next) {
2386     GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2389         "inspecting sq:%d , next_time:%" GST_STIME_FORMAT ", last_time:%"
2390         GST_STIME_FORMAT ", srcresult:%s", sq->id,
2391         GST_STIME_ARGS (sq->next_time), GST_STIME_ARGS (sq->last_time),
2392         gst_flow_get_name (sq->srcresult));
2394     if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2395       /* No need to consider queues which are not waiting */
2396       if (!GST_CLOCK_STIME_IS_VALID (sq->next_time)) {
2397         GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
/* track the smallest next running time among waiting not-linked queues */
2401       if (lowest == GST_CLOCK_STIME_NONE || sq->next_time < lowest)
2402         lowest = sq->next_time;
2403     } else if (!GST_PAD_IS_EOS (sq->srcpad)) {
2404       /* If we don't have a global high time, or the global high time
2405        * is lower than this single queue's last outputted time, store
2406        * the queue's one, unless the singlequeue output is at EOS. */
2407       if (highest == GST_CLOCK_STIME_NONE
2408           || (sq->last_time != GST_CLOCK_STIME_NONE && sq->last_time > highest))
2409         highest = sq->last_time;
2412         "highest now %" GST_STIME_FORMAT " lowest %" GST_STIME_FORMAT,
2413         GST_STIME_ARGS (highest), GST_STIME_ARGS (lowest));
/* no linked pad produced a valid last time: fall back to the lowest
 * next time of the not-linked pads */
2416   if (highest == GST_CLOCK_STIME_NONE)
2417     mq->high_time = lowest;
2419     mq->high_time = highest;
2422       "High time is now : %" GST_STIME_FORMAT ", lowest non-linked %"
2423       GST_STIME_FORMAT, GST_STIME_ARGS (mq->high_time),
2424       GST_STIME_ARGS (lowest));
/* TRUE when the given level 'value' has reached the queue's configured
 * max_size for that format (visible/bytes/time); a max_size of 0 means
 * that format is unlimited and never counts as filled. */
2427 #define IS_FILLED(q, format, value) (((q)->max_size.format) != 0 && \
2428     ((q)->max_size.format) <= (value))
2431 * GstSingleQueue functions
/* GstDataQueue "full" callback for one single queue: if the hard
 * time/bytes limits are not reached and another (non-sparse) queue is
 * empty, grow this queue's visible-items limit by one so the other
 * streams are not starved; the "overrun" signal is always emitted. */
2434 single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
2436   GstMultiQueue *mq = sq->mqueue;
2438   GstDataQueueSize size;
2439   gboolean filled = TRUE;
2440   gboolean empty_found = FALSE;
2442   gst_data_queue_get_level (sq->queue, &size);
2445       "Single Queue %d: EOS %d, visible %u/%u, bytes %u/%u, time %"
2446       G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, sq->id, sq->is_eos, size.visible,
2447       sq->max_size.visible, size.bytes, sq->max_size.bytes, sq->cur_time,
2450   GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2452   /* check if we reached the hard time/bytes limits;
2453      time limit is only taken into account for non-sparse streams */
2454   if (sq->is_eos || IS_FILLED (sq, bytes, size.bytes) ||
2455       (!sq->is_sparse && IS_FILLED (sq, time, sq->cur_time))) {
2459   /* Search for empty queues */
2460   for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2461     GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
/* a not-linked queue does not count as starving */
2466     if (oq->srcresult == GST_FLOW_NOT_LINKED) {
2467       GST_LOG_OBJECT (mq, "Queue %d is not-linked", oq->id);
2471     GST_LOG_OBJECT (mq, "Checking Queue %d", oq->id);
2472     if (gst_data_queue_is_empty (oq->queue) && !oq->is_sparse) {
2473       GST_LOG_OBJECT (mq, "Queue %d is empty", oq->id);
2479   /* if hard limits are not reached then we allow one more buffer in the full
2480    * queue, but only if any of the other singlequeues are empty */
2482     if (IS_FILLED (sq, visible, size.visible)) {
2483       sq->max_size.visible = size.visible + 1;
2484       GST_DEBUG_OBJECT (mq,
2485           "Bumping single queue %d max visible to %d",
2486           sq->id, sq->max_size.visible);
2492   GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2494   /* Overrun is always forwarded, since this is blocking the upstream element */
2496   GST_DEBUG_OBJECT (mq, "Queue %d is filled, signalling overrun", sq->id);
2497   g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_OVERRUN], 0);
/* GstDataQueue "empty" callback for one single queue: bump the visible
 * limit of any other queue that is currently full so data keeps
 * flowing, and emit the "underrun" signal when every queue is empty. */
2502 single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
2504   gboolean empty = TRUE;
2505   GstMultiQueue *mq = sq->mqueue;
/* an empty not-linked queue never signals underrun */
2508   if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2509     GST_LOG_OBJECT (mq, "Single Queue %d is empty but not-linked", sq->id);
2513       "Single Queue %d is empty, Checking other single queues", sq->id);
2516   GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2517   for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2518     GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
2520     if (gst_data_queue_is_full (oq->queue)) {
2521       GstDataQueueSize size;
2523       gst_data_queue_get_level (oq->queue, &size);
/* grow the full queue's visible limit by one and notify its data
 * queue so blocked producers re-evaluate the new limit */
2524       if (IS_FILLED (oq, visible, size.visible)) {
2525         oq->max_size.visible = size.visible + 1;
2526         GST_DEBUG_OBJECT (mq,
2527             "queue %d is filled, bumping its max visible to %d", oq->id,
2528             oq->max_size.visible);
2529         gst_data_queue_limits_changed (oq->queue);
/* sparse queues count as non-empty for the underrun decision */
2532     if (!gst_data_queue_is_empty (oq->queue) || oq->is_sparse)
2535   GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2538     GST_DEBUG_OBJECT (mq, "All queues are empty, signalling it");
2539     g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_UNDERRUN], 0);
/* GstDataQueueCheckFullFunction for one single queue: decides whether
 * the queue should be considered full given the current visible/bytes
 * levels and the queue's accumulated time (sq->cur_time). */
2544 single_queue_check_full (GstDataQueue * dataq, guint visible, guint bytes,
2545     guint64 time, GstSingleQueue * sq)
2548   GstMultiQueue *mq = sq->mqueue;
2550   GST_DEBUG_OBJECT (mq,
2551       "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
2552       G_GUINT64_FORMAT, sq->id, visible, sq->max_size.visible, bytes,
2553       sq->max_size.bytes, sq->cur_time, sq->max_size.time);
2555   /* we are always filled on EOS */
2559   /* we never go past the max visible items unless we are in buffering mode */
2560   if (!mq->use_buffering && IS_FILLED (sq, visible, visible))
2563   /* check time or bytes */
2564   res = IS_FILLED (sq, bytes, bytes);
2565   /* We only care about limits in time if we're not a sparse stream or
2566    * we're not syncing by running time */
2567   if (!sq->is_sparse || !mq->sync_by_running_time) {
2568     /* If unlinked, take into account the extra unlinked cache time */
2569     if (mq->sync_by_running_time && sq->srcresult == GST_FLOW_NOT_LINKED) {
/* a not-linked queue is allowed unlinked_cache_time of extra data
 * before its time limit applies */
2570       if (sq->cur_time > mq->unlinked_cache_time)
2571         res |= IS_FILLED (sq, time, sq->cur_time - mq->unlinked_cache_time);
2575       res |= IS_FILLED (sq, time, sq->cur_time);
/* Drain all items from one single queue. When 'full' is FALSE, sticky
 * events (except SEGMENT and EOS) are rescued onto the srcpad before
 * the items are destroyed, so a relinked downstream still gets them.
 * Buffering state is recomputed and posted afterwards. */
2582 gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full)
2584   GstDataQueueItem *sitem;
2585   GstMultiQueueItem *mitem;
2586   gboolean was_flushing = FALSE;
2588   while (!gst_data_queue_is_empty (sq->queue)) {
2589     GstMiniObject *data;
2591     /* FIXME: If this fails here although the queue is not empty,
2592      * we're flushing... but we want to rescue all sticky
2593      * events nonetheless.
/* pop fails while flushing: temporarily clear the flushing flag so
 * the remaining items can still be popped and rescued */
2595     if (!gst_data_queue_pop (sq->queue, &sitem)) {
2596       was_flushing = TRUE;
2597       gst_data_queue_set_flushing (sq->queue, FALSE);
2601     mitem = (GstMultiQueueItem *) sitem;
2603     data = sitem->object;
2605     if (!full && !mitem->is_query && GST_IS_EVENT (data)
2606         && GST_EVENT_IS_STICKY (data)
2607         && GST_EVENT_TYPE (data) != GST_EVENT_SEGMENT
2608         && GST_EVENT_TYPE (data) != GST_EVENT_EOS) {
2609       gst_pad_store_sticky_event (sq->srcpad, GST_EVENT_CAST (data));
2612     sitem->destroy (sitem);
2615   gst_data_queue_flush (sq->queue);
/* restore the flushing state that was cleared above */
2617     gst_data_queue_set_flushing (sq->queue, TRUE);
/* queue level changed: recompute buffering under the lock, then post
 * any buffering message outside of it */
2619   GST_MULTI_QUEUE_MUTEX_LOCK (sq->mqueue);
2620   update_buffering (sq->mqueue, sq);
2621   GST_MULTI_QUEUE_MUTEX_UNLOCK (sq->mqueue);
2622   gst_multi_queue_post_buffering (sq->mqueue);
/* Release the resources owned by one single queue: drain and unref its
 * data queue and tear down its condition variables. */
2626 gst_single_queue_free (GstSingleQueue * sq)
2629   gst_data_queue_flush (sq->queue);
2630   g_object_unref (sq->queue);
2631   g_cond_clear (&sq->turn);
2632   g_cond_clear (&sq->query_handled);
2636 static GstSingleQueue *
2637 gst_single_queue_new (GstMultiQueue * mqueue, guint id)
2642 guint temp_id = (id == -1) ? 0 : id;
2644 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
2646 /* Find an unused queue ID, if possible the passed one */
2647 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
2648 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
2649 /* This works because the IDs are sorted in ascending order */
2650 if (sq2->id == temp_id) {
2651 /* If this ID was requested by the caller return NULL,
2652 * otherwise just get us the next one */
2654 temp_id = sq2->id + 1;
2656 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
2659 } else if (sq2->id > temp_id) {
2664 sq = g_new0 (GstSingleQueue, 1);
2668 mqueue->queues = g_list_insert_before (mqueue->queues, tmp, sq);
2669 mqueue->queues_cookie++;
2671 /* copy over max_size and extra_size so we don't need to take the lock
2672 * any longer when checking if the queue is full. */
2673 sq->max_size.visible = mqueue->max_size.visible;
2674 sq->max_size.bytes = mqueue->max_size.bytes;
2675 sq->max_size.time = mqueue->max_size.time;
2677 sq->extra_size.visible = mqueue->extra_size.visible;
2678 sq->extra_size.bytes = mqueue->extra_size.bytes;
2679 sq->extra_size.time = mqueue->extra_size.time;
2681 GST_DEBUG_OBJECT (mqueue, "Creating GstSingleQueue id:%d", sq->id);
2683 sq->mqueue = mqueue;
2684 sq->srcresult = GST_FLOW_FLUSHING;
2686 sq->queue = gst_data_queue_new ((GstDataQueueCheckFullFunction)
2687 single_queue_check_full,
2688 (GstDataQueueFullCallback) single_queue_overrun_cb,
2689 (GstDataQueueEmptyCallback) single_queue_underrun_cb, sq);
2691 sq->is_sparse = FALSE;
2692 sq->flushing = FALSE;
2694 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
2695 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
2699 sq->next_time = GST_CLOCK_STIME_NONE;
2700 sq->last_time = GST_CLOCK_STIME_NONE;
2701 g_cond_init (&sq->turn);
2702 g_cond_init (&sq->query_handled);
2704 sq->sinktime = GST_CLOCK_STIME_NONE;
2705 sq->srctime = GST_CLOCK_STIME_NONE;
2706 sq->sink_tainted = TRUE;
2707 sq->src_tainted = TRUE;
2709 name = g_strdup_printf ("sink_%u", sq->id);
2710 sq->sinkpad = gst_pad_new_from_static_template (&sinktemplate, name);
2713 gst_pad_set_chain_function (sq->sinkpad,
2714 GST_DEBUG_FUNCPTR (gst_multi_queue_chain));
2715 gst_pad_set_activatemode_function (sq->sinkpad,
2716 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_activate_mode));
2717 gst_pad_set_event_full_function (sq->sinkpad,
2718 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_event));
2719 gst_pad_set_query_function (sq->sinkpad,
2720 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_query));
2721 gst_pad_set_iterate_internal_links_function (sq->sinkpad,
2722 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
2723 GST_OBJECT_FLAG_SET (sq->sinkpad, GST_PAD_FLAG_PROXY_CAPS);
2725 name = g_strdup_printf ("src_%u", sq->id);
2726 sq->srcpad = gst_pad_new_from_static_template (&srctemplate, name);
2729 gst_pad_set_activatemode_function (sq->srcpad,
2730 GST_DEBUG_FUNCPTR (gst_multi_queue_src_activate_mode));
2731 gst_pad_set_event_function (sq->srcpad,
2732 GST_DEBUG_FUNCPTR (gst_multi_queue_src_event));
2733 gst_pad_set_query_function (sq->srcpad,
2734 GST_DEBUG_FUNCPTR (gst_multi_queue_src_query));
2735 gst_pad_set_iterate_internal_links_function (sq->srcpad,
2736 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
2737 GST_OBJECT_FLAG_SET (sq->srcpad, GST_PAD_FLAG_PROXY_CAPS);
2739 gst_pad_set_element_private (sq->sinkpad, (gpointer) sq);
2740 gst_pad_set_element_private (sq->srcpad, (gpointer) sq);
2742 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
2744 /* only activate the pads when we are not in the NULL state
2745 * and add the pad under the state_lock to prevend state changes
2746 * between activating and adding */
2747 g_rec_mutex_lock (GST_STATE_GET_LOCK (mqueue));
2748 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
2749 gst_pad_set_active (sq->srcpad, TRUE);
2750 gst_pad_set_active (sq->sinkpad, TRUE);
2752 gst_element_add_pad (GST_ELEMENT (mqueue), sq->srcpad);
2753 gst_element_add_pad (GST_ELEMENT (mqueue), sq->sinkpad);
2754 g_rec_mutex_unlock (GST_STATE_GET_LOCK (mqueue));
2756 GST_DEBUG_OBJECT (mqueue, "GstSingleQueue [%d] created and pads added",