2 * Copyright (C) 2006 Edward Hervey <edward@fluendo.com>
3 * Copyright (C) 2007 Jan Schmidt <jan@fluendo.com>
4 * Copyright (C) 2007 Wim Taymans <wim@fluendo.com>
5 * Copyright (C) 2011 Sebastian Dröge <sebastian.droege@collabora.co.uk>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:element-multiqueue
27 * @see_also: #GstQueue
31 * Multiqueue is similar to a normal #GstQueue with the following additional
35 * <itemizedlist><title>Multiple streamhandling</title>
37 * The element handles queueing data on more than one stream at once. To
38 * achieve such a feature it has request sink pads (sink%u) and
39 * 'sometimes' src pads (src%u).
41 * When requesting a given sinkpad with gst_element_request_pad(),
42 * the associated srcpad for that stream will be created.
43 * Example: requesting sink1 will generate src1.
48 * <itemizedlist><title>Non-starvation on multiple streams</title>
50 * If more than one stream is used with the element, the streams' queues
51 * will be dynamically grown (up to a limit), in order to ensure that no
52 * stream is risking data starvation. This guarantees that at any given
53 * time there are at least N bytes queued and available for each individual
56 * If an EOS event comes through a srcpad, the associated queue will be
57 * considered as 'not-empty' in the queue-size-growing algorithm.
62 * <itemizedlist><title>Non-linked srcpads graceful handling</title>
64 * In order to better support dynamic switching between streams, the multiqueue
65 * (unlike the current GStreamer queue) continues to push buffers on non-linked
66 * pads rather than shutting down.
68 * In addition, to prevent a non-linked stream from very quickly consuming all
69 * available buffers and thus 'racing ahead' of the other streams, the element
70 * must ensure that buffers and inlined events for a non-linked stream are pushed
71 * in the same order as they were received, relative to the other streams
72 * controlled by the element. This means that a buffer cannot be pushed to a
73 * non-linked pad any sooner than buffers in any other stream which were received
81 * Data is queued until one of the limits specified by the
82 * #GstMultiQueue:max-size-buffers, #GstMultiQueue:max-size-bytes and/or
83 * #GstMultiQueue:max-size-time properties has been reached. Any attempt to push
84 * more buffers into the queue will block the pushing thread until more space
85 * becomes available. #GstMultiQueue:extra-size-buffers,
88 * #GstMultiQueue:extra-size-bytes and #GstMultiQueue:extra-size-time are
92 * The default queue size limits are 5 buffers, 10MB of data, or
* two seconds' worth of data, whichever is reached first. Note that the number
94 * of buffers will dynamically grow depending on the fill level of
98 * The #GstMultiQueue::underrun signal is emitted when all of the queues
99 * are empty. The #GstMultiQueue::overrun signal is emitted when one of the
101 * Both signals are emitted from the context of the streaming thread.
104 * When using #GstMultiQueue:sync-by-running-time the unlinked streams will
105 * be throttled by the highest running-time of linked streams. This allows
106 * further relinking of those unlinked streams without them being in the
107 * future (i.e. to achieve gapless playback).
108 * When dealing with streams which have got different consumption requirements
* downstream (ex: video decoders which will consume more buffers (in time) than
110 * audio decoders), it is recommended to group streams of the same type
111 * by using the pad "group-id" property. This will further throttle streams
112 * in time within that group.
123 #include "gstmultiqueue.h"
124 #include <gst/glib-compat-private.h>
128 * @sinkpad: associated sink #GstPad
129 * @srcpad: associated source #GstPad
131 * Structure containing all information and properties about
134 typedef struct _GstSingleQueue GstSingleQueue;
136 struct _GstSingleQueue
138 /* unique identifier of the queue */
140 /* group of streams to which this queue belongs to */
142 GstClockTimeDiff group_high_time;
144 GstMultiQueue *mqueue;
149 /* flowreturn of previous srcpad push */
150 GstFlowReturn srcresult;
151 /* If something was actually pushed on
152 * this pad after flushing/pad activation
153 * and the srcresult corresponds to something
159 GstSegment sink_segment;
160 GstSegment src_segment;
161 gboolean has_src_segment; /* preferred over initializing the src_segment to
162 * UNDEFINED as this doesn't requires adding ifs
163 * in every segment usage */
165 /* position of src/sink */
166 GstClockTimeDiff sinktime, srctime;
167 /* cached input value, used for interleave */
168 GstClockTimeDiff cached_sinktime;
169 /* TRUE if either position needs to be recalculated */
170 gboolean sink_tainted, src_tainted;
174 GstDataQueueSize max_size, extra_size;
175 GstClockTime cur_time;
181 /* Protected by global lock */
182 guint32 nextid; /* ID of the next object waiting to be pushed */
183 guint32 oldid; /* ID of the last object pushed (last in a series) */
184 guint32 last_oldid; /* Previously observed old_id, reset to MAXUINT32 on flush */
185 GstClockTimeDiff next_time; /* End running time of next buffer to be pushed */
186 GstClockTimeDiff last_time; /* Start running time of last pushed buffer */
187 GCond turn; /* SingleQueue turn waiting conditional */
189 /* for serialized queries */
192 GstQuery *last_handled_query;
194 /* For interleave calculation */
199 /* Extension of GstDataQueueItem structure for our usage */
200 typedef struct _GstMultiQueueItem GstMultiQueueItem;
202 struct _GstMultiQueueItem
204 GstMiniObject *object;
209 GDestroyNotify destroy;
215 static GstSingleQueue *gst_single_queue_new (GstMultiQueue * mqueue, guint id);
216 static void gst_single_queue_free (GstSingleQueue * squeue);
218 static void wake_up_next_non_linked (GstMultiQueue * mq);
219 static void compute_high_id (GstMultiQueue * mq);
220 static void compute_high_time (GstMultiQueue * mq, guint groupid);
221 static void single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
222 static void single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
224 static void update_buffering (GstMultiQueue * mq, GstSingleQueue * sq);
225 static void gst_multi_queue_post_buffering (GstMultiQueue * mq);
226 static void recheck_buffering_status (GstMultiQueue * mq);
228 static void gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full);
230 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink_%u",
233 GST_STATIC_CAPS_ANY);
235 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src_%u",
238 GST_STATIC_CAPS_ANY);
240 GST_DEBUG_CATEGORY_STATIC (multi_queue_debug);
241 #define GST_CAT_DEFAULT (multi_queue_debug)
243 /* Signals and args */
251 /* default limits, we try to keep up to 2 seconds of data and if there is not
252 * time, up to 10 MB. The number of buffers is dynamically scaled to make sure
253 * there is data in the queues. Normally, the byte and time limits are not hit
254 * in theses conditions. */
255 #define DEFAULT_MAX_SIZE_BYTES 10 * 1024 * 1024 /* 10 MB */
256 #define DEFAULT_MAX_SIZE_BUFFERS 5
257 #define DEFAULT_MAX_SIZE_TIME 2 * GST_SECOND
259 /* second limits. When we hit one of the above limits we are probably dealing
260 * with a badly muxed file and we scale the limits to these emergency values.
261 * This is currently not yet implemented.
262 * Since we dynamically scale the queue buffer size up to the limits but avoid
263 * going above the max-size-buffers when we can, we don't really need this
264 * aditional extra size. */
265 #define DEFAULT_EXTRA_SIZE_BYTES 10 * 1024 * 1024 /* 10 MB */
266 #define DEFAULT_EXTRA_SIZE_BUFFERS 5
267 #define DEFAULT_EXTRA_SIZE_TIME 3 * GST_SECOND
269 #define DEFAULT_USE_BUFFERING FALSE
270 #define DEFAULT_LOW_PERCENT 10
271 #define DEFAULT_HIGH_PERCENT 99
272 #define DEFAULT_SYNC_BY_RUNNING_TIME FALSE
273 #define DEFAULT_USE_INTERLEAVE FALSE
274 #define DEFAULT_UNLINKED_CACHE_TIME 250 * GST_MSECOND
279 PROP_EXTRA_SIZE_BYTES,
280 PROP_EXTRA_SIZE_BUFFERS,
281 PROP_EXTRA_SIZE_TIME,
283 PROP_MAX_SIZE_BUFFERS,
288 PROP_SYNC_BY_RUNNING_TIME,
290 PROP_UNLINKED_CACHE_TIME,
294 /* Explanation for buffer levels and percentages:
296 * The buffering_level functions here return a value in a normalized range
297 * that specifies the current fill level of a queue. The range goes from 0 to
298 * MAX_BUFFERING_LEVEL. The low/high watermarks also use this same range.
300 * This is not to be confused with the buffering_percent value, which is
301 * a *relative* quantity - relative to the low/high watermarks.
302 * buffering_percent = 0% means overall buffering_level is at the low watermark.
303 * buffering_percent = 100% means overall buffering_level is at the high watermark.
304 * buffering_percent is used for determining if the fill level has reached
305 * the high watermark, and for producing BUFFERING messages. This value
306 * always uses a 0..100 range (since it is a percentage).
308 * To avoid future confusions, whenever "buffering level" is mentioned, it
309 * refers to the absolute level which is in the 0..MAX_BUFFERING_LEVEL
310 * range. Whenever "buffering_percent" is mentioned, it refers to the
311 * percentage value that is relative to the low/high watermark. */
313 #define MAX_BUFFERING_LEVEL 100
315 /* GstMultiQueuePad */
317 #define DEFAULT_PAD_GROUP_ID 0
325 #define GST_TYPE_MULTIQUEUE_PAD (gst_multiqueue_pad_get_type())
326 #define GST_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePad))
327 #define GST_IS_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_MULTIQUEUE_PAD))
328 #define GST_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
329 #define GST_IS_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_MULTIQUEUE_PAD))
330 #define GST_MULTIQUEUE_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
332 struct _GstMultiQueuePad
339 struct _GstMultiQueuePadClass
341 GstPadClass parent_class;
344 GType gst_multiqueue_pad_get_type (void);
346 G_DEFINE_TYPE (GstMultiQueuePad, gst_multiqueue_pad, GST_TYPE_PAD);
348 gst_multiqueue_pad_get_property (GObject * object, guint prop_id,
349 GValue * value, GParamSpec * pspec)
351 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
354 case PROP_PAD_GROUP_ID:
356 g_value_set_uint (value, pad->sq->groupid);
359 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
365 gst_multiqueue_pad_set_property (GObject * object, guint prop_id,
366 const GValue * value, GParamSpec * pspec)
368 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
371 case PROP_PAD_GROUP_ID:
372 GST_OBJECT_LOCK (pad);
374 pad->sq->groupid = g_value_get_uint (value);
375 GST_OBJECT_UNLOCK (pad);
378 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
384 gst_multiqueue_pad_class_init (GstMultiQueuePadClass * klass)
386 GObjectClass *gobject_class = (GObjectClass *) klass;
388 gobject_class->set_property = gst_multiqueue_pad_set_property;
389 gobject_class->get_property = gst_multiqueue_pad_get_property;
392 * GstMultiQueuePad:group-id:
394 * Group to which this pad belongs.
398 g_object_class_install_property (gobject_class, PROP_PAD_GROUP_ID,
399 g_param_spec_uint ("group-id", "Group ID",
400 "Group to which this pad belongs", 0, G_MAXUINT32,
401 DEFAULT_PAD_GROUP_ID, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
405 gst_multiqueue_pad_init (GstMultiQueuePad * pad)
411 #define GST_MULTI_QUEUE_MUTEX_LOCK(q) G_STMT_START { \
412 g_mutex_lock (&q->qlock); \
415 #define GST_MULTI_QUEUE_MUTEX_UNLOCK(q) G_STMT_START { \
416 g_mutex_unlock (&q->qlock); \
419 #define SET_PERCENT(mq, perc) G_STMT_START { \
420 if (perc != mq->buffering_percent) { \
421 mq->buffering_percent = perc; \
422 mq->buffering_percent_changed = TRUE; \
423 GST_DEBUG_OBJECT (mq, "buffering %d percent", perc); \
427 /* Convenience function */
428 static inline GstClockTimeDiff
429 my_segment_to_running_time (GstSegment * segment, GstClockTime val)
431 GstClockTimeDiff res = GST_CLOCK_STIME_NONE;
433 if (GST_CLOCK_TIME_IS_VALID (val)) {
435 gst_segment_to_running_time_full (segment, GST_FORMAT_TIME, val, &val);
444 static void gst_multi_queue_finalize (GObject * object);
445 static void gst_multi_queue_set_property (GObject * object,
446 guint prop_id, const GValue * value, GParamSpec * pspec);
447 static void gst_multi_queue_get_property (GObject * object,
448 guint prop_id, GValue * value, GParamSpec * pspec);
450 static GstPad *gst_multi_queue_request_new_pad (GstElement * element,
451 GstPadTemplate * temp, const gchar * name, const GstCaps * caps);
452 static void gst_multi_queue_release_pad (GstElement * element, GstPad * pad);
453 static GstStateChangeReturn gst_multi_queue_change_state (GstElement *
454 element, GstStateChange transition);
456 static void gst_multi_queue_loop (GstPad * pad);
459 GST_DEBUG_CATEGORY_INIT (multi_queue_debug, "multiqueue", 0, "multiqueue element");
460 #define gst_multi_queue_parent_class parent_class
461 G_DEFINE_TYPE_WITH_CODE (GstMultiQueue, gst_multi_queue, GST_TYPE_ELEMENT,
464 static guint gst_multi_queue_signals[LAST_SIGNAL] = { 0 };
467 gst_multi_queue_class_init (GstMultiQueueClass * klass)
469 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
470 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
472 gobject_class->set_property = gst_multi_queue_set_property;
473 gobject_class->get_property = gst_multi_queue_get_property;
478 * GstMultiQueue::underrun:
479 * @multiqueue: the multiqueue instance
481 * This signal is emitted from the streaming thread when there is
482 * no data in any of the queues inside the multiqueue instance (underrun).
484 * This indicates either starvation or EOS from the upstream data sources.
486 gst_multi_queue_signals[SIGNAL_UNDERRUN] =
487 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
488 G_STRUCT_OFFSET (GstMultiQueueClass, underrun), NULL, NULL,
489 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
492 * GstMultiQueue::overrun:
493 * @multiqueue: the multiqueue instance
495 * Reports that one of the queues in the multiqueue is full (overrun).
496 * A queue is full if the total amount of data inside it (num-buffers, time,
497 * size) is higher than the boundary values which can be set through the
498 * GObject properties.
500 * This can be used as an indicator of pre-roll.
502 gst_multi_queue_signals[SIGNAL_OVERRUN] =
503 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
504 G_STRUCT_OFFSET (GstMultiQueueClass, overrun), NULL, NULL,
505 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
509 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BYTES,
510 g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
511 "Max. amount of data in the queue (bytes, 0=disable)",
512 0, G_MAXUINT, DEFAULT_MAX_SIZE_BYTES,
513 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
514 G_PARAM_STATIC_STRINGS));
515 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BUFFERS,
516 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
517 "Max. number of buffers in the queue (0=disable)", 0, G_MAXUINT,
518 DEFAULT_MAX_SIZE_BUFFERS,
519 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
520 G_PARAM_STATIC_STRINGS));
521 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_TIME,
522 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
523 "Max. amount of data in the queue (in ns, 0=disable)", 0, G_MAXUINT64,
524 DEFAULT_MAX_SIZE_TIME, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
525 G_PARAM_STATIC_STRINGS));
527 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BYTES,
528 g_param_spec_uint ("extra-size-bytes", "Extra Size (kB)",
529 "Amount of data the queues can grow if one of them is empty (bytes, 0=disable)"
530 " (NOT IMPLEMENTED)",
531 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BYTES,
532 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
533 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BUFFERS,
534 g_param_spec_uint ("extra-size-buffers", "Extra Size (buffers)",
535 "Amount of buffers the queues can grow if one of them is empty (0=disable)"
536 " (NOT IMPLEMENTED)",
537 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BUFFERS,
538 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
539 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_TIME,
540 g_param_spec_uint64 ("extra-size-time", "Extra Size (ns)",
541 "Amount of time the queues can grow if one of them is empty (in ns, 0=disable)"
542 " (NOT IMPLEMENTED)",
543 0, G_MAXUINT64, DEFAULT_EXTRA_SIZE_TIME,
544 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
547 * GstMultiQueue:use-buffering
549 * Enable the buffering option in multiqueue so that BUFFERING messages are
550 * emitted based on low-/high-percent thresholds.
552 g_object_class_install_property (gobject_class, PROP_USE_BUFFERING,
553 g_param_spec_boolean ("use-buffering", "Use buffering",
554 "Emit GST_MESSAGE_BUFFERING based on low-/high-percent thresholds",
555 DEFAULT_USE_BUFFERING, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
556 G_PARAM_STATIC_STRINGS));
558 * GstMultiQueue:low-percent
560 * Low threshold percent for buffering to start.
562 g_object_class_install_property (gobject_class, PROP_LOW_PERCENT,
563 g_param_spec_int ("low-percent", "Low percent",
564 "Low threshold for buffering to start", 0, 100,
565 DEFAULT_LOW_PERCENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
567 * GstMultiQueue:high-percent
569 * High threshold percent for buffering to finish.
571 g_object_class_install_property (gobject_class, PROP_HIGH_PERCENT,
572 g_param_spec_int ("high-percent", "High percent",
573 "High threshold for buffering to finish", 0, 100,
574 DEFAULT_HIGH_PERCENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
577 * GstMultiQueue:sync-by-running-time
579 * If enabled multiqueue will synchronize deactivated or not-linked streams
580 * to the activated and linked streams by taking the running time.
581 * Otherwise multiqueue will synchronize the deactivated or not-linked
582 * streams by keeping the order in which buffers and events arrived compared
583 * to active and linked streams.
585 g_object_class_install_property (gobject_class, PROP_SYNC_BY_RUNNING_TIME,
586 g_param_spec_boolean ("sync-by-running-time", "Sync By Running Time",
587 "Synchronize deactivated or not-linked streams by running time",
588 DEFAULT_SYNC_BY_RUNNING_TIME,
589 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
591 g_object_class_install_property (gobject_class, PROP_USE_INTERLEAVE,
592 g_param_spec_boolean ("use-interleave", "Use interleave",
593 "Adjust time limits based on input interleave",
594 DEFAULT_USE_INTERLEAVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
596 g_object_class_install_property (gobject_class, PROP_UNLINKED_CACHE_TIME,
597 g_param_spec_uint64 ("unlinked-cache-time", "Unlinked cache time (ns)",
598 "Extra buffering in time for unlinked streams (if 'sync-by-running-time')",
599 0, G_MAXUINT64, DEFAULT_UNLINKED_CACHE_TIME,
600 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
601 G_PARAM_STATIC_STRINGS));
604 gobject_class->finalize = gst_multi_queue_finalize;
606 gst_element_class_set_static_metadata (gstelement_class,
608 "Generic", "Multiple data queue", "Edward Hervey <edward@fluendo.com>");
609 gst_element_class_add_static_pad_template (gstelement_class, &sinktemplate);
610 gst_element_class_add_static_pad_template (gstelement_class, &srctemplate);
612 gstelement_class->request_new_pad =
613 GST_DEBUG_FUNCPTR (gst_multi_queue_request_new_pad);
614 gstelement_class->release_pad =
615 GST_DEBUG_FUNCPTR (gst_multi_queue_release_pad);
616 gstelement_class->change_state =
617 GST_DEBUG_FUNCPTR (gst_multi_queue_change_state);
621 gst_multi_queue_init (GstMultiQueue * mqueue)
623 mqueue->nbqueues = 0;
624 mqueue->queues = NULL;
626 mqueue->max_size.bytes = DEFAULT_MAX_SIZE_BYTES;
627 mqueue->max_size.visible = DEFAULT_MAX_SIZE_BUFFERS;
628 mqueue->max_size.time = DEFAULT_MAX_SIZE_TIME;
630 mqueue->extra_size.bytes = DEFAULT_EXTRA_SIZE_BYTES;
631 mqueue->extra_size.visible = DEFAULT_EXTRA_SIZE_BUFFERS;
632 mqueue->extra_size.time = DEFAULT_EXTRA_SIZE_TIME;
634 mqueue->use_buffering = DEFAULT_USE_BUFFERING;
635 mqueue->low_watermark = DEFAULT_LOW_PERCENT;
636 mqueue->high_watermark = DEFAULT_HIGH_PERCENT;
638 mqueue->sync_by_running_time = DEFAULT_SYNC_BY_RUNNING_TIME;
639 mqueue->use_interleave = DEFAULT_USE_INTERLEAVE;
640 mqueue->unlinked_cache_time = DEFAULT_UNLINKED_CACHE_TIME;
644 mqueue->high_time = GST_CLOCK_STIME_NONE;
646 g_mutex_init (&mqueue->qlock);
647 g_mutex_init (&mqueue->buffering_post_lock);
651 gst_multi_queue_finalize (GObject * object)
653 GstMultiQueue *mqueue = GST_MULTI_QUEUE (object);
655 g_list_foreach (mqueue->queues, (GFunc) gst_single_queue_free, NULL);
656 g_list_free (mqueue->queues);
657 mqueue->queues = NULL;
658 mqueue->queues_cookie++;
660 /* free/unref instance data */
661 g_mutex_clear (&mqueue->qlock);
662 g_mutex_clear (&mqueue->buffering_post_lock);
664 G_OBJECT_CLASS (parent_class)->finalize (object);
667 #define SET_CHILD_PROPERTY(mq,format) G_STMT_START { \
668 GList * tmp = mq->queues; \
670 GstSingleQueue *q = (GstSingleQueue*)tmp->data; \
671 q->max_size.format = mq->max_size.format; \
672 update_buffering (mq, q); \
673 gst_data_queue_limits_changed (q->queue); \
674 tmp = g_list_next(tmp); \
679 gst_multi_queue_set_property (GObject * object, guint prop_id,
680 const GValue * value, GParamSpec * pspec)
682 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
685 case PROP_MAX_SIZE_BYTES:
686 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
687 mq->max_size.bytes = g_value_get_uint (value);
688 SET_CHILD_PROPERTY (mq, bytes);
689 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
690 gst_multi_queue_post_buffering (mq);
692 case PROP_MAX_SIZE_BUFFERS:
695 gint new_size = g_value_get_uint (value);
697 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
699 mq->max_size.visible = new_size;
703 GstDataQueueSize size;
704 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
705 gst_data_queue_get_level (q->queue, &size);
707 GST_DEBUG_OBJECT (mq, "Queue %d: Requested buffers size: %d,"
708 " current: %d, current max %d", q->id, new_size, size.visible,
709 q->max_size.visible);
711 /* do not reduce max size below current level if the single queue
712 * has grown because of empty queue */
714 q->max_size.visible = new_size;
715 } else if (q->max_size.visible == 0) {
716 q->max_size.visible = MAX (new_size, size.visible);
717 } else if (new_size > size.visible) {
718 q->max_size.visible = new_size;
720 update_buffering (mq, q);
721 gst_data_queue_limits_changed (q->queue);
722 tmp = g_list_next (tmp);
725 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
726 gst_multi_queue_post_buffering (mq);
730 case PROP_MAX_SIZE_TIME:
731 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
732 mq->max_size.time = g_value_get_uint64 (value);
733 SET_CHILD_PROPERTY (mq, time);
734 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
735 gst_multi_queue_post_buffering (mq);
737 case PROP_EXTRA_SIZE_BYTES:
738 mq->extra_size.bytes = g_value_get_uint (value);
740 case PROP_EXTRA_SIZE_BUFFERS:
741 mq->extra_size.visible = g_value_get_uint (value);
743 case PROP_EXTRA_SIZE_TIME:
744 mq->extra_size.time = g_value_get_uint64 (value);
746 case PROP_USE_BUFFERING:
747 mq->use_buffering = g_value_get_boolean (value);
748 recheck_buffering_status (mq);
750 case PROP_LOW_PERCENT:
751 mq->low_watermark = g_value_get_int (value);
752 /* Recheck buffering status - the new low_watermark value might
753 * be above the current fill level. If the old low_watermark one
754 * was below the current level, this means that mq->buffering is
755 * disabled and needs to be re-enabled. */
756 recheck_buffering_status (mq);
758 case PROP_HIGH_PERCENT:
759 mq->high_watermark = g_value_get_int (value);
760 recheck_buffering_status (mq);
762 case PROP_SYNC_BY_RUNNING_TIME:
763 mq->sync_by_running_time = g_value_get_boolean (value);
765 case PROP_USE_INTERLEAVE:
766 mq->use_interleave = g_value_get_boolean (value);
768 case PROP_UNLINKED_CACHE_TIME:
769 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
770 mq->unlinked_cache_time = g_value_get_uint64 (value);
771 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
772 gst_multi_queue_post_buffering (mq);
775 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
781 gst_multi_queue_get_property (GObject * object, guint prop_id,
782 GValue * value, GParamSpec * pspec)
784 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
786 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
789 case PROP_EXTRA_SIZE_BYTES:
790 g_value_set_uint (value, mq->extra_size.bytes);
792 case PROP_EXTRA_SIZE_BUFFERS:
793 g_value_set_uint (value, mq->extra_size.visible);
795 case PROP_EXTRA_SIZE_TIME:
796 g_value_set_uint64 (value, mq->extra_size.time);
798 case PROP_MAX_SIZE_BYTES:
799 g_value_set_uint (value, mq->max_size.bytes);
801 case PROP_MAX_SIZE_BUFFERS:
802 g_value_set_uint (value, mq->max_size.visible);
804 case PROP_MAX_SIZE_TIME:
805 g_value_set_uint64 (value, mq->max_size.time);
807 case PROP_USE_BUFFERING:
808 g_value_set_boolean (value, mq->use_buffering);
810 case PROP_LOW_PERCENT:
811 g_value_set_int (value, mq->low_watermark);
813 case PROP_HIGH_PERCENT:
814 g_value_set_int (value, mq->high_watermark);
816 case PROP_SYNC_BY_RUNNING_TIME:
817 g_value_set_boolean (value, mq->sync_by_running_time);
819 case PROP_USE_INTERLEAVE:
820 g_value_set_boolean (value, mq->use_interleave);
822 case PROP_UNLINKED_CACHE_TIME:
823 g_value_set_uint64 (value, mq->unlinked_cache_time);
826 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
830 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
834 gst_multi_queue_iterate_internal_links (GstPad * pad, GstObject * parent)
836 GstIterator *it = NULL;
838 GstSingleQueue *squeue;
839 GstMultiQueue *mq = GST_MULTI_QUEUE (parent);
842 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
843 squeue = gst_pad_get_element_private (pad);
847 if (squeue->sinkpad == pad)
848 opad = gst_object_ref (squeue->srcpad);
849 else if (squeue->srcpad == pad)
850 opad = gst_object_ref (squeue->sinkpad);
854 g_value_init (&val, GST_TYPE_PAD);
855 g_value_set_object (&val, opad);
856 it = gst_iterator_new_single (GST_TYPE_PAD, &val);
857 g_value_unset (&val);
859 gst_object_unref (opad);
862 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
873 gst_multi_queue_request_new_pad (GstElement * element, GstPadTemplate * temp,
874 const gchar * name, const GstCaps * caps)
876 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
877 GstSingleQueue *squeue;
882 sscanf (name + 4, "_%u", &temp_id);
883 GST_LOG_OBJECT (element, "name : %s (id %d)", GST_STR_NULL (name), temp_id);
886 /* Create a new single queue, add the sink and source pad and return the sink pad */
887 squeue = gst_single_queue_new (mqueue, temp_id);
889 new_pad = squeue ? squeue->sinkpad : NULL;
891 GST_DEBUG_OBJECT (mqueue, "Returning pad %" GST_PTR_FORMAT, new_pad);
897 gst_multi_queue_release_pad (GstElement * element, GstPad * pad)
899 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
900 GstSingleQueue *sq = NULL;
903 GST_LOG_OBJECT (element, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
905 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
906 /* Find which single queue it belongs to, knowing that it should be a sinkpad */
907 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
908 sq = (GstSingleQueue *) tmp->data;
910 if (sq->sinkpad == pad)
915 GST_WARNING_OBJECT (mqueue, "That pad doesn't belong to this element ???");
916 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
920 /* FIXME: The removal of the singlequeue should probably not happen until it
921 * finishes draining */
923 /* remove it from the list */
924 mqueue->queues = g_list_delete_link (mqueue->queues, tmp);
925 mqueue->queues_cookie++;
927 /* FIXME : recompute next-non-linked */
928 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
930 /* delete SingleQueue */
931 gst_data_queue_set_flushing (sq->queue, TRUE);
933 gst_pad_set_active (sq->srcpad, FALSE);
934 gst_pad_set_active (sq->sinkpad, FALSE);
935 gst_pad_set_element_private (sq->srcpad, NULL);
936 gst_pad_set_element_private (sq->sinkpad, NULL);
937 gst_element_remove_pad (element, sq->srcpad);
938 gst_element_remove_pad (element, sq->sinkpad);
939 gst_single_queue_free (sq);
942 static GstStateChangeReturn
943 gst_multi_queue_change_state (GstElement * element, GstStateChange transition)
945 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
946 GstSingleQueue *sq = NULL;
947 GstStateChangeReturn result;
949 switch (transition) {
950 case GST_STATE_CHANGE_READY_TO_PAUSED:{
953 /* Set all pads to non-flushing */
954 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
955 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
956 sq = (GstSingleQueue *) tmp->data;
957 sq->flushing = FALSE;
960 /* the visible limit might not have been set on single queues that have grown because of other queueus were empty */
961 SET_CHILD_PROPERTY (mqueue, visible);
963 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
964 gst_multi_queue_post_buffering (mqueue);
968 case GST_STATE_CHANGE_PAUSED_TO_READY:{
971 /* Un-wait all waiting pads */
972 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
973 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
974 sq = (GstSingleQueue *) tmp->data;
976 g_cond_signal (&sq->turn);
978 sq->last_query = FALSE;
979 g_cond_signal (&sq->query_handled);
981 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
988 result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
990 switch (transition) {
999 gst_single_queue_flush (GstMultiQueue * mq, GstSingleQueue * sq, gboolean flush,
1004 GST_DEBUG_OBJECT (mq, "flush %s queue %d", (flush ? "start" : "stop"),
1008 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1009 sq->srcresult = GST_FLOW_FLUSHING;
1010 gst_data_queue_set_flushing (sq->queue, TRUE);
1012 sq->flushing = TRUE;
1014 /* wake up non-linked task */
1015 GST_LOG_OBJECT (mq, "SingleQueue %d : waking up eventually waiting task",
1017 g_cond_signal (&sq->turn);
1018 sq->last_query = FALSE;
1019 g_cond_signal (&sq->query_handled);
1020 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1022 GST_LOG_OBJECT (mq, "SingleQueue %d : pausing task", sq->id);
1023 result = gst_pad_pause_task (sq->srcpad);
1024 sq->sink_tainted = sq->src_tainted = TRUE;
1026 gst_single_queue_flush_queue (sq, full);
1028 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1029 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
1030 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
1031 sq->has_src_segment = FALSE;
1032 /* All pads start off not-linked for a smooth kick-off */
1033 sq->srcresult = GST_FLOW_OK;
1036 sq->max_size.visible = mq->max_size.visible;
1040 sq->last_oldid = G_MAXUINT32;
1041 sq->next_time = GST_CLOCK_STIME_NONE;
1042 sq->last_time = GST_CLOCK_STIME_NONE;
1043 sq->cached_sinktime = GST_CLOCK_STIME_NONE;
1044 sq->group_high_time = GST_CLOCK_STIME_NONE;
1045 gst_data_queue_set_flushing (sq->queue, FALSE);
1047 /* We will become active again on the next buffer/gap */
1050 /* Reset high time to be recomputed next */
1051 mq->high_time = GST_CLOCK_STIME_NONE;
1053 sq->flushing = FALSE;
1054 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1056 GST_LOG_OBJECT (mq, "SingleQueue %d : starting task", sq->id);
1058 gst_pad_start_task (sq->srcpad, (GstTaskFunction) gst_multi_queue_loop,
1064 /* WITH LOCK TAKEN */
/* Compute the fill level of @sq scaled to MAX_BUFFERING_LEVEL, taking the
 * maximum of the time-based and byte-based levels.  EOS, not-linked and
 * sparse queues are reported as completely full so they never stall
 * buffering decisions. */
1066 get_buffering_level (GstSingleQueue * sq)
1068 GstDataQueueSize size;
1069 gint buffering_level, tmp;
1071 gst_data_queue_get_level (sq->queue, &size);
1073 GST_DEBUG_OBJECT (sq->mqueue,
1074 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
1075 G_GUINT64_FORMAT, sq->id, size.visible, sq->max_size.visible,
1076 size.bytes, sq->max_size.bytes, sq->cur_time, sq->max_size.time);
1078 /* get bytes and time buffer levels and take the max */
1079 if (sq->is_eos || sq->srcresult == GST_FLOW_NOT_LINKED || sq->is_sparse) {
1080 buffering_level = MAX_BUFFERING_LEVEL;
1082 buffering_level = 0;
/* Only consider a dimension whose limit is enabled (> 0) */
1083 if (sq->max_size.time > 0) {
1085 gst_util_uint64_scale_int (sq->cur_time,
1086 MAX_BUFFERING_LEVEL, sq->max_size.time);
1087 buffering_level = MAX (buffering_level, tmp);
1089 if (sq->max_size.bytes > 0) {
1091 gst_util_uint64_scale_int (size.bytes,
1092 MAX_BUFFERING_LEVEL, sq->max_size.bytes);
1093 buffering_level = MAX (buffering_level, tmp);
1097 return buffering_level;
1100 /* WITH LOCK TAKEN */
/* Re-evaluate the buffering state of the multiqueue after @sq's level
 * changed.  Leaves buffering when @sq passes the high watermark; enters
 * buffering only when no other queue is above the high watermark and @sq
 * dropped below the low watermark. */
1102 update_buffering (GstMultiQueue * mq, GstSingleQueue * sq)
1104 gint buffering_level, percent;
1106 /* nothing to do when we are not in buffering mode */
1107 if (!mq->use_buffering)
1110 buffering_level = get_buffering_level (sq);
1112 /* scale so that if buffering_level equals the high watermark,
1113 * the percentage is 100% */
1114 percent = gst_util_uint64_scale (buffering_level, 100, mq->high_watermark);
1119 if (mq->buffering) {
1120 if (buffering_level >= mq->high_watermark) {
1121 mq->buffering = FALSE;
1123 /* make sure it increases */
1124 percent = MAX (mq->buffering_percent, percent);
1126 SET_PERCENT (mq, percent);
1129 gboolean is_buffering = TRUE;
/* Check all sibling queues: if any of them is already past the high
 * watermark we must not (re)enter buffering */
1131 for (iter = mq->queues; iter; iter = g_list_next (iter)) {
1132 GstSingleQueue *oq = (GstSingleQueue *) iter->data;
1134 if (get_buffering_level (oq) >= mq->high_watermark) {
1135 is_buffering = FALSE;
1141 if (is_buffering && buffering_level < mq->low_watermark) {
1142 mq->buffering = TRUE;
1143 SET_PERCENT (mq, percent);
/* Post a pending buffering message on the bus, if the percentage changed
 * since the last post.  Serialized with buffering_post_lock so messages
 * are emitted in order; the element message itself is posted outside the
 * multiqueue mutex to avoid lock inversion with the bus. */
1149 gst_multi_queue_post_buffering (GstMultiQueue * mq)
1151 GstMessage *msg = NULL;
1153 g_mutex_lock (&mq->buffering_post_lock);
1154 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1155 if (mq->buffering_percent_changed) {
1156 gint percent = mq->buffering_percent;
1158 mq->buffering_percent_changed = FALSE;
1160 GST_DEBUG_OBJECT (mq, "Going to post buffering: %d%%", percent);
1161 msg = gst_message_new_buffering (GST_OBJECT_CAST (mq), percent);
1163 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Post with only the post-lock held, never the multiqueue mutex */
1166 gst_element_post_message (GST_ELEMENT_CAST (mq), msg);
1168 g_mutex_unlock (&mq->buffering_post_lock);
/* Re-evaluate buffering after the use-buffering property (or watermarks)
 * changed: if buffering was just disabled, force a final 100% report;
 * if enabled, recompute the percentage across every single queue. */
1172 recheck_buffering_status (GstMultiQueue * mq)
1174 if (!mq->use_buffering && mq->buffering) {
1175 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1176 mq->buffering = FALSE;
1177 GST_DEBUG_OBJECT (mq,
1178 "Buffering property disabled, but queue was still buffering; "
1179 "setting buffering percentage to 100%%");
1180 SET_PERCENT (mq, 100);
1181 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1184 if (mq->use_buffering) {
1188 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1190 /* force buffering percentage to be recalculated */
1191 old_perc = mq->buffering_percent;
1192 mq->buffering_percent = 0;
/* Walk every single queue, update its contribution and kick its limits
 * so blocked producers re-check them */
1196 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
1197 update_buffering (mq, q);
1198 gst_data_queue_limits_changed (q->queue);
1199 tmp = g_list_next (tmp);
1202 GST_DEBUG_OBJECT (mq,
1203 "Recalculated buffering percentage: old: %d%% new: %d%%",
1204 old_perc, mq->buffering_percent);
1206 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Posting happens outside the mutex, see gst_multi_queue_post_buffering */
1209 gst_multi_queue_post_buffering (mq);
/* Recompute the interleave (the spread between the lowest and highest
 * cached sink running time over all non-sparse queues) and, when it
 * changed meaningfully, propagate it as the new max-size-time.
 * Called with the multiqueue lock taken. */
1213 calculate_interleave (GstMultiQueue * mq)
1215 GstClockTimeDiff low, high;
1216 GstClockTime interleave;
1219 low = high = GST_CLOCK_STIME_NONE;
1220 interleave = mq->interleave;
1221 /* Go over all single queues and calculate lowest/highest value */
1222 for (tmp = mq->queues; tmp; tmp = tmp->next) {
1223 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
1224 /* Ignore sparse streams for interleave calculation */
1227 /* If a stream is not active yet (hasn't received any buffers), set
1228 * a maximum interleave to allow it to receive more data */
1231 "queue %d is not active yet, forcing interleave to 5s", sq->id);
1232 mq->interleave = 5 * GST_SECOND;
1233 /* Update max-size time */
1234 mq->max_size.time = mq->interleave;
1235 SET_CHILD_PROPERTY (mq, time);
/* Track the min/max cached sink running time across queues */
1238 if (GST_CLOCK_STIME_IS_VALID (sq->cached_sinktime)) {
1239 if (low == GST_CLOCK_STIME_NONE || sq->cached_sinktime < low)
1240 low = sq->cached_sinktime;
1241 if (high == GST_CLOCK_STIME_NONE || sq->cached_sinktime > high)
1242 high = sq->cached_sinktime;
1245 "queue %d , sinktime:%" GST_STIME_FORMAT " low:%" GST_STIME_FORMAT
1246 " high:%" GST_STIME_FORMAT, sq->id,
1247 GST_STIME_ARGS (sq->cached_sinktime), GST_STIME_ARGS (low),
1248 GST_STIME_ARGS (high));
1251 if (GST_CLOCK_STIME_IS_VALID (low) && GST_CLOCK_STIME_IS_VALID (high)) {
1252 interleave = high - low;
1253 /* Padding of interleave and minimum value */
1254 /* FIXME : Make the minimum time interleave a property */
/* 150% of the observed spread plus a fixed 250ms safety margin */
1255 interleave = (150 * interleave / 100) + 250 * GST_MSECOND;
1257 /* Update the stored interleave if:
1258 * * No data has arrived yet (high == low)
1259 * * Or it went higher
1260 * * Or it went lower and we've gone past the previous interleave needed */
1261 if (high == low || interleave > mq->interleave ||
1262 ((mq->last_interleave_update + (2 * MIN (GST_SECOND,
1263 mq->interleave)) < low)
1264 && interleave < (mq->interleave * 3 / 4))) {
1265 /* Update the interleave */
1266 mq->interleave = interleave;
1267 mq->last_interleave_update = high;
1268 /* Update max-size time */
1269 mq->max_size.time = mq->interleave;
1270 SET_CHILD_PROPERTY (mq, time);
1275 GST_DEBUG_OBJECT (mq,
1276 "low:%" GST_STIME_FORMAT " high:%" GST_STIME_FORMAT " interleave:%"
1277 GST_TIME_FORMAT " mq->interleave:%" GST_TIME_FORMAT
1278 " last_interleave_update:%" GST_STIME_FORMAT, GST_STIME_ARGS (low),
1279 GST_STIME_ARGS (high), GST_TIME_ARGS (interleave),
1280 GST_TIME_ARGS (mq->interleave),
1281 GST_STIME_ARGS (mq->last_interleave_update));
1285 /* calculate the diff between running time on the sink and src of the queue.
1286 * This is the total amount of time in the queue.
1287 * WITH LOCK TAKEN */
1289 update_time_level (GstMultiQueue * mq, GstSingleQueue * sq)
1291 GstClockTimeDiff sink_time, src_time;
/* Sink side: recompute the running time only when the cached value is
 * tainted (segment/position changed since last computation) */
1293 if (sq->sink_tainted) {
1294 sink_time = sq->sinktime = my_segment_to_running_time (&sq->sink_segment,
1295 sq->sink_segment.position);
1297 GST_DEBUG_OBJECT (mq,
1298 "queue %d sink_segment.position:%" GST_TIME_FORMAT ", sink_time:%"
1299 GST_STIME_FORMAT, sq->id, GST_TIME_ARGS (sq->sink_segment.position),
1300 GST_STIME_ARGS (sink_time));
1302 if (G_UNLIKELY (sq->last_time == GST_CLOCK_STIME_NONE)) {
1303 /* If the single queue still doesn't have a last_time set, this means
1304 * that nothing has been pushed out yet.
1305 * In order for the high_time computation to be as efficient as possible,
1306 * we set the last_time */
1307 sq->last_time = sink_time;
1309 if (G_UNLIKELY (sink_time != GST_CLOCK_STIME_NONE)) {
1310 /* if we have a time, we become untainted and use the time */
1311 sq->sink_tainted = FALSE;
1312 if (mq->use_interleave) {
1313 sq->cached_sinktime = sink_time;
1314 calculate_interleave (mq);
/* Cached value still valid, reuse it */
1318 sink_time = sq->sinktime;
/* Src side: same taint-based caching as above */
1320 if (sq->src_tainted) {
1321 GstSegment *segment;
1324 if (sq->has_src_segment) {
1325 segment = &sq->src_segment;
1326 position = sq->src_segment.position;
1329 * If the src pad had no segment yet, use the sink segment
1330 * to avoid signalling overrun if the received sink segment has a
1331 * a position > max-size-time while the src pad time would be the default=0
1333 * This can happen when switching pads on chained/adaptive streams and the
1334 * new chain has a segment with a much larger position
1336 segment = &sq->sink_segment;
1337 position = sq->sink_segment.position;
1340 src_time = sq->srctime = my_segment_to_running_time (segment, position);
1341 /* if we have a time, we become untainted and use the time */
1342 if (G_UNLIKELY (src_time != GST_CLOCK_STIME_NONE)) {
1343 sq->src_tainted = FALSE;
1346 src_time = sq->srctime;
1348 GST_DEBUG_OBJECT (mq,
1349 "queue %d, sink %" GST_STIME_FORMAT ", src %" GST_STIME_FORMAT, sq->id,
1350 GST_STIME_ARGS (sink_time), GST_STIME_ARGS (src_time));
1352 /* This allows for streams with out of order timestamping - sometimes the
1353 * emerging timestamp is later than the arriving one(s) */
1354 if (G_LIKELY (GST_CLOCK_STIME_IS_VALID (sink_time) &&
1355 GST_CLOCK_STIME_IS_VALID (src_time) && sink_time > src_time))
1356 sq->cur_time = sink_time - src_time;
1360 /* updating the time level can change the buffering state */
1361 update_buffering (mq, sq);
1366 /* take a SEGMENT event and apply the values to segment, updating the time
1367 * level of queue. */
1369 apply_segment (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1370 GstSegment * segment)
1372 gst_event_copy_segment (event, segment);
1374 /* now configure the values, we use these to track timestamps on the
1376 if (segment->format != GST_FORMAT_TIME) {
1377 /* non-time format, pretend the current time segment is closed with a
1378 * 0 start and unknown stop time. */
1379 segment->format = GST_FORMAT_TIME;
1384 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1386 /* Make sure we have a valid initial segment position (and not garbage
1388 if (segment->rate > 0.0)
1389 segment->position = segment->start;
1391 segment->position = segment->stop;
/* Mark the matching side tainted so update_time_level recomputes it;
 * @segment is either &sq->sink_segment or &sq->src_segment */
1392 if (segment == &sq->sink_segment)
1393 sq->sink_tainted = TRUE;
1395 sq->has_src_segment = TRUE;
1396 sq->src_tainted = TRUE;
1399 GST_DEBUG_OBJECT (mq,
1400 "queue %d, configured SEGMENT %" GST_SEGMENT_FORMAT, sq->id, segment);
1402 /* segment can update the time level of the queue */
1403 update_time_level (mq, sq);
1405 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Buffering messages are posted outside the lock */
1406 gst_multi_queue_post_buffering (mq);
1409 /* take a buffer and update segment, updating the time level of the queue. */
1411 apply_buffer (GstMultiQueue * mq, GstSingleQueue * sq, GstClockTime timestamp,
1412 GstClockTime duration, GstSegment * segment)
1414 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1416 /* if no timestamp is set, assume it's continuous with the previous
1418 if (timestamp == GST_CLOCK_TIME_NONE)
1419 timestamp = segment->position;
/* Advance to the end of the buffer when a duration is known */
1422 if (duration != GST_CLOCK_TIME_NONE)
1423 timestamp += duration;
1425 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1426 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1427 GST_TIME_ARGS (timestamp));
1429 segment->position = timestamp;
/* Taint the matching side so update_time_level recomputes its time */
1431 if (segment == &sq->sink_segment)
1432 sq->sink_tainted = TRUE;
1434 sq->src_tainted = TRUE;
1436 /* calc diff with other end */
1437 update_time_level (mq, sq);
1438 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1439 gst_multi_queue_post_buffering (mq);
/* Take a GAP event and advance @segment's position like a buffer would,
 * then update the queue's time level.  Mirrors apply_buffer for gaps. */
1443 apply_gap (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1444 GstSegment * segment)
1446 GstClockTime timestamp;
1447 GstClockTime duration;
1449 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1451 gst_event_parse_gap (event, &timestamp, &duration);
1453 if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
/* Position moves to the end of the gap when a duration is given */
1455 if (GST_CLOCK_TIME_IS_VALID (duration)) {
1456 timestamp += duration;
1459 segment->position = timestamp;
1461 if (segment == &sq->sink_segment)
1462 sq->sink_tainted = TRUE;
1464 sq->src_tainted = TRUE;
1466 /* calc diff with other end */
1467 update_time_level (mq, sq);
1470 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1471 gst_multi_queue_post_buffering (mq);
/* Return the running time of @object in @segment, or GST_CLOCK_STIME_NONE
 * when it has no usable time.  For buffers: DTS-or-PTS (plus duration when
 * @end is TRUE), clamped to the segment stop.  For buffer lists: the last
 * timestamped buffer wins.  For SEGMENT events: the running time of the
 * new segment's start. */
1474 static GstClockTimeDiff
1475 get_running_time (GstSegment * segment, GstMiniObject * object, gboolean end)
1477 GstClockTimeDiff time = GST_CLOCK_STIME_NONE;
1479 if (GST_IS_BUFFER (object)) {
1480 GstBuffer *buf = GST_BUFFER_CAST (object);
1481 GstClockTime btime = GST_BUFFER_DTS_OR_PTS (buf);
1483 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1484 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1485 btime += GST_BUFFER_DURATION (buf);
/* Never report beyond the configured segment stop */
1486 if (btime > segment->stop)
1487 btime = segment->stop;
1488 time = my_segment_to_running_time (segment, btime);
1490 } else if (GST_IS_BUFFER_LIST (object)) {
1491 GstBufferList *list = GST_BUFFER_LIST_CAST (object);
1495 n = gst_buffer_list_length (list);
/* Iterate all buffers; 'time' ends up reflecting the last one that
 * carried a valid timestamp */
1496 for (i = 0; i < n; i++) {
1498 buf = gst_buffer_list_get (list, i);
1499 btime = GST_BUFFER_DTS_OR_PTS (buf);
1500 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1501 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1502 btime += GST_BUFFER_DURATION (buf);
1503 if (btime > segment->stop)
1504 btime = segment->stop;
1505 time = my_segment_to_running_time (segment, btime);
1512 } else if (GST_IS_EVENT (object)) {
1513 GstEvent *event = GST_EVENT_CAST (object);
1515 /* For newsegment events return the running time of the start position */
1516 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
1517 const GstSegment *new_segment;
1519 gst_event_parse_segment (event, &new_segment);
1520 if (new_segment->format == GST_FORMAT_TIME) {
1522 my_segment_to_running_time ((GstSegment *) new_segment,
1523 new_segment->start);
/* Push one dequeued mini-object (buffer, event or serialized query)
 * downstream on @sq's srcpad.  When *allow_drop is TRUE we are in EOS-drop
 * mode and items are discarded instead of pushed; a SEGMENT resets the
 * flag and drop mode.  Returns the resulting flow return. */
1532 static GstFlowReturn
1533 gst_single_queue_push_one (GstMultiQueue * mq, GstSingleQueue * sq,
1534 GstMiniObject * object, gboolean * allow_drop)
1536 GstFlowReturn result = sq->srcresult;
1538 if (GST_IS_BUFFER (object)) {
1540 GstClockTime timestamp, duration;
1542 buffer = GST_BUFFER_CAST (object);
1543 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
1544 duration = GST_BUFFER_DURATION (buffer);
/* Track src-side position before pushing */
1546 apply_buffer (mq, sq, timestamp, duration, &sq->src_segment);
1548 /* Applying the buffer may have made the queue non-full again, unblock it if needed */
1549 gst_data_queue_limits_changed (sq->queue);
1551 if (G_UNLIKELY (*allow_drop)) {
1552 GST_DEBUG_OBJECT (mq,
1553 "SingleQueue %d : Dropping EOS buffer %p with ts %" GST_TIME_FORMAT,
1554 sq->id, buffer, GST_TIME_ARGS (timestamp));
1555 gst_buffer_unref (buffer);
1557 GST_DEBUG_OBJECT (mq,
1558 "SingleQueue %d : Pushing buffer %p with ts %" GST_TIME_FORMAT,
1559 sq->id, buffer, GST_TIME_ARGS (timestamp));
1560 result = gst_pad_push (sq->srcpad, buffer);
1562 } else if (GST_IS_EVENT (object)) {
1565 event = GST_EVENT_CAST (object);
1567 switch (GST_EVENT_TYPE (event)) {
/* EOS: report GST_FLOW_EOS to the loop; exit drop mode if active */
1569 result = GST_FLOW_EOS;
1570 if (G_UNLIKELY (*allow_drop))
1571 *allow_drop = FALSE;
1573 case GST_EVENT_SEGMENT:
1574 apply_segment (mq, sq, event, &sq->src_segment);
1575 /* Applying the segment may have made the queue non-full again, unblock it if needed */
1576 gst_data_queue_limits_changed (sq->queue);
/* A new segment ends EOS-drop mode and restores normal flow */
1577 if (G_UNLIKELY (*allow_drop)) {
1578 result = GST_FLOW_OK;
1579 *allow_drop = FALSE;
1583 apply_gap (mq, sq, event, &sq->src_segment);
1584 /* Applying the gap may have made the queue non-full again, unblock it if needed */
1585 gst_data_queue_limits_changed (sq->queue);
1591 if (G_UNLIKELY (*allow_drop)) {
1592 GST_DEBUG_OBJECT (mq,
1593 "SingleQueue %d : Dropping EOS event %p of type %s",
1594 sq->id, event, GST_EVENT_TYPE_NAME (event));
1595 gst_event_unref (event);
1597 GST_DEBUG_OBJECT (mq,
1598 "SingleQueue %d : Pushing event %p of type %s",
1599 sq->id, event, GST_EVENT_TYPE_NAME (event));
1601 gst_pad_push_event (sq->srcpad, event);
1603 } else if (GST_IS_QUERY (object)) {
1607 query = GST_QUERY_CAST (object);
1609 if (G_UNLIKELY (*allow_drop)) {
1610 GST_DEBUG_OBJECT (mq,
1611 "SingleQueue %d : Dropping EOS query %p", sq->id, query);
1612 gst_query_unref (query);
1615 res = gst_pad_peer_query (sq->srcpad, query);
/* Hand the serialized query result back to the sinkpad thread waiting
 * on query_handled */
1618 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1619 sq->last_query = res;
1620 sq->last_handled_query = query;
1621 g_cond_signal (&sq->query_handled);
1622 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1624 g_warning ("Unexpected object in singlequeue %u (refcounting problem?)",
/* Take ownership of the item's wrapped mini-object, clearing the item's
 * reference so gst_multi_queue_item_destroy won't unref it. */
1632 static GstMiniObject *
1633 gst_multi_queue_item_steal_object (GstMultiQueueItem * item)
1638 item->object = NULL;
/* Free a queue item, unreffing the wrapped object unless it was a query
 * (queries stay owned by the upstream caller) or was already stolen. */
1644 gst_multi_queue_item_destroy (GstMultiQueueItem * item)
1646 if (!item->is_query && item->object)
1647 gst_mini_object_unref (item->object);
1648 g_slice_free (GstMultiQueueItem, item);
1651 /* takes ownership of passed mini object! */
/* Wrap a buffer in a GstMultiQueueItem with its size/duration recorded
 * for the data queue's level accounting; counted as a visible item. */
1652 static GstMultiQueueItem *
1653 gst_multi_queue_buffer_item_new (GstMiniObject * object, guint32 curid)
1655 GstMultiQueueItem *item;
1657 item = g_slice_new (GstMultiQueueItem);
1658 item->object = object;
1659 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
/* posid orders items across all single queues */
1660 item->posid = curid;
1661 item->is_query = GST_IS_QUERY (object);
1663 item->size = gst_buffer_get_size (GST_BUFFER_CAST (object));
1664 item->duration = GST_BUFFER_DURATION (object);
1665 if (item->duration == GST_CLOCK_TIME_NONE)
1667 item->visible = TRUE;
/* Wrap a non-buffer mini-object (event or query) in a GstMultiQueueItem;
 * such items are not counted as visible in the data queue's levels. */
1671 static GstMultiQueueItem *
1672 gst_multi_queue_mo_item_new (GstMiniObject * object, guint32 curid)
1674 GstMultiQueueItem *item;
1676 item = g_slice_new (GstMultiQueueItem);
1677 item->object = object;
1678 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
1679 item->posid = curid;
1680 item->is_query = GST_IS_QUERY (object);
1684 item->visible = FALSE;
1688 /* Each main loop attempts to push buffers until the return value
1689 * is not-linked. not-linked pads are not allowed to push data beyond
1690 * any linked pads, so they don't 'rush ahead of the pack'.
/* Srcpad task: pops one item from the single queue, optionally waits for
 * the not-linked synchronization condition (id- or running-time-based),
 * pushes the item, and maintains highid/high_time plus EOS-drop state.
 * Pauses itself on fatal flow returns. */
1693 gst_multi_queue_loop (GstPad * pad)
1696 GstMultiQueueItem *item;
1697 GstDataQueueItem *sitem;
1699 GstMiniObject *object = NULL;
1701 GstFlowReturn result;
1702 GstClockTimeDiff next_time;
1704 gboolean do_update_buffering = FALSE;
1705 gboolean dropping = FALSE;
1707 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
1711 GST_DEBUG_OBJECT (mq, "SingleQueue %d : trying to pop an object", sq->id);
1716 /* Get something from the queue, blocking until that happens, or we get
1718 if (!(gst_data_queue_pop (sq->queue, &sitem)))
1721 item = (GstMultiQueueItem *) sitem;
1722 newid = item->posid;
1724 /* steal the object and destroy the item */
1725 object = gst_multi_queue_item_steal_object (item);
1726 gst_multi_queue_item_destroy (item);
1728 is_buffer = GST_IS_BUFFER (object);
1730 /* Get running time of the item. Events will have GST_CLOCK_STIME_NONE */
1731 next_time = get_running_time (&sq->src_segment, object, FALSE);
1733 GST_LOG_OBJECT (mq, "SingleQueue %d : newid:%d , oldid:%d",
1734 sq->id, newid, sq->last_oldid);
1736 /* If we're not-linked, we do some extra work because we might need to
1737 * wait before pushing. If we're linked but there's a gap in the IDs,
1738 * or it's the first loop, or we just passed the previous highid,
1739 * we might need to wake some sleeping pad up, so there's extra work
1741 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1742 if (sq->srcresult == GST_FLOW_NOT_LINKED
1743 || (sq->last_oldid == G_MAXUINT32) || (newid != (sq->last_oldid + 1))
1744 || sq->last_oldid > mq->highid) {
1745 GST_LOG_OBJECT (mq, "CHECKING sq->srcresult: %s",
1746 gst_flow_get_name (sq->srcresult));
1748 /* Check again if we're flushing after the lock is taken,
1749 * the flush flag might have been changed in the meantime */
1751 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1755 /* Update the nextid so other threads know when to wake us up */
1757 /* Take into account the extra cache time since we're unlinked */
1758 if (GST_CLOCK_STIME_IS_VALID (next_time))
1759 next_time += mq->unlinked_cache_time;
1760 sq->next_time = next_time;
1762 /* Update the oldid (the last ID we output) for highid tracking */
1763 if (sq->last_oldid != G_MAXUINT32)
1764 sq->oldid = sq->last_oldid;
1766 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
1767 gboolean should_wait;
1768 /* Go to sleep until it's time to push this buffer */
1770 /* Recompute the highid */
1771 compute_high_id (mq);
1772 /* Recompute the high time */
1773 compute_high_time (mq, sq->groupid);
1775 GST_DEBUG_OBJECT (mq,
1776 "groupid %d high_time %" GST_STIME_FORMAT " next_time %"
1777 GST_STIME_FORMAT, sq->groupid, GST_STIME_ARGS (sq->group_high_time),
1778 GST_STIME_ARGS (next_time));
1780 if (mq->sync_by_running_time)
1781 /* In this case we only need to wait if:
1782 * 1) there is a time against which to wait
1783 * 2) and either we have gone over the high_time or there is no
1785 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
1786 (sq->group_high_time == GST_CLOCK_STIME_NONE
1787 || next_time > sq->group_high_time);
/* id-based mode: sleep while this item is ahead of the pack */
1789 should_wait = newid > mq->highid;
1791 while (should_wait && sq->srcresult == GST_FLOW_NOT_LINKED) {
1793 GST_DEBUG_OBJECT (mq,
1794 "queue %d sleeping for not-linked wakeup with "
1795 "newid %u, highid %u, next_time %" GST_STIME_FORMAT
1796 ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
1797 GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time));
1799 /* Wake up all non-linked pads before we sleep */
1800 wake_up_next_non_linked (mq);
1803 g_cond_wait (&sq->turn, &mq->qlock);
1807 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1811 /* Recompute the high time and ID */
1812 compute_high_time (mq, sq->groupid);
1813 compute_high_id (mq);
1815 GST_DEBUG_OBJECT (mq, "queue %d woken from sleeping for not-linked "
1816 "wakeup with newid %u, highid %u, next_time %" GST_STIME_FORMAT
1817 ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
1818 GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time));
/* Re-evaluate the wait condition after waking */
1820 if (mq->sync_by_running_time)
1821 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
1822 (sq->group_high_time == GST_CLOCK_STIME_NONE
1823 || next_time > sq->group_high_time);
1825 should_wait = newid > mq->highid;
1828 /* Re-compute the high_id in case someone else pushed */
1829 compute_high_id (mq);
1830 compute_high_time (mq, sq->groupid);
1832 compute_high_id (mq);
1833 compute_high_time (mq, sq->groupid);
1834 /* Wake up all non-linked pads */
1835 wake_up_next_non_linked (mq);
1837 /* We're done waiting, we can clear the nextid and nexttime */
1839 sq->next_time = GST_CLOCK_STIME_NONE;
1841 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1846 GST_LOG_OBJECT (mq, "sq:%d BEFORE PUSHING sq->srcresult: %s", sq->id,
1847 gst_flow_get_name (sq->srcresult));
1849 /* Update time stats */
1850 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1851 next_time = get_running_time (&sq->src_segment, object, TRUE);
1852 if (GST_CLOCK_STIME_IS_VALID (next_time)) {
1853 if (sq->last_time == GST_CLOCK_STIME_NONE || sq->last_time < next_time)
1854 sq->last_time = next_time;
1855 if (mq->high_time == GST_CLOCK_STIME_NONE || mq->high_time <= next_time) {
1856 /* Wake up all non-linked pads now that we advanced the high time */
1857 mq->high_time = next_time;
1858 wake_up_next_non_linked (mq);
1861 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1863 /* Try to push out the new object */
1864 result = gst_single_queue_push_one (mq, sq, object, &dropping);
1867 /* Check if we pushed something already and if this is
1868 * now a switch from an active to a non-active stream.
1870 * If it is, we reset all the waiting streams, let them
1871 * push another buffer to see if they're now active again.
1872 * This allows faster switching between streams and prevents
1873 * deadlocks if downstream does any waiting too.
1875 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1876 if (sq->pushed && sq->srcresult == GST_FLOW_OK
1877 && result == GST_FLOW_NOT_LINKED) {
1880 GST_LOG_OBJECT (mq, "SingleQueue %d : Changed from active to non-active",
1883 compute_high_id (mq);
1884 compute_high_time (mq, sq->groupid);
1885 do_update_buffering = TRUE;
1887 /* maybe no-one is waiting */
1888 if (mq->numwaiting > 0) {
1889 /* Else figure out which singlequeue(s) need waking up */
1890 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
1891 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
1893 if (sq2->srcresult == GST_FLOW_NOT_LINKED) {
1894 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq2->id);
1895 sq2->pushed = FALSE;
1896 sq2->srcresult = GST_FLOW_OK;
1897 g_cond_signal (&sq2->turn);
1906 /* now hold on a bit;
1907 * can not simply throw this result to upstream, because
1908 * that might already be onto another segment, so we have to make
1909 * sure we are relaying the correct info wrt proper segment */
1910 if (result == GST_FLOW_EOS && !dropping &&
1911 sq->srcresult != GST_FLOW_NOT_LINKED) {
1912 GST_DEBUG_OBJECT (mq, "starting EOS drop on sq %d", sq->id);
1914 /* pretend we have not seen EOS yet for upstream's sake */
1915 result = sq->srcresult;
1916 } else if (dropping && gst_data_queue_is_empty (sq->queue)) {
1917 /* queue empty, so stop dropping
1918 * we can commit the result we have now,
1919 * which is either OK after a segment, or EOS */
1920 GST_DEBUG_OBJECT (mq, "committed EOS drop on sq %d", sq->id);
1922 result = GST_FLOW_EOS;
1924 sq->srcresult = result;
1925 sq->last_oldid = newid;
1927 if (do_update_buffering)
1928 update_buffering (mq, sq);
1930 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1931 gst_multi_queue_post_buffering (mq);
1933 GST_LOG_OBJECT (mq, "sq:%d AFTER PUSHING sq->srcresult: %s (is_eos:%d)",
1934 sq->id, gst_flow_get_name (sq->srcresult), GST_PAD_IS_EOS (sq->srcpad));
1936 /* Need to make sure wake up any sleeping pads when we exit */
1937 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1938 if (mq->numwaiting > 0 && (GST_PAD_IS_EOS (sq->srcpad)
1939 || sq->srcresult == GST_FLOW_EOS)) {
1940 compute_high_time (mq, sq->groupid);
1941 compute_high_id (mq);
1942 wake_up_next_non_linked (mq);
1944 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Error paths below: anything not OK / NOT_LINKED / EOS is fatal */
1949 if (result != GST_FLOW_OK && result != GST_FLOW_NOT_LINKED
1950 && result != GST_FLOW_EOS)
1958 gst_mini_object_unref (object);
/* Unblock a sinkpad thread waiting on a serialized query */
1960 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1961 sq->last_query = FALSE;
1962 g_cond_signal (&sq->query_handled);
1964 /* Post an error message if we got EOS while downstream
1965 * has returned an error flow return. After EOS there
1966 * will be no further buffer which could propagate the
1968 if (sq->is_eos && sq->srcresult < GST_FLOW_EOS) {
1969 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1970 GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
1972 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1975 /* upstream needs to see fatal result ASAP to shut things down,
1976 * but might be stuck in one of our other full queues;
1977 * so empty this one and trigger dynamic queue growth. At
1978 * this point the srcresult is not OK, NOT_LINKED
1979 * or EOS, i.e. a real failure */
1980 gst_single_queue_flush_queue (sq, FALSE);
1981 single_queue_underrun_cb (sq->queue, sq);
1982 gst_data_queue_set_flushing (sq->queue, TRUE);
1983 gst_pad_pause_task (sq->srcpad);
1984 GST_CAT_LOG_OBJECT (multi_queue_debug, mq,
1985 "SingleQueue[%d] task paused, reason:%s",
1986 sq->id, gst_flow_get_name (sq->srcresult));
1992 * gst_multi_queue_chain:
1994 * This is similar to GstQueue's chain function, except:
1995 * _ we don't have leak behaviours,
1996 * _ we push with a unique id (curid)
1998 static GstFlowReturn
1999 gst_multi_queue_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
2003 GstMultiQueueItem *item;
2005 GstClockTime timestamp, duration;
2007 sq = gst_pad_get_element_private (pad);
2010 /* if eos, we are always full, so avoid hanging incoming indefinitely */
2016 /* Get a unique incrementing id */
2017 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2019 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
2020 duration = GST_BUFFER_DURATION (buffer);
2023 "SingleQueue %d : about to enqueue buffer %p with id %d (pts:%"
2024 GST_TIME_FORMAT " dts:%" GST_TIME_FORMAT " dur:%" GST_TIME_FORMAT ")",
2025 sq->id, buffer, curid, GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2026 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), GST_TIME_ARGS (duration));
/* Item takes ownership of the buffer */
2028 item = gst_multi_queue_buffer_item_new (GST_MINI_OBJECT_CAST (buffer), curid);
2030 /* Update interleave before pushing data into queue */
2031 if (mq->use_interleave) {
2032 GstClockTime val = timestamp;
2033 GstClockTimeDiff dval;
2035 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
/* No timestamp: fall back to the current sink segment position */
2036 if (val == GST_CLOCK_TIME_NONE)
2037 val = sq->sink_segment.position;
2038 if (duration != GST_CLOCK_TIME_NONE)
2041 dval = my_segment_to_running_time (&sq->sink_segment, val);
2042 if (GST_CLOCK_STIME_IS_VALID (dval)) {
2043 sq->cached_sinktime = dval;
2044 GST_DEBUG_OBJECT (mq,
2045 "Queue %d cached sink time now %" G_GINT64_FORMAT " %"
2046 GST_STIME_FORMAT, sq->id, sq->cached_sinktime,
2047 GST_STIME_ARGS (sq->cached_sinktime));
2048 calculate_interleave (mq);
2050 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Blocks when the queue is full; fails when flushing */
2053 if (!(gst_data_queue_push (sq->queue, (GstDataQueueItem *) item)))
2056 /* update time level, we must do this after pushing the data in the queue so
2057 * that we never end up filling the queue first. */
2058 apply_buffer (mq, sq, timestamp, duration, &sq->sink_segment);
2061 return sq->srcresult;
/* Error path: data queue push refused (task paused / flushing) */
2066 GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
2067 sq->id, gst_flow_get_name (sq->srcresult));
2068 gst_multi_queue_item_destroy (item);
/* Error path: already EOS, drop the incoming buffer */
2073 GST_DEBUG_OBJECT (mq, "we are EOS, dropping buffer, return EOS");
2074 gst_buffer_unref (buffer);
2075 return GST_FLOW_EOS;
/* Sinkpad activate-mode handler (push mode only): on activation reset the
 * queue to a usable state; on deactivation set FLUSHING, wake waiters,
 * wait for the streaming thread via the pad stream lock, then flush the
 * data queue. */
2080 gst_multi_queue_sink_activate_mode (GstPad * pad, GstObject * parent,
2081 GstPadMode mode, gboolean active)
2087 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
2088 mq = (GstMultiQueue *) gst_pad_get_parent (pad);
2090 /* mq is NULL if the pad is activated/deactivated before being
2091 * added to the multiqueue */
2093 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2096 case GST_PAD_MODE_PUSH:
2098 /* All pads start off linked until they push one buffer */
2099 sq->srcresult = GST_FLOW_OK;
2101 gst_data_queue_set_flushing (sq->queue, FALSE);
/* Deactivation: fail future flow, release query waiters, flush */
2103 sq->srcresult = GST_FLOW_FLUSHING;
2104 sq->last_query = FALSE;
2105 g_cond_signal (&sq->query_handled);
2106 gst_data_queue_set_flushing (sq->queue, TRUE);
2108 /* Wait until streaming thread has finished */
2110 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2111 GST_PAD_STREAM_LOCK (pad);
2113 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2114 gst_data_queue_flush (sq->queue);
2116 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2117 GST_PAD_STREAM_UNLOCK (pad);
2119 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2129 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Balance the gst_pad_get_parent() ref taken above */
2130 gst_object_unref (mq);
/* Sinkpad event handler.  Flush events are forwarded immediately and
 * (de)activate flushing on the single queue; serialized events are queued
 * behind the data; SEGMENT and GAP keep an extra ref (@sref) so the sink
 * segment can be updated after the queue took ownership of the event. */
2136 static GstFlowReturn
2137 gst_multi_queue_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
2142 GstMultiQueueItem *item;
2143 gboolean res = TRUE;
2144 GstFlowReturn flowret = GST_FLOW_OK;
2146 GstEvent *sref = NULL;
2148 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
2149 mq = (GstMultiQueue *) parent;
2151 type = GST_EVENT_TYPE (event);
2154 case GST_EVENT_STREAM_START:
2156 if (mq->sync_by_running_time) {
2157 GstStreamFlags stream_flags;
2158 gst_event_parse_stream_flags (event, &stream_flags);
/* Sparse streams (e.g. subtitles) are excluded from interleave
 * calculation and reported as full for buffering */
2159 if ((stream_flags & GST_STREAM_FLAG_SPARSE)) {
2160 GST_INFO_OBJECT (mq, "SingleQueue %d is a sparse stream", sq->id);
2161 sq->is_sparse = TRUE;
2165 sq->thread = g_thread_self ();
2167 /* Remove EOS flag */
2171 case GST_EVENT_FLUSH_START:
2172 GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush start event",
/* Flush events bypass the queue: forward first, then flush */
2175 res = gst_pad_push_event (sq->srcpad, event);
2177 gst_single_queue_flush (mq, sq, TRUE, FALSE);
2180 case GST_EVENT_FLUSH_STOP:
2181 GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush stop event",
2184 res = gst_pad_push_event (sq->srcpad, event);
2186 gst_single_queue_flush (mq, sq, FALSE, FALSE);
2189 case GST_EVENT_SEGMENT:
2190 sref = gst_event_ref (event);
2193 /* take ref because the queue will take ownership and we need the event
2194 * afterwards to update the segment */
2195 sref = gst_event_ref (event);
2196 if (mq->use_interleave) {
2197 GstClockTime val, dur;
/* GAP events update the cached sink time for interleave, just like
 * timestamped buffers do in the chain function */
2199 gst_event_parse_gap (event, &val, &dur);
2200 if (GST_CLOCK_TIME_IS_VALID (val)) {
2201 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2202 if (GST_CLOCK_TIME_IS_VALID (dur))
2204 stime = my_segment_to_running_time (&sq->sink_segment, val);
2205 if (GST_CLOCK_STIME_IS_VALID (stime)) {
2206 sq->cached_sinktime = stime;
2207 calculate_interleave (mq);
2209 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Non-serialized events skip the queue entirely */
2215 if (!(GST_EVENT_IS_SERIALIZED (event))) {
2216 res = gst_pad_push_event (sq->srcpad, event);
2222 /* if eos, we are always full, so avoid hanging incoming indefinitely */
2226 /* Get an unique incrementing id. */
2227 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2229 item = gst_multi_queue_mo_item_new ((GstMiniObject *) event, curid);
2231 GST_DEBUG_OBJECT (mq,
2232 "SingleQueue %d : Enqueuing event %p of type %s with id %d",
2233 sq->id, event, GST_EVENT_TYPE_NAME (event), curid);
2235 if (!gst_data_queue_push (sq->queue, (GstDataQueueItem *) item))
2238 /* mark EOS when we received one, we must do that after putting the
2239 * buffer in the queue because EOS marks the buffer as filled. */
2242 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2245 /* Post an error message if we got EOS while downstream
2246 * has returned an error flow return. After EOS there
2247 * will be no further buffer which could propagate the
2249 if (sq->srcresult < GST_FLOW_EOS) {
2250 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2251 GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
2253 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2256 /* EOS affects the buffering state */
2257 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2258 update_buffering (mq, sq);
2259 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2260 single_queue_overrun_cb (sq->queue, sq);
2261 gst_multi_queue_post_buffering (mq);
2263 case GST_EVENT_SEGMENT:
/* Use the extra ref taken above; the queued event is owned by the item */
2264 apply_segment (mq, sq, sref, &sq->sink_segment);
2265 gst_event_unref (sref);
2266 /* a new segment allows us to accept more buffers if we got EOS
2267 * from downstream */
2268 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2269 if (sq->srcresult == GST_FLOW_EOS)
2270 sq->srcresult = GST_FLOW_OK;
2271 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2275 apply_gap (mq, sq, sref, &sq->sink_segment);
2276 gst_event_unref (sref);
2283 flowret = GST_FLOW_ERROR;
2284 GST_DEBUG_OBJECT (mq, "SingleQueue %d : returning %s", sq->id,
2285 gst_flow_get_name (flowret));
/* Error path: queue push refused; release extra ref and the item */
2290 GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
2291 sq->id, gst_flow_get_name (sq->srcresult));
2293 gst_event_unref (sref);
2294 gst_multi_queue_item_destroy (item);
2295 return sq->srcresult;
/* Error path: already EOS, drop serialized event */
2299 GST_DEBUG_OBJECT (mq, "we are EOS, dropping event, return GST_FLOW_EOS");
2300 gst_event_unref (event);
2301 return GST_FLOW_EOS;
/* gst_multi_queue_sink_query:
 * Sink-pad query handler.  Serialized queries are enqueued into the single
 * queue's data queue (like serialized events) and this thread then blocks on
 * sq->query_handled until the streaming thread has pushed the query
 * downstream; non-serialized queries use the default pad query handling.
 * NOTE(review): several original lines (declarations, case labels, goto
 * targets, closing braces) are missing from this excerpt -- comments below
 * describe only what the visible lines show. */
2306 gst_multi_queue_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
2312 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
2313 mq = (GstMultiQueue *) parent;
2315 switch (GST_QUERY_TYPE (query)) {
2317 if (GST_QUERY_IS_SERIALIZED (query)) {
2319 GstMultiQueueItem *item;
2321 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
/* Refuse serialized queries once the source side is not in normal flow
 * (presumably jumps to the "Flushing" path seen at the end -- the branch
 * target line is missing from this excerpt; TODO confirm). */
2322 if (sq->srcresult != GST_FLOW_OK)
2325 /* serialized events go in the queue. We need to be certain that we
2326 * don't cause deadlocks waiting for the query return value. We check if
2327 * the queue is empty (nothing is blocking downstream and the query can
2328 * be pushed for sure) or we are not buffering. If we are buffering,
2329 * the pipeline waits to unblock downstream until our queue fills up
2330 * completely, which can not happen if we block on the query..
2331 * Therefore we only potentially block when we are not buffering. */
2332 if (!mq->use_buffering || gst_data_queue_is_empty (sq->queue)) {
2333 /* Get an unique incrementing id. */
2334 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2336 item = gst_multi_queue_mo_item_new ((GstMiniObject *) query, curid);
2338 GST_DEBUG_OBJECT (mq,
2339 "SingleQueue %d : Enqueuing query %p of type %s with id %d",
2340 sq->id, query, GST_QUERY_TYPE_NAME (query), curid);
/* Drop the multiqueue lock around the (potentially blocking) data-queue
 * push, then re-take it before inspecting shared state. */
2341 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2342 res = gst_data_queue_push (sq->queue, (GstDataQueueItem *) item);
2343 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2344 if (!res || sq->flushing)
2346 /* it might be that the query has been taken out of the queue
2347 * while we were unlocked. So, we need to check if the last
2348 * handled query is the same one than the one we just
2349 * pushed. If it is, we don't need to wait for the condition
2350 * variable, otherwise we wait for the condition variable to
2352 if (sq->last_handled_query != query)
2353 g_cond_wait (&sq->query_handled, &mq->qlock);
/* last_query holds the downstream result for the query the streaming
 * thread just handled; consume it and clear the handled marker. */
2354 res = sq->last_query;
2355 sq->last_handled_query = NULL;
2357 GST_DEBUG_OBJECT (mq, "refusing query, we are buffering and the "
2358 "queue is not empty");
2361 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2363 /* default handling */
2364 res = gst_pad_query_default (pad, parent, query);
/* Flushing / error path: bail out while still holding the lock. */
2372 GST_DEBUG_OBJECT (mq, "Flushing");
2373 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* gst_multi_queue_src_activate_mode:
 * Source-pad activate-mode handler for push mode.  One branch starts the
 * single queue (flush stop), the other flushes it and stops the pad's
 * streaming task.  NOTE(review): the lines selecting between the
 * active/inactive branches and the non-push default case are missing from
 * this excerpt -- presumably the usual `if (active) ... else ...` shape;
 * TODO confirm against the full source. */
2379 gst_multi_queue_src_activate_mode (GstPad * pad, GstObject * parent,
2380 GstPadMode mode, gboolean active)
2386 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
2389 GST_DEBUG_OBJECT (mq, "SingleQueue %d", sq->id);
2392 case GST_PAD_MODE_PUSH:
/* flush==FALSE: take the queue out of flushing state (activation). */
2394 result = gst_single_queue_flush (mq, sq, FALSE, TRUE);
/* flush==TRUE: put the queue into flushing state (deactivation). */
2396 result = gst_single_queue_flush (mq, sq, TRUE, TRUE);
2397 /* make sure streaming finishes */
2398 result |= gst_pad_stop_task (pad);
/* gst_multi_queue_src_event:
 * Source-pad event handler.  RECONFIGURE resets a NOT_LINKED single queue
 * back to GST_FLOW_OK and wakes its streaming thread (sq->turn), then the
 * event -- like all other upstream events -- is forwarded to the sink pad.
 * NOTE(review): the case/default labels between the two push_event calls
 * are missing from this excerpt. */
2409 gst_multi_queue_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2411 GstSingleQueue *sq = gst_pad_get_element_private (pad);
2412 GstMultiQueue *mq = sq->mqueue;
2415 switch (GST_EVENT_TYPE (event)) {
2416 case GST_EVENT_RECONFIGURE:
2417 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2418 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
/* Downstream got relinked: resume pushing and wake the waiting task. */
2419 sq->srcresult = GST_FLOW_OK;
2420 g_cond_signal (&sq->turn);
2422 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2424 ret = gst_pad_push_event (sq->sinkpad, event);
/* Default: forward every other upstream event unchanged. */
2427 ret = gst_pad_push_event (sq->sinkpad, event);
/* gst_multi_queue_src_query:
 * Source-pad query handler; the visible lines only show default pad query
 * handling.  NOTE(review): any specific case labels are missing from this
 * excerpt. */
2435 gst_multi_queue_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2439 /* FIXME, Handle position offset depending on queue size */
2440 switch (GST_QUERY_TYPE (query)) {
2442 /* default handling */
2443 res = gst_pad_query_default (pad, parent, query);
2450 * Next-non-linked functions
2453 /* WITH LOCK TAKEN */
/* wake_up_next_non_linked:
 * Signals the streaming thread (sq->turn) of every NOT_LINKED single queue
 * whose turn has come: in running-time mode, those whose next_time has been
 * reached by their group_high_time; otherwise, those whose buffer id
 * (nextid) is at or below the global highid.  No-op when nobody waits. */
2455 wake_up_next_non_linked (GstMultiQueue * mq)
2459 /* maybe no-one is waiting */
2460 if (mq->numwaiting < 1)
2463 if (mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (mq->high_time)) {
2464 /* Else figure out which singlequeue(s) need waking up */
2465 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2466 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2467 if (sq->srcresult == GST_FLOW_NOT_LINKED
2468 && GST_CLOCK_STIME_IS_VALID (sq->group_high_time)
2469 && GST_CLOCK_STIME_IS_VALID (sq->next_time)
2470 && sq->next_time <= sq->group_high_time) {
2471 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
2472 g_cond_signal (&sq->turn);
/* id-based fallback when not syncing by running time */
2476 /* Else figure out which singlequeue(s) need waking up */
2477 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2478 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2479 if (sq->srcresult == GST_FLOW_NOT_LINKED &&
2480 sq->nextid != 0 && sq->nextid <= mq->highid) {
2481 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
2482 g_cond_signal (&sq->turn);
2488 /* WITH LOCK TAKEN */
/* compute_high_id:
 * Recomputes mq->highid: the highest item id already output by a linked,
 * non-EOS queue, or -- when no such queue contributes -- the lowest id a
 * not-linked queue is waiting on.  Queues with nextid == 0 (not waiting)
 * are skipped.  G_MAXUINT32 doubles as the "unset" sentinel for both
 * accumulators. */
2490 compute_high_id (GstMultiQueue * mq)
2492 /* The high-id is either the highest id among the linked pads, or if all
2493 * pads are not-linked, it's the lowest not-linked pad */
2495 guint32 lowest = G_MAXUINT32;
2496 guint32 highid = G_MAXUINT32;
2498 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2499 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2501 GST_LOG_OBJECT (mq, "inspecting sq:%d , nextid:%d, oldid:%d, srcresult:%s",
2502 sq->id, sq->nextid, sq->oldid, gst_flow_get_name (sq->srcresult));
2504 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2505 /* No need to consider queues which are not waiting */
2506 if (sq->nextid == 0) {
2507 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
2511 if (sq->nextid < lowest)
2512 lowest = sq->nextid;
2513 } else if (!GST_PAD_IS_EOS (sq->srcpad) && sq->srcresult != GST_FLOW_EOS) {
2514 /* If we don't have a global highid, or the global highid is lower than
2515 * this single queue's last outputted id, store the queue's one,
2516 * unless the singlequeue output is at EOS */
2517 if ((highid == G_MAXUINT32) || (sq->oldid > highid))
/* NOTE(review): the assignment `highid = sq->oldid;` expected here is
 * missing from this excerpt. */
/* pick the not-linked lowest when it is behind (or nothing was linked) */
2522 if (highid == G_MAXUINT32 || lowest < highid)
2523 mq->highid = lowest;
2525 mq->highid = highid;
2527 GST_LOG_OBJECT (mq, "Highid is now : %u, lowest non-linked %u", mq->highid,
2531 /* WITH LOCK TAKEN */
/* compute_high_time:
 * Running-time analogue of compute_high_id().  Recomputes mq->high_time
 * (global) and, for the given groupid, a per-group high time that is stored
 * into group_high_time of every queue in that group.  Linked, non-EOS
 * queues contribute their last output time ("highest"); not-linked queues
 * contribute the time they are waiting on ("lowest").  When the group has
 * fewer than two members the global high time is used for the group.
 * No-op unless mq->sync_by_running_time. */
2533 compute_high_time (GstMultiQueue * mq, guint groupid)
2535 /* The high-time is either the highest last time among the linked
2536 * pads, or if all pads are not-linked, it's the lowest nex time of
2539 GstClockTimeDiff highest = GST_CLOCK_STIME_NONE;
2540 GstClockTimeDiff lowest = GST_CLOCK_STIME_NONE;
2541 GstClockTimeDiff group_high = GST_CLOCK_STIME_NONE;
2542 GstClockTimeDiff group_low = GST_CLOCK_STIME_NONE;
2543 GstClockTimeDiff res;
2544 /* Number of streams which belong to groupid */
2545 guint group_count = 0;
2547 if (!mq->sync_by_running_time)
2548 /* return GST_CLOCK_STIME_NONE; */
2551 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2552 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2555 "inspecting sq:%d (group:%d) , next_time:%" GST_STIME_FORMAT
2556 ", last_time:%" GST_STIME_FORMAT ", srcresult:%s", sq->id, sq->groupid,
2557 GST_STIME_ARGS (sq->next_time), GST_STIME_ARGS (sq->last_time),
2558 gst_flow_get_name (sq->srcresult));
2560 if (sq->groupid == groupid)
/* NOTE(review): presumably `group_count++;` here -- line missing from
 * this excerpt; TODO confirm. */
2563 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2564 /* No need to consider queues which are not waiting */
2565 if (!GST_CLOCK_STIME_IS_VALID (sq->next_time)) {
2566 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
2570 if (lowest == GST_CLOCK_STIME_NONE || sq->next_time < lowest)
2571 lowest = sq->next_time;
2572 if (sq->groupid == groupid && (group_low == GST_CLOCK_STIME_NONE
2573 || sq->next_time < group_low))
2574 group_low = sq->next_time;
2575 } else if (!GST_PAD_IS_EOS (sq->srcpad) && sq->srcresult != GST_FLOW_EOS) {
2576 /* If we don't have a global high time, or the global high time
2577 * is lower than this single queue's last outputted time, store
2578 * the queue's one, unless the singlequeue output is at EOS. */
2579 if (highest == GST_CLOCK_STIME_NONE
2580 || (sq->last_time != GST_CLOCK_STIME_NONE && sq->last_time > highest))
2581 highest = sq->last_time;
2582 if (sq->groupid == groupid && (group_high == GST_CLOCK_STIME_NONE
2583 || (sq->last_time != GST_CLOCK_STIME_NONE
2584 && sq->last_time > group_high)))
2585 group_high = sq->last_time;
2588 "highest now %" GST_STIME_FORMAT " lowest %" GST_STIME_FORMAT,
2589 GST_STIME_ARGS (highest), GST_STIME_ARGS (lowest));
2590 if (sq->groupid == groupid)
2592 "grouphigh %" GST_STIME_FORMAT " grouplow %" GST_STIME_FORMAT,
2593 GST_STIME_ARGS (group_high), GST_STIME_ARGS (group_low));
/* fall back to the lowest waiting time when nothing linked contributed */
2596 if (highest == GST_CLOCK_STIME_NONE)
2597 mq->high_time = lowest;
2599 mq->high_time = highest;
2601 GST_LOG_OBJECT (mq, "group count %d for groupid %u", group_count, groupid);
2603 "High time is now : %" GST_STIME_FORMAT ", lowest non-linked %"
2604 GST_STIME_FORMAT, GST_STIME_ARGS (mq->high_time),
2605 GST_STIME_ARGS (lowest));
2607 /* If there's only one stream of a given type, use the global high */
2608 if (group_count < 2)
2609 res = mq->high_time;
2610 else if (group_high == GST_CLOCK_STIME_NONE)
/* NOTE(review): the assignments for this branch and the final `else`
 * (presumably `res = group_low;` / `res = group_high;`) are missing from
 * this excerpt; TODO confirm. */
/* publish the computed per-group high time to every member queue */
2615 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2616 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2617 if (groupid == sq->groupid)
2618 sq->group_high_time = res;
/* IS_FILLED(q, format, value):
 * TRUE when the limit for `format` (visible/bytes/time) is enabled
 * (non-zero max_size.format) and `value` has reached it.  A limit of 0
 * means "unlimited".  Macro hygiene: `q` is evaluated twice, so pass only
 * side-effect-free arguments. */
2622 #define IS_FILLED(q, format, value) (((q)->max_size.format) != 0 && \
2623 ((q)->max_size.format) <= (value))
2626 * GstSingleQueue functions
/* single_queue_overrun_cb:
 * GstDataQueue "full" callback for one single queue.  If the hard limits
 * (EOS, byte limit, or time limit for non-sparse streams) are NOT reached
 * and some other non-sparse, linked single queue is empty, the visible-item
 * limit of this queue is grown by one so the empty queue cannot starve.
 * Always ends by emitting the multiqueue "overrun" signal, since a full
 * queue blocks the upstream element.
 * NOTE(review): the lines consuming `filled`/`empty_found` (the `goto done;`
 * and surrounding branches) are missing from this excerpt -- comments
 * describe only the visible lines. */
2629 single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
2631 GstMultiQueue *mq = sq->mqueue;
2633 GstDataQueueSize size;
2634 gboolean filled = TRUE;
2635 gboolean empty_found = FALSE;
2637 gst_data_queue_get_level (sq->queue, &size);
2640 "Single Queue %d: EOS %d, visible %u/%u, bytes %u/%u, time %"
2641 G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, sq->id, sq->is_eos, size.visible,
2642 sq->max_size.visible, size.bytes, sq->max_size.bytes, sq->cur_time,
2645 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2647 /* check if we reached the hard time/bytes limits;
2648 time limit is only taken into account for non-sparse streams */
2649 if (sq->is_eos || IS_FILLED (sq, bytes, size.bytes) ||
2650 (!sq->is_sparse && IS_FILLED (sq, time, sq->cur_time))) {
2654 /* Search for empty queues */
2655 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2656 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
/* skip ourselves (presumably `if (oq == sq) continue;` -- line missing
 * from this excerpt) and skip queues that are not linked downstream */
2661 if (oq->srcresult == GST_FLOW_NOT_LINKED) {
2662 GST_LOG_OBJECT (mq, "Queue %d is not-linked", oq->id);
2666 GST_LOG_OBJECT (mq, "Checking Queue %d", oq->id);
2667 if (gst_data_queue_is_empty (oq->queue) && !oq->is_sparse) {
2668 GST_LOG_OBJECT (mq, "Queue %d is empty", oq->id);
2674 /* if hard limits are not reached then we allow one more buffer in the full
2675 * queue, but only if any of the other singelqueues are empty */
2677 if (IS_FILLED (sq, visible, size.visible)) {
2678 sq->max_size.visible = size.visible + 1;
2679 GST_DEBUG_OBJECT (mq,
2680 "Bumping single queue %d max visible to %d",
2681 sq->id, sq->max_size.visible);
2687 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2689 /* Overrun is always forwarded, since this is blocking the upstream element */
2691 GST_DEBUG_OBJECT (mq, "Queue %d is filled, signalling overrun", sq->id);
2692 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_OVERRUN], 0);
/* single_queue_underrun_cb:
 * GstDataQueue "empty" callback for one single queue.  Walks all other
 * single queues: any full one gets its visible-item limit bumped by one so
 * data keeps flowing toward the empty queue; if any other queue still holds
 * data (or is sparse) the multiqueue is not globally empty.  When every
 * queue is empty the "underrun" signal is emitted.
 * NOTE(review): the lines clearing `empty` and the end-of-function branch
 * testing it are missing from this excerpt. */
2697 single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
2699 gboolean empty = TRUE;
2700 GstMultiQueue *mq = sq->mqueue;
/* a not-linked queue being empty is expected; nothing to rebalance */
2703 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2704 GST_LOG_OBJECT (mq, "Single Queue %d is empty but not-linked", sq->id);
2708 "Single Queue %d is empty, Checking other single queues", sq->id);
2711 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2712 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2713 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
2715 if (gst_data_queue_is_full (oq->queue)) {
2716 GstDataQueueSize size;
2718 gst_data_queue_get_level (oq->queue, &size);
2719 if (IS_FILLED (oq, visible, size.visible)) {
/* grow the full queue by one visible item and notify its data queue */
2720 oq->max_size.visible = size.visible + 1;
2721 GST_DEBUG_OBJECT (mq,
2722 "queue %d is filled, bumping its max visible to %d", oq->id,
2723 oq->max_size.visible);
2724 gst_data_queue_limits_changed (oq->queue);
2727 if (!gst_data_queue_is_empty (oq->queue) || oq->is_sparse)
2730 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2733 GST_DEBUG_OBJECT (mq, "All queues are empty, signalling it");
2734 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_UNDERRUN], 0);
/* single_queue_check_full:
 * GstDataQueue fullness predicate for one single queue.  Considers the
 * visible-item limit (ignored while buffering), the byte limit, and the
 * time limit (skipped for sparse streams when syncing by running time; for
 * not-linked queues the extra unlinked_cache_time is subtracted first).
 * NOTE(review): the early `return TRUE` for EOS implied by the comment at
 * 2750, and the final return, are missing from this excerpt. */
2739 single_queue_check_full (GstDataQueue * dataq, guint visible, guint bytes,
2740 guint64 time, GstSingleQueue * sq)
2743 GstMultiQueue *mq = sq->mqueue;
2745 GST_DEBUG_OBJECT (mq,
2746 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
2747 G_GUINT64_FORMAT, sq->id, visible, sq->max_size.visible, bytes,
2748 sq->max_size.bytes, sq->cur_time, sq->max_size.time);
2750 /* we are always filled on EOS */
2754 /* we never go past the max visible items unless we are in buffering mode */
2755 if (!mq->use_buffering && IS_FILLED (sq, visible, visible))
2758 /* check time or bytes */
2759 res = IS_FILLED (sq, bytes, bytes);
2760 /* We only care about limits in time if we're not a sparse stream or
2761 * we're not syncing by running time */
2762 if (!sq->is_sparse || !mq->sync_by_running_time) {
2763 /* If unlinked, take into account the extra unlinked cache time */
2764 if (mq->sync_by_running_time && sq->srcresult == GST_FLOW_NOT_LINKED) {
2765 if (sq->cur_time > mq->unlinked_cache_time)
2766 res |= IS_FILLED (sq, time, sq->cur_time - mq->unlinked_cache_time);
/* linked (or not syncing by running time): plain time-limit check */
2770 res |= IS_FILLED (sq, time, sq->cur_time);
/* gst_single_queue_flush_queue:
 * Drains every item from the single queue's data queue and destroys it.
 * On a partial flush (full == FALSE) sticky events other than SEGMENT and
 * EOS are rescued by re-storing them on the source pad before the item is
 * destroyed.  If popping fails mid-drain the queue was set flushing; the
 * flushing flag is temporarily cleared to finish draining and restored at
 * the end.  Finally the buffering state is recomputed and posted. */
2777 gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full)
2779 GstDataQueueItem *sitem;
2780 GstMultiQueueItem *mitem;
2781 gboolean was_flushing = FALSE;
2783 while (!gst_data_queue_is_empty (sq->queue)) {
2784 GstMiniObject *data;
2786 /* FIXME: If this fails here although the queue is not empty,
2787 * we're flushing... but we want to rescue all sticky
2788 * events nonetheless.
2790 if (!gst_data_queue_pop (sq->queue, &sitem)) {
/* pop failed => the data queue is flushing; disable flushing so the
 * remaining items can still be drained, and remember to re-enable it */
2791 was_flushing = TRUE;
2792 gst_data_queue_set_flushing (sq->queue, FALSE);
2796 mitem = (GstMultiQueueItem *) sitem;
2798 data = sitem->object;
2800 if (!full && !mitem->is_query && GST_IS_EVENT (data)
2801 && GST_EVENT_IS_STICKY (data)
2802 && GST_EVENT_TYPE (data) != GST_EVENT_SEGMENT
2803 && GST_EVENT_TYPE (data) != GST_EVENT_EOS) {
2804 gst_pad_store_sticky_event (sq->srcpad, GST_EVENT_CAST (data));
2807 sitem->destroy (sitem);
2810 gst_data_queue_flush (sq->queue);
/* restore the flushing state observed above (presumably guarded by
 * `if (was_flushing)` -- that line is missing from this excerpt) */
2812 gst_data_queue_set_flushing (sq->queue, TRUE);
2814 GST_MULTI_QUEUE_MUTEX_LOCK (sq->mqueue);
2815 update_buffering (sq->mqueue, sq);
2816 GST_MULTI_QUEUE_MUTEX_UNLOCK (sq->mqueue);
2817 gst_multi_queue_post_buffering (sq->mqueue);
/* gst_single_queue_free:
 * Releases a GstSingleQueue's resources: flushes and unrefs its data queue
 * and clears both condition variables.  NOTE(review): the final
 * `g_free (sq)` expected after these lines is outside this excerpt. */
2821 gst_single_queue_free (GstSingleQueue * sq)
2824 gst_data_queue_flush (sq->queue);
2825 g_object_unref (sq->queue);
2826 g_cond_clear (&sq->turn);
2827 g_cond_clear (&sq->query_handled);
2831 static GstSingleQueue *
2832 gst_single_queue_new (GstMultiQueue * mqueue, guint id)
2835 GstMultiQueuePad *mqpad;
2836 GstPadTemplate *templ;
2839 guint temp_id = (id == -1) ? 0 : id;
2841 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
2843 /* Find an unused queue ID, if possible the passed one */
2844 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
2845 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
2846 /* This works because the IDs are sorted in ascending order */
2847 if (sq2->id == temp_id) {
2848 /* If this ID was requested by the caller return NULL,
2849 * otherwise just get us the next one */
2851 temp_id = sq2->id + 1;
2853 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
2856 } else if (sq2->id > temp_id) {
2861 sq = g_new0 (GstSingleQueue, 1);
2864 sq->groupid = DEFAULT_PAD_GROUP_ID;
2865 sq->group_high_time = GST_CLOCK_STIME_NONE;
2867 mqueue->queues = g_list_insert_before (mqueue->queues, tmp, sq);
2868 mqueue->queues_cookie++;
2870 /* copy over max_size and extra_size so we don't need to take the lock
2871 * any longer when checking if the queue is full. */
2872 sq->max_size.visible = mqueue->max_size.visible;
2873 sq->max_size.bytes = mqueue->max_size.bytes;
2874 sq->max_size.time = mqueue->max_size.time;
2876 sq->extra_size.visible = mqueue->extra_size.visible;
2877 sq->extra_size.bytes = mqueue->extra_size.bytes;
2878 sq->extra_size.time = mqueue->extra_size.time;
2880 GST_DEBUG_OBJECT (mqueue, "Creating GstSingleQueue id:%d", sq->id);
2882 sq->mqueue = mqueue;
2883 sq->srcresult = GST_FLOW_FLUSHING;
2885 sq->queue = gst_data_queue_new ((GstDataQueueCheckFullFunction)
2886 single_queue_check_full,
2887 (GstDataQueueFullCallback) single_queue_overrun_cb,
2888 (GstDataQueueEmptyCallback) single_queue_underrun_cb, sq);
2890 sq->is_sparse = FALSE;
2891 sq->flushing = FALSE;
2893 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
2894 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
2898 sq->next_time = GST_CLOCK_STIME_NONE;
2899 sq->last_time = GST_CLOCK_STIME_NONE;
2900 g_cond_init (&sq->turn);
2901 g_cond_init (&sq->query_handled);
2903 sq->sinktime = GST_CLOCK_STIME_NONE;
2904 sq->srctime = GST_CLOCK_STIME_NONE;
2905 sq->sink_tainted = TRUE;
2906 sq->src_tainted = TRUE;
2908 name = g_strdup_printf ("sink_%u", sq->id);
2909 templ = gst_static_pad_template_get (&sinktemplate);
2910 sq->sinkpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
2911 "direction", templ->direction, "template", templ, NULL);
2912 gst_object_unref (templ);
2915 mqpad = (GstMultiQueuePad *) sq->sinkpad;
2918 gst_pad_set_chain_function (sq->sinkpad,
2919 GST_DEBUG_FUNCPTR (gst_multi_queue_chain));
2920 gst_pad_set_activatemode_function (sq->sinkpad,
2921 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_activate_mode));
2922 gst_pad_set_event_full_function (sq->sinkpad,
2923 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_event));
2924 gst_pad_set_query_function (sq->sinkpad,
2925 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_query));
2926 gst_pad_set_iterate_internal_links_function (sq->sinkpad,
2927 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
2928 GST_OBJECT_FLAG_SET (sq->sinkpad, GST_PAD_FLAG_PROXY_CAPS);
2930 name = g_strdup_printf ("src_%u", sq->id);
2931 sq->srcpad = gst_pad_new_from_static_template (&srctemplate, name);
2934 gst_pad_set_activatemode_function (sq->srcpad,
2935 GST_DEBUG_FUNCPTR (gst_multi_queue_src_activate_mode));
2936 gst_pad_set_event_function (sq->srcpad,
2937 GST_DEBUG_FUNCPTR (gst_multi_queue_src_event));
2938 gst_pad_set_query_function (sq->srcpad,
2939 GST_DEBUG_FUNCPTR (gst_multi_queue_src_query));
2940 gst_pad_set_iterate_internal_links_function (sq->srcpad,
2941 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
2942 GST_OBJECT_FLAG_SET (sq->srcpad, GST_PAD_FLAG_PROXY_CAPS);
2944 gst_pad_set_element_private (sq->sinkpad, (gpointer) sq);
2945 gst_pad_set_element_private (sq->srcpad, (gpointer) sq);
2947 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
2949 /* only activate the pads when we are not in the NULL state
2950 * and add the pad under the state_lock to prevend state changes
2951 * between activating and adding */
2952 g_rec_mutex_lock (GST_STATE_GET_LOCK (mqueue));
2953 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
2954 gst_pad_set_active (sq->srcpad, TRUE);
2955 gst_pad_set_active (sq->sinkpad, TRUE);
2957 gst_element_add_pad (GST_ELEMENT (mqueue), sq->srcpad);
2958 gst_element_add_pad (GST_ELEMENT (mqueue), sq->sinkpad);
2959 g_rec_mutex_unlock (GST_STATE_GET_LOCK (mqueue));
2961 GST_DEBUG_OBJECT (mqueue, "GstSingleQueue [%d] created and pads added",