2 * Copyright (C) 2006 Edward Hervey <edward@fluendo.com>
3 * Copyright (C) 2007 Jan Schmidt <jan@fluendo.com>
4 * Copyright (C) 2007 Wim Taymans <wim@fluendo.com>
5 * Copyright (C) 2011 Sebastian Dröge <sebastian.droege@collabora.co.uk>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:element-multiqueue
28 * @see_also: #GstQueue
30 * Multiqueue is similar to a normal #GstQueue with the following additional
33 * 1) Multiple stream handling
35 * * The element handles queueing data on more than one stream at once. To
36 * achieve such a feature it has request sink pads (sink%u) and
37 * 'sometimes' src pads (src%u). When requesting a given sinkpad with gst_element_request_pad(),
38 * the associated srcpad for that stream will be created.
39 * Example: requesting sink1 will generate src1.
41 * 2) Non-starvation on multiple streams
43 * * If more than one stream is used with the element, the streams' queues
44 * will be dynamically grown (up to a limit), in order to ensure that no
45 * stream is risking data starvation. This guarantees that at any given
46 * time there are at least N bytes queued and available for each individual
47 * stream. If an EOS event comes through a srcpad, the associated queue will be
48 * considered as 'not-empty' in the queue-size-growing algorithm.
50 * 3) Non-linked srcpads graceful handling
52 * * In order to better support dynamic switching between streams, the multiqueue
53 * (unlike the current GStreamer queue) continues to push buffers on non-linked
54 * pads rather than shutting down. In addition, to prevent a non-linked stream from very quickly consuming all
55 * available buffers and thus 'racing ahead' of the other streams, the element
56 * must ensure that buffers and inlined events for a non-linked stream are pushed
57 * in the same order as they were received, relative to the other streams
58 * controlled by the element. This means that a buffer cannot be pushed to a
59 * non-linked pad any sooner than buffers in any other stream which were received
62 * Data is queued until one of the limits specified by the
63 * #GstMultiQueue:max-size-buffers, #GstMultiQueue:max-size-bytes and/or
64 * #GstMultiQueue:max-size-time properties has been reached. Any attempt to push
65 * more buffers into the queue will block the pushing thread until more space
66 * becomes available. #GstMultiQueue:extra-size-buffers,
69 * #GstMultiQueue:extra-size-bytes and #GstMultiQueue:extra-size-time are
72 * The default queue size limits are 5 buffers, 10MB of data, or
73 * two seconds' worth of data, whichever is reached first. Note that the number
74 * of buffers will dynamically grow depending on the fill level of
77 * The #GstMultiQueue::underrun signal is emitted when all of the queues
78 * are empty. The #GstMultiQueue::overrun signal is emitted when one of the
80 * Both signals are emitted from the context of the streaming thread.
82 * When using #GstMultiQueue:sync-by-running-time the unlinked streams will
83 * be throttled by the highest running-time of linked streams. This allows
84 * further relinking of those unlinked streams without them being in the
85 * future (i.e. to achieve gapless playback).
86 * When dealing with streams which have got different consumption requirements
87 * downstream (ex: video decoders which will consume more buffer (in time) than
88 * audio decoders), it is recommended to group streams of the same type
89 * by using the pad "group-id" property. This will further throttle streams
90 * in time within that group.
98 #include <gst/glib-compat-private.h>
101 #include "gstmultiqueue.h"
102 #include "gstcoreelementselements.h"
105 * @sinkpad: associated sink #GstPad
106 * @srcpad: associated source #GstPad
108 * Structure containing all information and properties about
111 typedef struct _GstSingleQueue GstSingleQueue;
113 struct _GstSingleQueue
117 /* unique identifier of the queue */
119 /* group of streams to which this queue belongs to */
121 GstClockTimeDiff group_high_time;
127 /* flowreturn of previous srcpad push */
128 GstFlowReturn srcresult;
129 /* If something was actually pushed on
130 * this pad after flushing/pad activation
131 * and the srcresult corresponds to something
137 GstSegment sink_segment;
138 GstSegment src_segment;
139 gboolean has_src_segment; /* preferred over initializing the src_segment to
140 * UNDEFINED as this doesn't require adding ifs
141 * in every segment usage */
143 /* position of src/sink */
144 GstClockTimeDiff sinktime, srctime;
145 /* cached input value, used for interleave */
146 GstClockTimeDiff cached_sinktime;
147 /* TRUE if either position needs to be recalculated */
148 gboolean sink_tainted, src_tainted;
150 /* stream group id */
151 guint32 sink_stream_gid;
152 guint32 src_stream_gid;
154 /* TRUE if the stream group-id changed. Reset to FALSE the next time the
155 * segment is calculated */
156 gboolean sink_stream_gid_changed;
157 gboolean src_stream_gid_changed;
161 GstDataQueueSize max_size, extra_size;
162 GstClockTime cur_time;
164 gboolean is_segment_done;
169 /* Protected by global lock */
170 guint32 nextid; /* ID of the next object waiting to be pushed */
171 guint32 oldid; /* ID of the last object pushed (last in a series) */
172 guint32 last_oldid; /* Previously observed old_id, reset to MAXUINT32 on flush */
173 GstClockTimeDiff next_time; /* End running time of next buffer to be pushed */
174 GstClockTimeDiff last_time; /* Start running time of last pushed buffer */
175 GCond turn; /* SingleQueue turn waiting conditional */
177 /* for serialized queries */
180 GstQuery *last_handled_query;
182 /* For interleave calculation */
183 GThread *thread; /* Streaming thread of SingleQueue */
184 GstClockTime interleave; /* Calculated interleve within the thread */
187 /* Extension of GstDataQueueItem structure for our usage */
188 typedef struct _GstMultiQueueItem GstMultiQueueItem;
190 struct _GstMultiQueueItem
192 GstMiniObject *object;
197 GDestroyNotify destroy;
203 static GstSingleQueue *gst_single_queue_new (GstMultiQueue * mqueue, guint id);
204 static void gst_single_queue_unref (GstSingleQueue * squeue);
205 static GstSingleQueue *gst_single_queue_ref (GstSingleQueue * squeue);
207 static void wake_up_next_non_linked (GstMultiQueue * mq);
208 static void compute_high_id (GstMultiQueue * mq);
209 static void compute_high_time (GstMultiQueue * mq, guint groupid);
210 static void single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
211 static void single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
213 static void update_buffering (GstMultiQueue * mq, GstSingleQueue * sq);
214 static void gst_multi_queue_post_buffering (GstMultiQueue * mq);
215 static void recheck_buffering_status (GstMultiQueue * mq);
217 static void gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full);
219 static void calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq);
221 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink_%u",
224 GST_STATIC_CAPS_ANY);
226 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src_%u",
229 GST_STATIC_CAPS_ANY);
231 GST_DEBUG_CATEGORY_STATIC (multi_queue_debug);
232 #define GST_CAT_DEFAULT (multi_queue_debug)
234 /* Signals and args */
242 /* default limits, we try to keep up to 2 seconds of data and if there is not
243 * time, up to 10 MB. The number of buffers is dynamically scaled to make sure
244 * there is data in the queues. Normally, the byte and time limits are not hit
245 * in these conditions. */
246 #define DEFAULT_MAX_SIZE_BYTES 10 * 1024 * 1024 /* 10 MB */
247 #define DEFAULT_MAX_SIZE_BUFFERS 5
248 #define DEFAULT_MAX_SIZE_TIME 2 * GST_SECOND
250 /* second limits. When we hit one of the above limits we are probably dealing
251 * with a badly muxed file and we scale the limits to these emergency values.
252 * This is currently not yet implemented.
253 * Since we dynamically scale the queue buffer size up to the limits but avoid
254 * going above the max-size-buffers when we can, we don't really need this
255 * additional extra size. */
256 #define DEFAULT_EXTRA_SIZE_BYTES 10 * 1024 * 1024 /* 10 MB */
257 #define DEFAULT_EXTRA_SIZE_BUFFERS 5
258 #define DEFAULT_EXTRA_SIZE_TIME 3 * GST_SECOND
260 #define DEFAULT_USE_BUFFERING FALSE
261 #define DEFAULT_LOW_WATERMARK 0.01
262 #define DEFAULT_HIGH_WATERMARK 0.99
263 #define DEFAULT_SYNC_BY_RUNNING_TIME FALSE
264 #define DEFAULT_USE_INTERLEAVE FALSE
265 #define DEFAULT_UNLINKED_CACHE_TIME 250 * GST_MSECOND
267 #define DEFAULT_MINIMUM_INTERLEAVE (250 * GST_MSECOND)
272 PROP_EXTRA_SIZE_BYTES,
273 PROP_EXTRA_SIZE_BUFFERS,
274 PROP_EXTRA_SIZE_TIME,
276 PROP_MAX_SIZE_BUFFERS,
283 PROP_SYNC_BY_RUNNING_TIME,
285 PROP_UNLINKED_CACHE_TIME,
286 PROP_MINIMUM_INTERLEAVE,
291 /* Explanation for buffer levels and percentages:
293 * The buffering_level functions here return a value in a normalized range
294 * that specifies the current fill level of a queue. The range goes from 0 to
295 * MAX_BUFFERING_LEVEL. The low/high watermarks also use this same range.
297 * This is not to be confused with the buffering_percent value, which is
298 * a *relative* quantity - relative to the low/high watermarks.
299 * buffering_percent = 0% means overall buffering_level is at the low watermark.
300 * buffering_percent = 100% means overall buffering_level is at the high watermark.
301 * buffering_percent is used for determining if the fill level has reached
302 * the high watermark, and for producing BUFFERING messages. This value
303 * always uses a 0..100 range (since it is a percentage).
305 * To avoid future confusions, whenever "buffering level" is mentioned, it
306 * refers to the absolute level which is in the 0..MAX_BUFFERING_LEVEL
307 * range. Whenever "buffering_percent" is mentioned, it refers to the
308 * percentage value that is relative to the low/high watermark. */
310 /* Using a buffering level range of 0..1000000 to allow for a
311 * resolution in ppm (1 ppm = 0.0001%) */
312 #define MAX_BUFFERING_LEVEL 1000000
314 /* How much 1% makes up in the buffer level range */
315 #define BUF_LEVEL_PERCENT_FACTOR ((MAX_BUFFERING_LEVEL) / 100)
317 /* GstMultiQueuePad */
319 #define DEFAULT_PAD_GROUP_ID 0
325 PROP_CURRENT_LEVEL_BUFFERS,
326 PROP_CURRENT_LEVEL_BYTES,
327 PROP_CURRENT_LEVEL_TIME,
330 #define GST_TYPE_MULTIQUEUE_PAD (gst_multiqueue_pad_get_type())
331 #define GST_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePad))
332 #define GST_IS_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_MULTIQUEUE_PAD))
333 #define GST_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
334 #define GST_IS_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_MULTIQUEUE_PAD))
335 #define GST_MULTIQUEUE_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
337 #define GST_MULTI_QUEUE_MUTEX_LOCK(q) G_STMT_START { \
338 g_mutex_lock (&q->qlock); \
341 #define GST_MULTI_QUEUE_MUTEX_UNLOCK(q) G_STMT_START { \
342 g_mutex_unlock (&q->qlock); \
345 #define SET_PERCENT(mq, perc) G_STMT_START { \
346 if (perc != mq->buffering_percent) { \
347 mq->buffering_percent = perc; \
348 mq->buffering_percent_changed = TRUE; \
349 GST_DEBUG_OBJECT (mq, "buffering %d percent", perc); \
353 struct _GstMultiQueuePad
360 struct _GstMultiQueuePadClass
362 GstPadClass parent_class;
365 GType gst_multiqueue_pad_get_type (void);
367 G_DEFINE_TYPE (GstMultiQueuePad, gst_multiqueue_pad, GST_TYPE_PAD);
/* Returns the stream-group id of this pad's single queue, read under the
 * multiqueue's object lock. */
370 gst_multiqueue_pad_get_group_id (GstMultiQueuePad * pad)
/* Resolve the weak ref to the owning multiqueue; the gst_object_unref below
 * drops the strong ref this returns. */
378 mq = g_weak_ref_get (&pad->sq->mqueue);
/* groupid is protected by the owner's object lock */
381 GST_OBJECT_LOCK (mq);
384 ret = pad->sq->groupid;
387 GST_OBJECT_UNLOCK (mq);
388 gst_object_unref (mq);
/* Returns the current number of buffers (visible items) queued in this pad's
 * single queue, sampled under the multiqueue lock. */
395 gst_multiqueue_pad_get_current_level_buffers (GstMultiQueuePad * pad)
397 GstSingleQueue *sq = pad->sq;
398 GstDataQueueSize level;
/* Strong ref on the owner; released after sampling the level */
404 mq = g_weak_ref_get (&pad->sq->mqueue);
407 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
410 gst_data_queue_get_level (sq->queue, &level);
413 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
414 gst_object_unref (mq);
417 return level.visible;
/* Returns the current number of bytes queued in this pad's single queue,
 * sampled under the multiqueue lock. */
421 gst_multiqueue_pad_get_current_level_bytes (GstMultiQueuePad * pad)
423 GstSingleQueue *sq = pad->sq;
424 GstDataQueueSize level;
/* Strong ref on the owner; released after sampling the level */
430 mq = g_weak_ref_get (&pad->sq->mqueue);
433 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
436 gst_data_queue_get_level (sq->queue, &level);
439 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
440 gst_object_unref (mq);
/* Returns the current level of time of this pad's single queue, read under
 * the multiqueue lock. */
447 gst_multiqueue_pad_get_current_level_time (GstMultiQueuePad * pad)
449 GstSingleQueue *sq = pad->sq;
/* Strong ref on the owner; released once the value has been read */
456 mq = g_weak_ref_get (&pad->sq->mqueue);
459 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
465 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
466 gst_object_unref (mq);
/* GObject property getter for GstMultiQueuePad: each case delegates to the
 * corresponding gst_multiqueue_pad_get_* accessor, which does its own
 * locking. */
473 gst_multiqueue_pad_get_property (GObject * object, guint prop_id,
474 GValue * value, GParamSpec * pspec)
476 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
479 case PROP_PAD_GROUP_ID:
480 g_value_set_uint (value, gst_multiqueue_pad_get_group_id (pad));
482 case PROP_CURRENT_LEVEL_BUFFERS:{
483 g_value_set_uint (value,
484 gst_multiqueue_pad_get_current_level_buffers (pad));
487 case PROP_CURRENT_LEVEL_BYTES:{
488 g_value_set_uint (value,
489 gst_multiqueue_pad_get_current_level_bytes (pad));
492 case PROP_CURRENT_LEVEL_TIME:{
493 g_value_set_uint64 (value,
494 gst_multiqueue_pad_get_current_level_time (pad));
/* Unknown property id: emit the standard GObject warning */
498 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* GObject property setter for GstMultiQueuePad. Only "group-id" is writable;
 * it is stored on the pad's single queue under the multiqueue's object lock. */
504 gst_multiqueue_pad_set_property (GObject * object, guint prop_id,
505 const GValue * value, GParamSpec * pspec)
507 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
510 case PROP_PAD_GROUP_ID:
/* Strong ref on the owner for the duration of the update */
512 GstMultiQueue *mqueue = g_weak_ref_get (&pad->sq->mqueue);
515 GST_OBJECT_LOCK (mqueue);
517 pad->sq->groupid = g_value_get_uint (value);
520 GST_OBJECT_UNLOCK (mqueue);
521 gst_object_unref (mqueue);
526 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Finalize: drop the pad's ref on its single queue, then chain up to the
 * parent class finalize. */
532 gst_multiqueue_pad_finalize (GObject * object)
534 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
537 gst_single_queue_unref (pad->sq);
539 G_OBJECT_CLASS (gst_multiqueue_pad_parent_class)->finalize (object);
/* Class init for GstMultiQueuePad: wires up the GObject property vfuncs and
 * installs the per-pad properties (group-id is read/write, the current-level
 * properties are read-only). */
543 gst_multiqueue_pad_class_init (GstMultiQueuePadClass * klass)
545 GObjectClass *gobject_class = (GObjectClass *) klass;
547 gobject_class->set_property = gst_multiqueue_pad_set_property;
548 gobject_class->get_property = gst_multiqueue_pad_get_property;
549 gobject_class->finalize = gst_multiqueue_pad_finalize;
552 * GstMultiQueuePad:group-id:
554 * Group to which this pad belongs.
558 g_object_class_install_property (gobject_class, PROP_PAD_GROUP_ID,
559 g_param_spec_uint ("group-id", "Group ID",
560 "Group to which this pad belongs", 0, G_MAXUINT32,
561 DEFAULT_PAD_GROUP_ID, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
564 * GstMultiQueuePad:current-level-buffers:
566 * The corresponding queue's current level of buffers.
570 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_BUFFERS,
571 g_param_spec_uint ("current-level-buffers", "Current level buffers",
572 "Current level buffers", 0, G_MAXUINT32,
573 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
576 * GstMultiQueuePad:current-level-bytes:
578 * The corresponding queue's current level of bytes.
582 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_BYTES,
583 g_param_spec_uint ("current-level-bytes", "Current level bytes",
584 "Current level bytes", 0, G_MAXUINT32,
585 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
588 * GstMultiQueuePad:current-level-time:
590 * The corresponding queue's current level of time.
594 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_TIME,
595 g_param_spec_uint64 ("current-level-time", "Current level time",
596 "Current level time", 0, G_MAXUINT64,
597 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
601 gst_multiqueue_pad_init (GstMultiQueuePad * pad)
607 /* Convenience function */
/* Converts @val to a (signed) running time in @segment, returning
 * GST_CLOCK_STIME_NONE when @val is invalid. Uses the _full variant so
 * positions before the segment start yield a negative running time. */
608 static inline GstClockTimeDiff
609 my_segment_to_running_time (GstSegment * segment, GstClockTime val)
611 GstClockTimeDiff res = GST_CLOCK_STIME_NONE;
613 if (GST_CLOCK_TIME_IS_VALID (val)) {
615 gst_segment_to_running_time_full (segment, GST_FORMAT_TIME, val, &val);
624 static void gst_multi_queue_finalize (GObject * object);
625 static void gst_multi_queue_set_property (GObject * object,
626 guint prop_id, const GValue * value, GParamSpec * pspec);
627 static void gst_multi_queue_get_property (GObject * object,
628 guint prop_id, GValue * value, GParamSpec * pspec);
630 static GstPad *gst_multi_queue_request_new_pad (GstElement * element,
631 GstPadTemplate * temp, const gchar * name, const GstCaps * caps);
632 static void gst_multi_queue_release_pad (GstElement * element, GstPad * pad);
633 static GstStateChangeReturn gst_multi_queue_change_state (GstElement *
634 element, GstStateChange transition);
636 static void gst_multi_queue_loop (GstPad * pad);
639 GST_DEBUG_CATEGORY_INIT (multi_queue_debug, "multiqueue", 0, "multiqueue element");
640 #define gst_multi_queue_parent_class parent_class
641 G_DEFINE_TYPE_WITH_CODE (GstMultiQueue, gst_multi_queue, GST_TYPE_ELEMENT,
643 GST_ELEMENT_REGISTER_DEFINE (multiqueue, "multiqueue", GST_RANK_NONE,
644 GST_TYPE_MULTI_QUEUE);
646 static guint gst_multi_queue_signals[LAST_SIGNAL] = { 0 };
649 gst_multi_queue_class_init (GstMultiQueueClass * klass)
651 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
652 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
654 gobject_class->set_property = gst_multi_queue_set_property;
655 gobject_class->get_property = gst_multi_queue_get_property;
660 * GstMultiQueue::underrun:
661 * @multiqueue: the multiqueue instance
663 * This signal is emitted from the streaming thread when there is
664 * no data in any of the queues inside the multiqueue instance (underrun).
666 * This indicates either starvation or EOS from the upstream data sources.
668 gst_multi_queue_signals[SIGNAL_UNDERRUN] =
669 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
670 G_STRUCT_OFFSET (GstMultiQueueClass, underrun), NULL, NULL,
671 NULL, G_TYPE_NONE, 0);
674 * GstMultiQueue::overrun:
675 * @multiqueue: the multiqueue instance
677 * Reports that one of the queues in the multiqueue is full (overrun).
678 * A queue is full if the total amount of data inside it (num-buffers, time,
679 * size) is higher than the boundary values which can be set through the
680 * GObject properties.
682 * This can be used as an indicator of pre-roll.
684 gst_multi_queue_signals[SIGNAL_OVERRUN] =
685 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
686 G_STRUCT_OFFSET (GstMultiQueueClass, overrun), NULL, NULL,
687 NULL, G_TYPE_NONE, 0);
691 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BYTES,
692 g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
693 "Max. amount of data in the queue (bytes, 0=disable)",
694 0, G_MAXUINT, DEFAULT_MAX_SIZE_BYTES,
695 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
696 G_PARAM_STATIC_STRINGS));
697 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BUFFERS,
698 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
699 "Max. number of buffers in the queue (0=disable)", 0, G_MAXUINT,
700 DEFAULT_MAX_SIZE_BUFFERS,
701 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
702 G_PARAM_STATIC_STRINGS));
703 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_TIME,
704 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
705 "Max. amount of data in the queue (in ns, 0=disable)", 0, G_MAXUINT64,
706 DEFAULT_MAX_SIZE_TIME, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
707 G_PARAM_STATIC_STRINGS));
709 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BYTES,
710 g_param_spec_uint ("extra-size-bytes", "Extra Size (kB)",
711 "Amount of data the queues can grow if one of them is empty (bytes, 0=disable)"
712 " (NOT IMPLEMENTED)",
713 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BYTES,
714 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
715 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BUFFERS,
716 g_param_spec_uint ("extra-size-buffers", "Extra Size (buffers)",
717 "Amount of buffers the queues can grow if one of them is empty (0=disable)"
718 " (NOT IMPLEMENTED)",
719 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BUFFERS,
720 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
721 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_TIME,
722 g_param_spec_uint64 ("extra-size-time", "Extra Size (ns)",
723 "Amount of time the queues can grow if one of them is empty (in ns, 0=disable)"
724 " (NOT IMPLEMENTED)",
725 0, G_MAXUINT64, DEFAULT_EXTRA_SIZE_TIME,
726 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
729 * GstMultiQueue:use-buffering:
731 * Enable the buffering option in multiqueue so that BUFFERING messages are
732 * emitted based on low-/high-percent thresholds.
734 g_object_class_install_property (gobject_class, PROP_USE_BUFFERING,
735 g_param_spec_boolean ("use-buffering", "Use buffering",
736 "Emit GST_MESSAGE_BUFFERING based on low-/high-percent thresholds "
737 "(0% = low-watermark, 100% = high-watermark)",
738 DEFAULT_USE_BUFFERING, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
739 G_PARAM_STATIC_STRINGS));
741 * GstMultiQueue:low-percent:
743 * Low threshold percent for buffering to start.
745 g_object_class_install_property (gobject_class, PROP_LOW_PERCENT,
746 g_param_spec_int ("low-percent", "Low percent",
747 "Low threshold for buffering to start. Only used if use-buffering is True "
748 "(Deprecated: use low-watermark instead)",
749 0, 100, DEFAULT_LOW_WATERMARK * 100,
750 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
752 * GstMultiQueue:high-percent:
754 * High threshold percent for buffering to finish.
756 g_object_class_install_property (gobject_class, PROP_HIGH_PERCENT,
757 g_param_spec_int ("high-percent", "High percent",
758 "High threshold for buffering to finish. Only used if use-buffering is True "
759 "(Deprecated: use high-watermark instead)",
760 0, 100, DEFAULT_HIGH_WATERMARK * 100,
761 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
763 * GstMultiQueue:low-watermark:
765 * Low threshold watermark for buffering to start.
769 g_object_class_install_property (gobject_class, PROP_LOW_WATERMARK,
770 g_param_spec_double ("low-watermark", "Low watermark",
771 "Low threshold for buffering to start. Only used if use-buffering is True",
772 0.0, 1.0, DEFAULT_LOW_WATERMARK,
773 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
775 * GstMultiQueue:high-watermark:
777 * High threshold watermark for buffering to finish.
781 g_object_class_install_property (gobject_class, PROP_HIGH_WATERMARK,
782 g_param_spec_double ("high-watermark", "High watermark",
783 "High threshold for buffering to finish. Only used if use-buffering is True",
784 0.0, 1.0, DEFAULT_HIGH_WATERMARK,
785 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
788 * GstMultiQueue:sync-by-running-time:
790 * If enabled multiqueue will synchronize deactivated or not-linked streams
791 * to the activated and linked streams by taking the running time.
792 * Otherwise multiqueue will synchronize the deactivated or not-linked
793 * streams by keeping the order in which buffers and events arrived compared
794 * to active and linked streams.
796 g_object_class_install_property (gobject_class, PROP_SYNC_BY_RUNNING_TIME,
797 g_param_spec_boolean ("sync-by-running-time", "Sync By Running Time",
798 "Synchronize deactivated or not-linked streams by running time",
799 DEFAULT_SYNC_BY_RUNNING_TIME,
800 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
802 g_object_class_install_property (gobject_class, PROP_USE_INTERLEAVE,
803 g_param_spec_boolean ("use-interleave", "Use interleave",
804 "Adjust time limits based on input interleave",
805 DEFAULT_USE_INTERLEAVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
807 g_object_class_install_property (gobject_class, PROP_UNLINKED_CACHE_TIME,
808 g_param_spec_uint64 ("unlinked-cache-time", "Unlinked cache time (ns)",
809 "Extra buffering in time for unlinked streams (if 'sync-by-running-time')",
810 0, G_MAXUINT64, DEFAULT_UNLINKED_CACHE_TIME,
811 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
812 G_PARAM_STATIC_STRINGS));
814 g_object_class_install_property (gobject_class, PROP_MINIMUM_INTERLEAVE,
815 g_param_spec_uint64 ("min-interleave-time", "Minimum interleave time",
816 "Minimum extra buffering for deinterleaving (size of the queues) when use-interleave=true",
817 0, G_MAXUINT64, DEFAULT_MINIMUM_INTERLEAVE,
818 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
819 G_PARAM_STATIC_STRINGS));
822 * GstMultiQueue:stats:
824 * Various #GstMultiQueue statistics. This property returns a #GstStructure
825 * with name "application/x-gst-multi-queue-stats" with the following fields:
827 * - "queues" GST_TYPE_ARRAY Contains one GstStructure named "queue_%d"
828 * (where \%d is the queue's ID) per internal queue:
829 * - "buffers" G_TYPE_UINT The queue's current level of buffers
830 * - "bytes" G_TYPE_UINT The queue's current level of bytes
831 * - "time" G_TYPE_UINT64 The queue's current level of time
835 g_object_class_install_property (gobject_class, PROP_STATS,
836 g_param_spec_boxed ("stats", "Stats",
837 "Multiqueue Statistics",
838 GST_TYPE_STRUCTURE, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
840 gobject_class->finalize = gst_multi_queue_finalize;
842 gst_element_class_set_static_metadata (gstelement_class,
844 "Generic", "Multiple data queue", "Edward Hervey <edward@fluendo.com>");
845 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
846 &sinktemplate, GST_TYPE_MULTIQUEUE_PAD);
847 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
848 &srctemplate, GST_TYPE_MULTIQUEUE_PAD);
850 gstelement_class->request_new_pad =
851 GST_DEBUG_FUNCPTR (gst_multi_queue_request_new_pad);
852 gstelement_class->release_pad =
853 GST_DEBUG_FUNCPTR (gst_multi_queue_release_pad);
854 gstelement_class->change_state =
855 GST_DEBUG_FUNCPTR (gst_multi_queue_change_state);
857 gst_type_mark_as_plugin_api (GST_TYPE_MULTIQUEUE_PAD, 0);
/* Instance init: set every limit/watermark to its DEFAULT_* value and create
 * the mutexes protecting the queue list, reconfiguration and buffering-post
 * paths. */
861 gst_multi_queue_init (GstMultiQueue * mqueue)
863 mqueue->nbqueues = 0;
864 mqueue->queues = NULL;
/* hard limits: data is queued until one of these is hit */
866 mqueue->max_size.bytes = DEFAULT_MAX_SIZE_BYTES;
867 mqueue->max_size.visible = DEFAULT_MAX_SIZE_BUFFERS;
868 mqueue->max_size.time = DEFAULT_MAX_SIZE_TIME;
/* emergency growth room (extra-size-* is not implemented, see property docs) */
870 mqueue->extra_size.bytes = DEFAULT_EXTRA_SIZE_BYTES;
871 mqueue->extra_size.visible = DEFAULT_EXTRA_SIZE_BUFFERS;
872 mqueue->extra_size.time = DEFAULT_EXTRA_SIZE_TIME;
874 mqueue->use_buffering = DEFAULT_USE_BUFFERING;
/* watermarks are stored in the absolute 0..MAX_BUFFERING_LEVEL range */
875 mqueue->low_watermark = DEFAULT_LOW_WATERMARK * MAX_BUFFERING_LEVEL;
876 mqueue->high_watermark = DEFAULT_HIGH_WATERMARK * MAX_BUFFERING_LEVEL;
878 mqueue->sync_by_running_time = DEFAULT_SYNC_BY_RUNNING_TIME;
879 mqueue->use_interleave = DEFAULT_USE_INTERLEAVE;
880 mqueue->min_interleave_time = DEFAULT_MINIMUM_INTERLEAVE;
881 mqueue->unlinked_cache_time = DEFAULT_UNLINKED_CACHE_TIME;
885 mqueue->high_time = GST_CLOCK_STIME_NONE;
887 g_mutex_init (&mqueue->qlock);
888 g_mutex_init (&mqueue->reconf_lock);
889 g_mutex_init (&mqueue->buffering_post_lock);
/* Finalize: release all remaining single queues, clear the mutexes created in
 * init, and chain up. */
893 gst_multi_queue_finalize (GObject * object)
895 GstMultiQueue *mqueue = GST_MULTI_QUEUE (object);
/* unref each GstSingleQueue and drop the list itself */
897 g_list_free_full (mqueue->queues, (GDestroyNotify) gst_single_queue_unref);
898 mqueue->queues = NULL;
/* bump the cookie so any iterator over the old list is invalidated */
899 mqueue->queues_cookie++;
901 /* free/unref instance data */
902 g_mutex_clear (&mqueue->qlock);
903 g_mutex_clear (&mqueue->reconf_lock);
904 g_mutex_clear (&mqueue->buffering_post_lock);
906 G_OBJECT_CLASS (parent_class)->finalize (object);
909 #define SET_CHILD_PROPERTY(mq,format) G_STMT_START { \
910 GList * tmp = mq->queues; \
912 GstSingleQueue *q = (GstSingleQueue*)tmp->data; \
913 q->max_size.format = mq->max_size.format; \
914 update_buffering (mq, q); \
915 gst_data_queue_limits_changed (q->queue); \
916 tmp = g_list_next(tmp); \
921 gst_multi_queue_set_property (GObject * object, guint prop_id,
922 const GValue * value, GParamSpec * pspec)
924 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
927 case PROP_MAX_SIZE_BYTES:
928 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
929 mq->max_size.bytes = g_value_get_uint (value);
930 SET_CHILD_PROPERTY (mq, bytes);
931 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
932 gst_multi_queue_post_buffering (mq);
934 case PROP_MAX_SIZE_BUFFERS:
937 gint new_size = g_value_get_uint (value);
939 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
941 mq->max_size.visible = new_size;
945 GstDataQueueSize size;
946 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
947 gst_data_queue_get_level (q->queue, &size);
949 GST_DEBUG_OBJECT (mq, "Queue %d: Requested buffers size: %d,"
950 " current: %d, current max %d", q->id, new_size, size.visible,
951 q->max_size.visible);
953 /* do not reduce max size below current level if the single queue
954 * has grown because of empty queue */
956 q->max_size.visible = new_size;
957 } else if (q->max_size.visible == 0) {
958 q->max_size.visible = MAX (new_size, size.visible);
959 } else if (new_size > size.visible) {
960 q->max_size.visible = new_size;
962 update_buffering (mq, q);
963 gst_data_queue_limits_changed (q->queue);
964 tmp = g_list_next (tmp);
967 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
968 gst_multi_queue_post_buffering (mq);
972 case PROP_MAX_SIZE_TIME:
973 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
974 mq->max_size.time = g_value_get_uint64 (value);
975 SET_CHILD_PROPERTY (mq, time);
976 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
977 gst_multi_queue_post_buffering (mq);
979 case PROP_EXTRA_SIZE_BYTES:
980 mq->extra_size.bytes = g_value_get_uint (value);
982 case PROP_EXTRA_SIZE_BUFFERS:
983 mq->extra_size.visible = g_value_get_uint (value);
985 case PROP_EXTRA_SIZE_TIME:
986 mq->extra_size.time = g_value_get_uint64 (value);
988 case PROP_USE_BUFFERING:
989 mq->use_buffering = g_value_get_boolean (value);
990 recheck_buffering_status (mq);
992 case PROP_LOW_PERCENT:
993 mq->low_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
994 /* Recheck buffering status - the new low_watermark value might
995 * be above the current fill level. If the old low_watermark one
996 * was below the current level, this means that mq->buffering is
997 * disabled and needs to be re-enabled. */
998 recheck_buffering_status (mq);
1000 case PROP_HIGH_PERCENT:
1001 mq->high_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
1002 recheck_buffering_status (mq);
1004 case PROP_LOW_WATERMARK:
1005 mq->low_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
1006 recheck_buffering_status (mq);
1008 case PROP_HIGH_WATERMARK:
1009 mq->high_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
1010 recheck_buffering_status (mq);
1012 case PROP_SYNC_BY_RUNNING_TIME:
1013 mq->sync_by_running_time = g_value_get_boolean (value);
1015 case PROP_USE_INTERLEAVE:
1016 mq->use_interleave = g_value_get_boolean (value);
1018 case PROP_UNLINKED_CACHE_TIME:
1019 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1020 mq->unlinked_cache_time = g_value_get_uint64 (value);
1021 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1022 gst_multi_queue_post_buffering (mq);
1024 case PROP_MINIMUM_INTERLEAVE:
1025 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1026 mq->min_interleave_time = g_value_get_uint64 (value);
1027 if (mq->use_interleave)
1028 calculate_interleave (mq, NULL);
1029 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1032 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1037 /* Called with mutex held */
/* Build a GstStructure named "application/x-gst-multi-queue-stats" holding a
 * "queues" GstValueArray with one sub-structure ("queue_<id>") per single
 * queue: current visible buffer count, byte count, and queued time.
 * Ownership of the returned structure transfers to the caller. */
1038 static GstStructure *
1039 gst_multi_queue_get_stats (GstMultiQueue * mq)
1042 gst_structure_new_empty ("application/x-gst-multi-queue-stats");
1046 if (mq->queues != NULL) {
1047 GValue queues = G_VALUE_INIT;
1048 GValue v = G_VALUE_INIT;
1050 g_value_init (&queues, GST_TYPE_ARRAY);
1052 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
1053 GstDataQueueSize level;
1056 g_value_init (&v, GST_TYPE_STRUCTURE);
1058 sq = (GstSingleQueue *) tmp->data;
1059 gst_data_queue_get_level (sq->queue, &level);
1060 id = g_strdup_printf ("queue_%d", sq->id);
1061 s = gst_structure_new (id,
1062 "buffers", G_TYPE_UINT, level.visible,
1063 "bytes", G_TYPE_UINT, level.bytes,
1064 "time", G_TYPE_UINT64, sq->cur_time, NULL);
/* take_boxed / append_and_take_value transfer ownership of 's' and 'v'
 * into the array, so no explicit unref is needed here */
1065 g_value_take_boxed (&v, s);
1066 gst_value_array_append_and_take_value (&queues, &v);
1069 gst_structure_take_value (ret, "queues", &queues);
/* GObject property getter: reads the requested property under the multiqueue
 * lock so values are consistent with concurrent streaming-thread updates.
 * Watermark properties are stored internally scaled (see the matching setter);
 * the *-percent variants divide by BUF_LEVEL_PERCENT_FACTOR and the
 * *-watermark variants divide by MAX_BUFFERING_LEVEL to convert back to the
 * externally visible ranges. */
1076 gst_multi_queue_get_property (GObject * object, guint prop_id,
1077 GValue * value, GParamSpec * pspec)
1079 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
1081 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1084 case PROP_EXTRA_SIZE_BYTES:
1085 g_value_set_uint (value, mq->extra_size.bytes);
1087 case PROP_EXTRA_SIZE_BUFFERS:
1088 g_value_set_uint (value, mq->extra_size.visible);
1090 case PROP_EXTRA_SIZE_TIME:
1091 g_value_set_uint64 (value, mq->extra_size.time);
1093 case PROP_MAX_SIZE_BYTES:
1094 g_value_set_uint (value, mq->max_size.bytes);
1096 case PROP_MAX_SIZE_BUFFERS:
1097 g_value_set_uint (value, mq->max_size.visible);
1099 case PROP_MAX_SIZE_TIME:
1100 g_value_set_uint64 (value, mq->max_size.time);
1102 case PROP_USE_BUFFERING:
1103 g_value_set_boolean (value, mq->use_buffering);
1105 case PROP_LOW_PERCENT:
1106 g_value_set_int (value, mq->low_watermark / BUF_LEVEL_PERCENT_FACTOR);
1108 case PROP_HIGH_PERCENT:
1109 g_value_set_int (value, mq->high_watermark / BUF_LEVEL_PERCENT_FACTOR);
1111 case PROP_LOW_WATERMARK:
1112 g_value_set_double (value, mq->low_watermark /
1113 (gdouble) MAX_BUFFERING_LEVEL);
1115 case PROP_HIGH_WATERMARK:
1116 g_value_set_double (value, mq->high_watermark /
1117 (gdouble) MAX_BUFFERING_LEVEL);
1119 case PROP_SYNC_BY_RUNNING_TIME:
1120 g_value_set_boolean (value, mq->sync_by_running_time);
1122 case PROP_USE_INTERLEAVE:
1123 g_value_set_boolean (value, mq->use_interleave);
1125 case PROP_UNLINKED_CACHE_TIME:
1126 g_value_set_uint64 (value, mq->unlinked_cache_time);
1128 case PROP_MINIMUM_INTERLEAVE:
1129 g_value_set_uint64 (value, mq->min_interleave_time);
/* stats property: hand a freshly built stats structure to the GValue,
 * which takes ownership of the boxed structure */
1132 g_value_take_boxed (value, gst_multi_queue_get_stats (mq));
1135 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1139 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* GstPad iterate-internal-links implementation: for a given sink or src pad,
 * return a single-element iterator containing the opposite pad of the same
 * single queue. Pads are held via weak refs, so both ends are re-acquired
 * with g_weak_ref_get() and released before returning. */
1142 static GstIterator *
1143 gst_multi_queue_iterate_internal_links (GstPad * pad, GstObject * parent)
1145 GstIterator *it = NULL;
1146 GstPad *opad, *sinkpad, *srcpad;
1147 GstSingleQueue *squeue;
1148 GstMultiQueue *mq = GST_MULTI_QUEUE (parent);
1149 GValue val = { 0, };
1151 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1152 squeue = GST_MULTIQUEUE_PAD (pad)->sq;
1156 srcpad = g_weak_ref_get (&squeue->srcpad);
1157 sinkpad = g_weak_ref_get (&squeue->sinkpad);
/* 'pad' is one end; keep the opposite end as 'opad' and drop the other ref */
1158 if (sinkpad == pad && srcpad) {
1160 gst_clear_object (&sinkpad);
1162 } else if (srcpad == pad && sinkpad) {
1164 gst_clear_object (&srcpad);
1167 gst_clear_object (&srcpad);
1168 gst_clear_object (&sinkpad);
1172 g_value_init (&val, GST_TYPE_PAD);
1173 g_value_set_object (&val, opad);
1174 it = gst_iterator_new_single (GST_TYPE_PAD, &val);
1175 g_value_unset (&val);
1177 gst_object_unref (opad);
1180 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1187 * GstElement methods
/* GstElement request-pad vfunc: parses an optional numeric id out of the
 * requested name (e.g. "sink_3"), creates a new single queue (which adds both
 * the sink and src pads), and returns the sink pad.
 * NOTE(review): sscanf on 'name + 4' assumes the name starts with "sink" —
 * presumably guaranteed by the pad template; confirm upstream validation. */
1191 gst_multi_queue_request_new_pad (GstElement * element, GstPadTemplate * temp,
1192 const gchar * name, const GstCaps * caps)
1194 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1195 GstSingleQueue *squeue;
1200 sscanf (name + 4, "_%u", &temp_id);
1201 GST_LOG_OBJECT (element, "name : %s (id %d)", GST_STR_NULL (name), temp_id);
/* reconf_lock serializes concurrent request/release of pads */
1204 g_mutex_lock (&mqueue->reconf_lock);
1205 /* Create a new single queue, add the sink and source pad and return the sink pad */
1206 squeue = gst_single_queue_new (mqueue, temp_id);
1207 g_mutex_unlock (&mqueue->reconf_lock);
1209 new_pad = squeue ? g_weak_ref_get (&squeue->sinkpad) : NULL;
1210 /* request pad assumes the element is owning the ref of the pad it returns */
1212 gst_object_unref (new_pad);
1214 GST_DEBUG_OBJECT (mqueue, "Returning pad %" GST_PTR_FORMAT, new_pad);
/* GstElement release-pad vfunc: finds the single queue owning the given sink
 * pad, unlinks it from the element's queue list, flushes its data queue, and
 * deactivates/removes both pads. Takes reconf_lock to avoid racing with
 * request_new_pad. */
1220 gst_multi_queue_release_pad (GstElement * element, GstPad * pad)
1222 GstPad *sinkpad = NULL, *srcpad = NULL;
1223 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1224 GstSingleQueue *sq = NULL;
1227 GST_LOG_OBJECT (element, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
1229 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1230 /* Find which single queue it belongs to, knowing that it should be a sinkpad */
1231 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1232 sq = (GstSingleQueue *) tmp->data;
1233 sinkpad = g_weak_ref_get (&sq->sinkpad);
1235 if (sinkpad == pad) {
1236 srcpad = g_weak_ref_get (&sq->srcpad);
1240 gst_object_unref (sinkpad);
/* not found: release any refs picked up during the search and warn */
1244 gst_clear_object (&sinkpad);
1245 gst_clear_object (&srcpad);
1246 GST_WARNING_OBJECT (mqueue, "That pad doesn't belong to this element ???");
1247 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1251 /* FIXME: The removal of the singlequeue should probably not happen until it
1252 * finishes draining */
1254 /* Take the reconfiguration lock before removing the singlequeue from
1255 * the list, to prevent overlapping release/request from causing
1257 g_mutex_lock (&mqueue->reconf_lock);
1259 /* remove it from the list */
1260 mqueue->queues = g_list_delete_link (mqueue->queues, tmp);
/* bump the cookie so iterators over mqueue->queues know the list changed */
1261 mqueue->queues_cookie++;
1263 /* FIXME : recompute next-non-linked */
1264 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1266 /* delete SingleQueue */
1267 gst_data_queue_set_flushing (sq->queue, TRUE);
1269 gst_pad_set_active (srcpad, FALSE);
1270 gst_pad_set_active (sinkpad, FALSE);
1271 gst_element_remove_pad (element, srcpad);
1272 gst_element_remove_pad (element, sinkpad);
1273 gst_object_unref (srcpad);
1274 gst_object_unref (sinkpad);
1276 g_mutex_unlock (&mqueue->reconf_lock);
/* GstElement state-change vfunc.
 * READY->PAUSED: clear the per-queue flushing flag and re-apply the visible
 * limit to all single queues before chaining up.
 * PAUSED->READY: mark all queues flushing and signal any threads blocked in
 * g_cond_wait on 'turn' or 'query_handled' so they can exit. */
1279 static GstStateChangeReturn
1280 gst_multi_queue_change_state (GstElement * element, GstStateChange transition)
1282 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1283 GstSingleQueue *sq = NULL;
1284 GstStateChangeReturn result;
1286 switch (transition) {
1287 case GST_STATE_CHANGE_READY_TO_PAUSED:{
1290 /* Set all pads to non-flushing */
1291 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1292 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1293 sq = (GstSingleQueue *) tmp->data;
1294 sq->flushing = FALSE;
1297 /* the visible limit might not have been set on single queues that have grown because other queues were empty */
1298 SET_CHILD_PROPERTY (mqueue, visible);
1300 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1301 gst_multi_queue_post_buffering (mqueue);
1305 case GST_STATE_CHANGE_PAUSED_TO_READY:{
1308 /* Un-wait all waiting pads */
1309 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1310 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1311 sq = (GstSingleQueue *) tmp->data;
1312 sq->flushing = TRUE;
1313 g_cond_signal (&sq->turn);
/* also wake up any thread waiting for a serialized query answer */
1315 sq->last_query = FALSE;
1316 g_cond_signal (&sq->query_handled);
1318 mqueue->interleave_incomplete = FALSE;
1319 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
/* chain up to the parent class for the default state handling */
1326 result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
1328 switch (transition) {
/* Start the streaming task (gst_multi_queue_loop) on the single queue's
 * src pad. Returns the result of gst_pad_start_task(), or FALSE if the
 * src pad weak ref could not be resolved. */
1337 gst_single_queue_start (GstMultiQueue * mq, GstSingleQueue * sq)
1339 gboolean res = FALSE;
1340 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1342 GST_LOG_OBJECT (mq, "SingleQueue %d : starting task", sq->id);
1345 res = gst_pad_start_task (srcpad,
1346 (GstTaskFunction) gst_multi_queue_loop, srcpad, NULL);
1347 gst_object_unref (srcpad);
/* Pause the streaming task on the single queue's src pad and reset the
 * time-level bookkeeping: both segments become tainted (times must be
 * recomputed) and the stream group ids are invalidated. */
1354 gst_single_queue_pause (GstMultiQueue * mq, GstSingleQueue * sq)
1356 gboolean result = FALSE;
1357 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1359 GST_LOG_OBJECT (mq, "SingleQueue %d : pausing task", sq->id);
1361 result = gst_pad_pause_task (srcpad);
1362 gst_object_unref (srcpad);
1365 sq->sink_tainted = sq->src_tainted = TRUE;
1366 sq->sink_stream_gid = sq->src_stream_gid = GST_GROUP_ID_INVALID;
1367 sq->sink_stream_gid_changed = FALSE;
1368 sq->src_stream_gid_changed = FALSE;
/* Stop (fully shut down) the streaming task on the single queue's src pad.
 * Same bookkeeping reset as gst_single_queue_pause, but uses
 * gst_pad_stop_task so the task thread is joined. */
1373 gst_single_queue_stop (GstMultiQueue * mq, GstSingleQueue * sq)
1375 gboolean result = FALSE;
1376 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1378 GST_LOG_OBJECT (mq, "SingleQueue %d : stopping task", sq->id);
1380 result = gst_pad_stop_task (srcpad);
1381 gst_object_unref (srcpad);
1383 sq->sink_tainted = sq->src_tainted = TRUE;
1384 sq->sink_stream_gid = sq->src_stream_gid = GST_GROUP_ID_INVALID;
1385 sq->sink_stream_gid_changed = FALSE;
1386 sq->src_stream_gid_changed = FALSE;
/* Handle flush-start (flush==TRUE) / flush-stop (flush==FALSE) for one
 * single queue. On flush start: mark the queue flushing, set srcresult to
 * FLUSHING and wake any waiting threads. On flush stop: drain the data
 * queue (gst_single_queue_flush_queue), reinitialize both segments and all
 * per-queue timing state, and re-enable the data queue. */
1391 gst_single_queue_flush (GstMultiQueue * mq, GstSingleQueue * sq, gboolean flush,
1394 GST_DEBUG_OBJECT (mq, "flush %s queue %d", (flush ? "start" : "stop"),
1398 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1399 sq->srcresult = GST_FLOW_FLUSHING;
1400 gst_data_queue_set_flushing (sq->queue, TRUE);
1402 sq->flushing = TRUE;
1404 /* wake up non-linked task */
1405 GST_LOG_OBJECT (mq, "SingleQueue %d : waking up eventually waiting task",
1407 g_cond_signal (&sq->turn);
1408 sq->last_query = FALSE;
1409 g_cond_signal (&sq->query_handled);
1410 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* flush-stop path: empty the queue outside the lock, then reset state */
1412 gst_single_queue_flush_queue (sq, full);
1414 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1415 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
1416 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
1417 sq->has_src_segment = FALSE;
1418 /* All pads start off OK for a smooth kick-off */
1419 sq->srcresult = GST_FLOW_OK;
1422 sq->max_size.visible = mq->max_size.visible;
1424 sq->is_segment_done = FALSE;
1427 sq->last_oldid = G_MAXUINT32;
1428 sq->next_time = GST_CLOCK_STIME_NONE;
1429 sq->last_time = GST_CLOCK_STIME_NONE;
1430 sq->cached_sinktime = GST_CLOCK_STIME_NONE;
1431 sq->group_high_time = GST_CLOCK_STIME_NONE;
1432 gst_data_queue_set_flushing (sq->queue, FALSE);
1434 /* We will become active again on the next buffer/gap */
1437 /* Reset high time to be recomputed next */
1438 mq->high_time = GST_CLOCK_STIME_NONE;
1440 sq->flushing = FALSE;
1441 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1445 /* WITH LOCK TAKEN */
/* Compute this single queue's buffering level on a 0..MAX_BUFFERING_LEVEL
 * scale: the max of the byte-based and time-based fill ratios against the
 * queue's limits. EOS / segment-done / not-linked queues are reported as
 * completely full so they never hold back the overall buffering state. */
1447 get_buffering_level (GstMultiQueue * mq, GstSingleQueue * sq)
1449 GstDataQueueSize size;
1450 gint buffering_level, tmp;
1452 gst_data_queue_get_level (sq->queue, &size);
1454 GST_DEBUG_OBJECT (mq,
1455 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
1456 G_GUINT64_FORMAT, sq->id, size.visible, sq->max_size.visible,
1457 size.bytes, sq->max_size.bytes, sq->cur_time, sq->max_size.time);
1459 /* get bytes and time buffer levels and take the max */
1460 if (sq->is_eos || sq->is_segment_done || sq->srcresult == GST_FLOW_NOT_LINKED
1462 buffering_level = MAX_BUFFERING_LEVEL;
1464 buffering_level = 0;
1465 if (sq->max_size.time > 0) {
1467 gst_util_uint64_scale (sq->cur_time,
1468 MAX_BUFFERING_LEVEL, sq->max_size.time);
1469 buffering_level = MAX (buffering_level, tmp);
1471 if (sq->max_size.bytes > 0) {
1473 gst_util_uint64_scale_int (size.bytes,
1474 MAX_BUFFERING_LEVEL, sq->max_size.bytes);
1475 buffering_level = MAX (buffering_level, tmp);
1479 return buffering_level;
1482 /* WITH LOCK TAKEN */
/* Update the element-wide buffering state based on one queue's fill level.
 * Leaves buffering once this queue reaches the high watermark; (re)enters
 * buffering only when *no* queue is at the high watermark and this one has
 * dropped below the low watermark. SET_PERCENT records the value to be
 * posted later by gst_multi_queue_post_buffering. */
1484 update_buffering (GstMultiQueue * mq, GstSingleQueue * sq)
1486 gint buffering_level, percent;
1488 /* nothing to do when we are not in buffering mode */
1489 if (!mq->use_buffering)
1492 buffering_level = get_buffering_level (mq, sq);
1494 /* scale so that if buffering_level equals the high watermark,
1495 * the percentage is 100% */
1496 percent = gst_util_uint64_scale (buffering_level, 100, mq->high_watermark);
1501 if (mq->buffering) {
1502 if (buffering_level >= mq->high_watermark) {
1503 mq->buffering = FALSE;
1505 /* make sure it increases */
1506 percent = MAX (mq->buffering_percent, percent);
1508 SET_PERCENT (mq, percent);
1511 gboolean is_buffering = TRUE;
/* only start buffering if no other queue is already at its high watermark */
1513 for (iter = mq->queues; iter; iter = g_list_next (iter)) {
1514 GstSingleQueue *oq = (GstSingleQueue *) iter->data;
1516 if (get_buffering_level (mq, oq) >= mq->high_watermark) {
1517 is_buffering = FALSE;
1523 if (is_buffering && buffering_level < mq->low_watermark) {
1524 mq->buffering = TRUE;
1525 SET_PERCENT (mq, percent);
/* Post a pending GST_MESSAGE_BUFFERING on the bus, if the percentage changed
 * since the last post. Must be called WITHOUT the multiqueue lock held;
 * buffering_post_lock serializes posters so messages go out in order. */
1531 gst_multi_queue_post_buffering (GstMultiQueue * mq)
1533 GstMessage *msg = NULL;
1535 g_mutex_lock (&mq->buffering_post_lock);
1536 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1537 if (mq->buffering_percent_changed) {
1538 gint percent = mq->buffering_percent;
1540 mq->buffering_percent_changed = FALSE;
1542 GST_DEBUG_OBJECT (mq, "Going to post buffering: %d%%", percent);
1543 msg = gst_message_new_buffering (GST_OBJECT_CAST (mq), percent);
1545 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* post outside the multiqueue lock to avoid deadlocks with bus handlers */
1548 gst_element_post_message (GST_ELEMENT_CAST (mq), msg);
1550 g_mutex_unlock (&mq->buffering_post_lock);
/* Re-evaluate the buffering state after a property change (use-buffering or
 * a watermark). If buffering was just disabled while still active, force a
 * final 100% report; if enabled, recompute the percentage from scratch for
 * every queue and notify the data queues that their limits changed. */
1554 recheck_buffering_status (GstMultiQueue * mq)
1556 if (!mq->use_buffering && mq->buffering) {
1557 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1558 mq->buffering = FALSE;
1559 GST_DEBUG_OBJECT (mq,
1560 "Buffering property disabled, but queue was still buffering; "
1561 "setting buffering percentage to 100%%");
1562 SET_PERCENT (mq, 100);
1563 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1566 if (mq->use_buffering) {
1570 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1572 /* force buffering percentage to be recalculated */
1573 old_perc = mq->buffering_percent;
1574 mq->buffering_percent = 0;
1578 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
1579 update_buffering (mq, q);
1580 gst_data_queue_limits_changed (q->queue);
1581 tmp = g_list_next (tmp);
1584 GST_DEBUG_OBJECT (mq,
1585 "Recalculated buffering percentage: old: %d%% new: %d%%",
1586 old_perc, mq->buffering_percent);
1588 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* actually emit the (possibly changed) buffering message */
1591 gst_multi_queue_post_buffering (mq);
/* Recompute the global input interleave: the spread between the lowest and
 * highest cached sink running-times across all (non-sparse, non-EOS) single
 * queues, padded by 50% plus min-interleave-time. The result feeds
 * max-size.time so each queue can hold enough data to cover the interleave.
 * 'sq' may be NULL (property change); when non-NULL, only queues on the same
 * streaming thread contribute directly, others via their stored interleave.
 * Called WITH the multiqueue lock taken. */
1595 calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq)
1597 GstClockTimeDiff low, high;
1598 GstClockTime interleave, other_interleave = 0;
1599 gboolean some_inactive = FALSE;
1602 low = high = GST_CLOCK_STIME_NONE;
1603 interleave = mq->interleave;
1604 /* Go over all single queues and calculate lowest/highest value */
1605 for (tmp = mq->queues; tmp; tmp = tmp->next) {
1606 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
1607 /* Ignore sparse streams for interleave calculation */
1611 /* If some streams aren't active yet (haven't received any buffers), we will
1612 * grow interleave accordingly */
1614 some_inactive = TRUE;
1618 /* Calculate within each streaming thread */
1619 if (sq && sq->thread != oq->thread) {
1620 if (oq->interleave > other_interleave)
1621 other_interleave = oq->interleave;
1625 /* If the stream isn't EOS, update the low/high input value */
1626 if (GST_CLOCK_STIME_IS_VALID (oq->cached_sinktime) && !oq->is_eos) {
1627 if (low == GST_CLOCK_STIME_NONE || oq->cached_sinktime < low)
1628 low = oq->cached_sinktime;
1629 if (high == GST_CLOCK_STIME_NONE || oq->cached_sinktime > high)
1630 high = oq->cached_sinktime;
1632 /* If the input is before the segment start, consider as inactive to allow
1633 * the interleave to grow until *all* streams have data within the segment.
1635 * The reason for this is that there is no requirements for data before
1636 * the segment start to be "aligned" and therefore interleave calculation
1637 * can't reliably be done. For example a demuxer could provide video data
1638 * from the previous keyframe but audio only from just before the segment
1640 if (oq->cached_sinktime < 0)
1641 some_inactive = TRUE;
1644 "queue %d , sinktime:%" GST_STIME_FORMAT " low:%" GST_STIME_FORMAT
1645 " high:%" GST_STIME_FORMAT, oq->id,
1646 GST_STIME_ARGS (oq->cached_sinktime), GST_STIME_ARGS (low),
1647 GST_STIME_ARGS (high));
1650 if (GST_CLOCK_STIME_IS_VALID (low) && GST_CLOCK_STIME_IS_VALID (high)) {
/* high == low means no data spread has been observed yet: force an update */
1651 gboolean do_update = high == low;
1652 interleave = high - low;
1653 /* Padding of interleave and minimum value */
1654 interleave = (150 * interleave / 100) + mq->min_interleave_time;
1656 sq->interleave = interleave;
1658 interleave = MAX (interleave, other_interleave);
1660 /* Progressively grow up the interleave up to 5s if some streams were inactive */
1661 if (some_inactive && interleave <= mq->interleave) {
1662 interleave = MIN (5 * GST_SECOND, mq->interleave + 500 * GST_MSECOND);
1666 /* We force the interleave update if:
1667 * * the interleave was previously set while some streams were not active
1668 * yet but they now all are
1669 * * OR the interleave was previously based on all streams being active
1670 * whereas some now aren't
1672 if (mq->interleave_incomplete != some_inactive)
1675 mq->interleave_incomplete = some_inactive;
1677 /* Update the stored interleave if:
1678 * * No data has arrived yet (high == low)
1679 * * Or it went higher
1680 * * Or it went lower and we've gone past the previous interleave needed */
1681 if (do_update || interleave > mq->interleave ||
1682 ((mq->last_interleave_update + (2 * MIN (GST_SECOND,
1683 mq->interleave)) < low)
1684 && interleave < (mq->interleave * 3 / 4))) {
1685 /* Update the interleave */
1686 mq->interleave = interleave;
1687 mq->last_interleave_update = high;
1688 /* Update max-size time */
1689 mq->max_size.time = mq->interleave;
1690 SET_CHILD_PROPERTY (mq, time);
1694 GST_DEBUG_OBJECT (mq,
1695 "low:%" GST_STIME_FORMAT " high:%" GST_STIME_FORMAT " interleave:%"
1696 GST_TIME_FORMAT " mq->interleave:%" GST_TIME_FORMAT
1697 " last_interleave_update:%" GST_STIME_FORMAT, GST_STIME_ARGS (low),
1698 GST_STIME_ARGS (high), GST_TIME_ARGS (interleave),
1699 GST_TIME_ARGS (mq->interleave),
1700 GST_STIME_ARGS (mq->last_interleave_update));
1704 /* calculate the diff between running time on the sink and src of the queue.
1705 * This is the total amount of time in the queue.
1706 * WITH LOCK TAKEN */
1708 update_time_level (GstMultiQueue * mq, GstSingleQueue * sq)
1710 GstClockTimeDiff sink_time, src_time;
/* recompute the sink-side running time only when the sink segment/position
 * changed since the last computation ("tainted") */
1712 if (sq->sink_tainted) {
1713 sink_time = sq->sinktime = my_segment_to_running_time (&sq->sink_segment,
1714 sq->sink_segment.position);
1716 GST_DEBUG_OBJECT (mq,
1717 "queue %d sink_segment.position:%" GST_TIME_FORMAT ", sink_time:%"
1718 GST_STIME_FORMAT, sq->id, GST_TIME_ARGS (sq->sink_segment.position),
1719 GST_STIME_ARGS (sink_time));
1721 if (G_UNLIKELY (sq->last_time == GST_CLOCK_STIME_NONE)) {
1722 /* If the single queue still doesn't have a last_time set, this means
1723 * that nothing has been pushed out yet.
1724 * In order for the high_time computation to be as efficient as possible,
1725 * we set the last_time */
1726 sq->last_time = sink_time;
1728 if (G_UNLIKELY (sink_time != GST_CLOCK_STIME_NONE)) {
1729 /* if we have a time, we become untainted and use the time */
1730 sq->sink_tainted = FALSE;
1731 if (mq->use_interleave) {
1732 sq->cached_sinktime = sink_time;
1733 calculate_interleave (mq, sq);
/* not tainted: reuse the cached value */
1737 sink_time = sq->sinktime;
1739 if (sq->src_tainted) {
1740 GstSegment *segment;
1743 if (sq->has_src_segment) {
1744 segment = &sq->src_segment;
1745 position = sq->src_segment.position;
1748 * If the src pad had no segment yet, use the sink segment
1749 * to avoid signalling overrun if the received sink segment has a
1750 * a position > max-size-time while the src pad time would be the default=0
1752 * This can happen when switching pads on chained/adaptive streams and the
1753 * new chain has a segment with a much larger position
1755 segment = &sq->sink_segment;
1756 position = sq->sink_segment.position;
1759 src_time = sq->srctime = my_segment_to_running_time (segment, position);
1760 /* if we have a time, we become untainted and use the time */
1761 if (G_UNLIKELY (src_time != GST_CLOCK_STIME_NONE)) {
1762 sq->src_tainted = FALSE;
1765 src_time = sq->srctime;
1767 GST_DEBUG_OBJECT (mq,
1768 "queue %d, sink %" GST_STIME_FORMAT ", src %" GST_STIME_FORMAT, sq->id,
1769 GST_STIME_ARGS (sink_time), GST_STIME_ARGS (src_time));
1771 /* This allows for streams with out of order timestamping - sometimes the
1772 * emerging timestamp is later than the arriving one(s) */
1773 if (G_LIKELY (GST_CLOCK_STIME_IS_VALID (sink_time) &&
1774 GST_CLOCK_STIME_IS_VALID (src_time) && sink_time > src_time))
1775 sq->cur_time = sink_time - src_time;
1779 /* updating the time level can change the buffering state */
1780 update_buffering (mq, sq);
1785 /* take a SEGMENT event and apply the values to segment, updating the time
1786 * level of queue. */
1788 apply_segment (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1789 GstSegment * segment)
1791 GstClockTimeDiff ppos = 0;
1793 /* If we switched groups, grab the previous position */
/* The stream-start group id changed on this side of the queue: capture the
 * running time reached under the OLD segment so it can be carried over as
 * the new segment's base below. Only done for forward playback. */
1794 if (segment->rate > 0.0) {
1795 if (segment == &sq->sink_segment && sq->sink_stream_gid_changed) {
1797 gst_segment_to_running_time (segment, GST_FORMAT_TIME,
1799 sq->sink_stream_gid_changed = FALSE;
1800 } else if (segment == &sq->src_segment && sq->src_stream_gid_changed) {
1802 gst_segment_to_running_time (segment, GST_FORMAT_TIME,
1804 sq->src_stream_gid_changed = FALSE;
1808 gst_event_copy_segment (event, segment);
1810 /* now configure the values, we use these to track timestamps on the
1812 if (segment->format != GST_FORMAT_TIME) {
1813 /* non-time format, pretend the current time segment is closed with a
1814 * 0 start and unknown stop time. */
1815 segment->format = GST_FORMAT_TIME;
1820 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1823 GST_DEBUG_OBJECT (mq, "queue %d, Applying base of %" GST_TIME_FORMAT,
1824 sq->id, GST_TIME_ARGS (ppos));
1825 segment->base = ppos;
1828 /* Make sure we have a valid initial segment position (and not garbage
1830 if (segment->rate > 0.0)
1831 segment->position = segment->start;
1833 segment->position = segment->stop;
/* mark the updated side tainted so update_time_level recomputes it */
1835 if (segment == &sq->sink_segment)
1836 sq->sink_tainted = TRUE;
1838 sq->has_src_segment = TRUE;
1839 sq->src_tainted = TRUE;
1842 GST_DEBUG_OBJECT (mq,
1843 "queue %d, configured SEGMENT %" GST_SEGMENT_FORMAT, sq->id, segment);
1845 /* segment can update the time level of the queue */
1846 update_time_level (mq, sq);
1848 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1849 gst_multi_queue_post_buffering (mq);
1852 /* take a buffer and update segment, updating the time level of the queue. */
/* 'segment' is either sq->sink_segment (buffer entering the queue) or
 * sq->src_segment (buffer leaving); its position advances to the buffer's
 * end timestamp, after which the queued-time level is recomputed. */
1854 apply_buffer (GstMultiQueue * mq, GstSingleQueue * sq, GstClockTime timestamp,
1855 GstClockTime duration, GstSegment * segment)
1857 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1859 /* if no timestamp is set, assume it's continuous with the previous
1861 if (timestamp == GST_CLOCK_TIME_NONE)
1862 timestamp = segment->position;
/* advance position to the end of the buffer when a duration is known */
1865 if (duration != GST_CLOCK_TIME_NONE)
1866 timestamp += duration;
1868 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1869 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1870 GST_TIME_ARGS (timestamp));
1872 segment->position = timestamp;
1874 if (segment == &sq->sink_segment)
1875 sq->sink_tainted = TRUE;
1877 sq->src_tainted = TRUE;
1879 /* calc diff with other end */
1880 update_time_level (mq, sq);
1881 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1882 gst_multi_queue_post_buffering (mq);
/* Take a GAP event and advance the given segment's position past the gap
 * (timestamp + duration), then recompute the queue's time level — the GAP
 * counterpart of apply_buffer. */
1886 apply_gap (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1887 GstSegment * segment)
1889 GstClockTime timestamp;
1890 GstClockTime duration;
1892 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1894 gst_event_parse_gap (event, &timestamp, &duration);
/* only update position if the gap carries a valid timestamp */
1896 if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
1898 if (GST_CLOCK_TIME_IS_VALID (duration)) {
1899 timestamp += duration;
1902 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1903 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1904 GST_TIME_ARGS (timestamp));
1906 segment->position = timestamp;
1908 if (segment == &sq->sink_segment)
1909 sq->sink_tainted = TRUE;
1911 sq->src_tainted = TRUE;
1913 /* calc diff with other end */
1914 update_time_level (mq, sq);
1917 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1918 gst_multi_queue_post_buffering (mq);
/* Map a queued mini-object to a running time in the given segment.
 * Buffers use DTS-or-PTS (+duration when 'end' is set); buffer lists use the
 * last timestamped buffer in the list; SEGMENT events use the new segment's
 * start (TIME format only); GAP events use the gap timestamp (+duration).
 * Anything else yields GST_CLOCK_STIME_NONE. */
1921 static GstClockTimeDiff
1922 get_running_time (GstSegment * segment, GstMiniObject * object, gboolean end)
1924 GstClockTimeDiff time = GST_CLOCK_STIME_NONE;
1926 if (GST_IS_BUFFER (object)) {
1927 GstBuffer *buf = GST_BUFFER_CAST (object);
1928 GstClockTime btime = GST_BUFFER_DTS_OR_PTS (buf);
1930 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1931 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1932 btime += GST_BUFFER_DURATION (buf);
1933 time = my_segment_to_running_time (segment, btime);
1935 } else if (GST_IS_BUFFER_LIST (object)) {
1936 GstBufferList *list = GST_BUFFER_LIST_CAST (object);
/* walk the whole list; 'time' ends up reflecting the last valid timestamp */
1940 n = gst_buffer_list_length (list);
1941 for (i = 0; i < n; i++) {
1943 buf = gst_buffer_list_get (list, i);
1944 btime = GST_BUFFER_DTS_OR_PTS (buf);
1945 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1946 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1947 btime += GST_BUFFER_DURATION (buf);
1948 time = my_segment_to_running_time (segment, btime);
1955 } else if (GST_IS_EVENT (object)) {
1956 GstEvent *event = GST_EVENT_CAST (object);
1958 /* For newsegment events return the running time of the start position */
1959 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
1960 const GstSegment *new_segment;
1962 gst_event_parse_segment (event, &new_segment);
1963 if (new_segment->format == GST_FORMAT_TIME) {
1965 my_segment_to_running_time ((GstSegment *) new_segment,
1966 new_segment->start);
1968 } else if (GST_EVENT_TYPE (event) == GST_EVENT_GAP) {
1969 GstClockTime ts, dur;
1970 gst_event_parse_gap (event, &ts, &dur);
1971 if (GST_CLOCK_TIME_IS_VALID (ts)) {
1972 if (GST_CLOCK_TIME_IS_VALID (dur))
1974 time = my_segment_to_running_time (segment, ts);
/* Push one dequeued mini-object (buffer, event or serialized query)
 * downstream on the single queue's src pad.
 * '*allow_drop' is the post-EOS drop mode: while set, buffers/events/queries
 * are discarded instead of pushed; certain events (SEGMENT_DONE, EOS,
 * STREAM_START, SEGMENT) clear it again. Returns the flow result to be
 * stored as srcresult. Called without the multiqueue lock held. */
1983 static GstFlowReturn
1984 gst_single_queue_push_one (GstMultiQueue * mq, GstSingleQueue * sq,
1985 GstMiniObject * object, gboolean * allow_drop)
1987 GstFlowReturn result = sq->srcresult;
1988 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
/* src pad already gone (released/finalized): nothing can be pushed */
1991 GST_INFO_OBJECT (mq,
1992 "Pushing while corresponding sourcepad has been cleared");
1993 return GST_FLOW_FLUSHING;
1996 if (GST_IS_BUFFER (object)) {
1998 GstClockTime timestamp, duration;
2000 buffer = GST_BUFFER_CAST (object);
2001 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
2002 duration = GST_BUFFER_DURATION (buffer);
/* update the src-side segment position/time level before pushing */
2004 apply_buffer (mq, sq, timestamp, duration, &sq->src_segment);
2006 /* Applying the buffer may have made the queue non-full again, unblock it if needed */
2007 gst_data_queue_limits_changed (sq->queue);
2009 if (G_UNLIKELY (*allow_drop)) {
2010 GST_DEBUG_OBJECT (mq,
2011 "SingleQueue %d : Dropping EOS buffer %p with ts %" GST_TIME_FORMAT,
2012 sq->id, buffer, GST_TIME_ARGS (timestamp));
2013 gst_buffer_unref (buffer);
2015 GST_DEBUG_OBJECT (mq,
2016 "SingleQueue %d : Pushing buffer %p with ts %" GST_TIME_FORMAT,
2017 sq->id, buffer, GST_TIME_ARGS (timestamp));
2018 result = gst_pad_push (srcpad, buffer);
2020 } else if (GST_IS_EVENT (object)) {
2023 event = GST_EVENT_CAST (object);
2025 switch (GST_EVENT_TYPE (event)) {
2026 case GST_EVENT_SEGMENT_DONE:
2027 *allow_drop = FALSE;
/* EOS: report GST_FLOW_EOS upward and leave drop mode */
2030 result = GST_FLOW_EOS;
2031 if (G_UNLIKELY (*allow_drop))
2032 *allow_drop = FALSE;
2034 case GST_EVENT_STREAM_START:
/* track the stream's group id so apply_segment can detect group switches */
2037 if (gst_event_parse_group_id (event, &group_id)) {
2038 if (sq->src_stream_gid == GST_GROUP_ID_INVALID) {
2039 sq->src_stream_gid = group_id;
2040 } else if (group_id != sq->src_stream_gid) {
2041 sq->src_stream_gid = group_id;
2042 sq->src_stream_gid_changed = TRUE;
2045 result = GST_FLOW_OK;
2046 if (G_UNLIKELY (*allow_drop))
2047 *allow_drop = FALSE;
2050 case GST_EVENT_SEGMENT:
2051 apply_segment (mq, sq, event, &sq->src_segment);
2052 /* Applying the segment may have made the queue non-full again, unblock it if needed */
2053 gst_data_queue_limits_changed (sq->queue);
2054 if (G_UNLIKELY (*allow_drop)) {
2055 result = GST_FLOW_OK;
2056 *allow_drop = FALSE;
2060 apply_gap (mq, sq, event, &sq->src_segment);
2061 /* Applying the gap may have made the queue non-full again, unblock it if needed */
2062 gst_data_queue_limits_changed (sq->queue);
2068 if (G_UNLIKELY (*allow_drop)) {
2069 GST_DEBUG_OBJECT (mq,
2070 "SingleQueue %d : Dropping EOS event %p of type %s",
2071 sq->id, event, GST_EVENT_TYPE_NAME (event));
2072 gst_event_unref (event);
2074 GST_DEBUG_OBJECT (mq,
2075 "SingleQueue %d : Pushing event %p of type %s",
2076 sq->id, event, GST_EVENT_TYPE_NAME (event));
2078 gst_pad_push_event (srcpad, event);
2080 } else if (GST_IS_QUERY (object)) {
2084 query = GST_QUERY_CAST (object);
2086 if (G_UNLIKELY (*allow_drop)) {
2087 GST_DEBUG_OBJECT (mq,
2088 "SingleQueue %d : Dropping EOS query %p", sq->id, query);
2089 gst_query_unref (query);
2092 res = gst_pad_peer_query (srcpad, query);
/* publish the query result and wake the sink-side thread waiting on it */
2095 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2096 sq->last_query = res;
2097 sq->last_handled_query = query;
2098 g_cond_signal (&sq->query_handled);
2099 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2101 g_warning ("Unexpected object in singlequeue %u (refcounting problem?)",
2105 gst_object_unref (srcpad);
/* Detach and return the mini-object held by an item; the item no longer
 * owns it afterwards, so gst_multi_queue_item_destroy won't unref it. */
2111 static GstMiniObject *
2112 gst_multi_queue_item_steal_object (GstMultiQueueItem * item)
2117 item->object = NULL;
/* Free a queue item, unreffing its mini-object unless it was stolen or is a
 * query (queries are owned by the thread that posted them). */
2123 gst_multi_queue_item_destroy (GstMultiQueueItem * item)
2125 if (!item->is_query && item->object)
2126 gst_mini_object_unref (item->object);
2127 g_slice_free (GstMultiQueueItem, item);
2130 /* takes ownership of passed mini object! */
/* Wrap a GstBuffer in a queue item with its size/duration recorded and
 * visible=TRUE so it counts toward the data queue's visible-buffer limit. */
2131 static GstMultiQueueItem *
2132 gst_multi_queue_buffer_item_new (GstMiniObject * object, guint32 curid)
2134 GstMultiQueueItem *item;
2136 item = g_slice_new (GstMultiQueueItem);
2137 item->object = object;
2138 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
/* posid orders items across queues for the highid wake-up logic */
2139 item->posid = curid;
2140 item->is_query = GST_IS_QUERY (object);
2142 item->size = gst_buffer_get_size (GST_BUFFER_CAST (object));
2143 item->duration = GST_BUFFER_DURATION (object);
2144 if (item->duration == GST_CLOCK_TIME_NONE)
2146 item->visible = TRUE;
/* Wrap a non-buffer mini-object (event/query) in a queue item; takes
 * ownership. visible=FALSE so it does not count toward the visible-buffer
 * limit of the data queue. */
2150 static GstMultiQueueItem *
2151 gst_multi_queue_mo_item_new (GstMiniObject * object, guint32 curid)
2153 GstMultiQueueItem *item;
2155 item = g_slice_new (GstMultiQueueItem);
2156 item->object = object;
2157 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
2158 item->posid = curid;
2159 item->is_query = GST_IS_QUERY (object);
2163 item->visible = FALSE;
2167 /* Each main loop attempts to push buffers until the return value
2168 * is not-linked. not-linked pads are not allowed to push data beyond
2169 * any linked pads, so they don't 'rush ahead of the pack'.
/* Streaming-thread task for one single queue's source pad: pops exactly one
 * item from sq->queue per iteration, optionally sleeps (on sq->turn) while
 * this pad is NOT_LINKED and ahead of the linked pads — by item id, or by
 * running time when mq->sync_by_running_time is set — then pushes the object
 * downstream and updates the shared highid/high_time bookkeeping under the
 * multiqueue lock.
 * NOTE(review): this extract is missing interior lines; comments describe
 * only what the visible code shows. */
2172 gst_multi_queue_loop (GstPad * pad)
2175 GstMultiQueueItem *item;
2176 GstDataQueueItem *sitem;
2178 GstMiniObject *object = NULL;
2180 GstFlowReturn result;
2181 GstClockTimeDiff next_time;
2183 gboolean is_query = FALSE;
2184 gboolean do_update_buffering = FALSE;
2185 gboolean dropping = FALSE;
2186 GstPad *srcpad = NULL;
/* Resolve weak refs up-front; both can be NULL if the queue/pad went away. */
2188 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2189 mq = g_weak_ref_get (&sq->mqueue);
2190 srcpad = g_weak_ref_get (&sq->srcpad);
2196 GST_DEBUG_OBJECT (mq, "SingleQueue %d : trying to pop an object", sq->id);
2201 /* Get something from the queue, blocking until that happens, or we get
2203 if (!(gst_data_queue_pop (sq->queue, &sitem)))
2206 item = (GstMultiQueueItem *) sitem;
2207 newid = item->posid;
2209 is_query = item->is_query;
2211 /* steal the object and destroy the item */
2212 object = gst_multi_queue_item_steal_object (item);
2213 gst_multi_queue_item_destroy (item);
2215 is_buffer = GST_IS_BUFFER (object);
2217 /* Get running time of the item. Events will have GST_CLOCK_STIME_NONE */
2218 next_time = get_running_time (&sq->src_segment, object, FALSE);
2220 GST_LOG_OBJECT (mq, "SingleQueue %d : newid:%d , oldid:%d",
2221 sq->id, newid, sq->last_oldid);
2223 /* If we're not-linked, we do some extra work because we might need to
2224 * wait before pushing. If we're linked but there's a gap in the IDs,
2225 * or it's the first loop, or we just passed the previous highid,
2226 * we might need to wake some sleeping pad up, so there's extra work
2228 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2229 if (sq->srcresult == GST_FLOW_NOT_LINKED
2230 || (sq->last_oldid == G_MAXUINT32) || (newid != (sq->last_oldid + 1))
2231 || sq->last_oldid > mq->highid) {
2232 GST_LOG_OBJECT (mq, "CHECKING sq->srcresult: %s",
2233 gst_flow_get_name (sq->srcresult));
2235 /* Check again if we're flushing after the lock is taken,
2236 * the flush flag might have been changed in the meantime */
2238 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2242 /* Update the nextid so other threads know when to wake us up */
2244 /* Take into account the extra cache time since we're unlinked */
2245 if (GST_CLOCK_STIME_IS_VALID (next_time))
2246 next_time += mq->unlinked_cache_time;
2247 sq->next_time = next_time;
2249 /* Update the oldid (the last ID we output) for highid tracking */
2250 if (sq->last_oldid != G_MAXUINT32)
2251 sq->oldid = sq->last_oldid;
2253 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2254 gboolean should_wait;
2255 /* Go to sleep until it's time to push this buffer */
2257 /* Recompute the highid */
2258 compute_high_id (mq);
2259 /* Recompute the high time */
2260 compute_high_time (mq, sq->groupid);
2262 GST_DEBUG_OBJECT (mq,
2263 "groupid %d high_time %" GST_STIME_FORMAT " next_time %"
2264 GST_STIME_FORMAT, sq->groupid, GST_STIME_ARGS (sq->group_high_time),
2265 GST_STIME_ARGS (next_time));
/* Decide whether to sleep: by running time when syncing, else by item id.
 * When no per-group high time exists, fall back to the global high time. */
2267 if (mq->sync_by_running_time) {
2268 if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
2269 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
2270 (mq->high_time == GST_CLOCK_STIME_NONE
2271 || next_time > mq->high_time);
2273 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
2274 next_time > sq->group_high_time;
2277 should_wait = newid > mq->highid;
2279 while (should_wait && sq->srcresult == GST_FLOW_NOT_LINKED) {
2281 GST_DEBUG_OBJECT (mq,
2282 "queue %d sleeping for not-linked wakeup with "
2283 "newid %u, highid %u, next_time %" GST_STIME_FORMAT
2284 ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
2285 GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time));
2287 /* Wake up all non-linked pads before we sleep */
2288 wake_up_next_non_linked (mq);
/* Sleep until another queue advances the high id/time and signals us. */
2291 g_cond_wait (&sq->turn, &mq->qlock);
2295 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2299 /* Recompute the high time and ID */
2300 compute_high_time (mq, sq->groupid);
2301 compute_high_id (mq);
2303 GST_DEBUG_OBJECT (mq, "queue %d woken from sleeping for not-linked "
2304 "wakeup with newid %u, highid %u, next_time %" GST_STIME_FORMAT
2305 ", high_time %" GST_STIME_FORMAT " mq high_time %" GST_STIME_FORMAT,
2306 sq->id, newid, mq->highid,
2307 GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time),
2308 GST_STIME_ARGS (mq->high_time));
/* Re-evaluate the wait condition after waking; state may have moved on. */
2310 if (mq->sync_by_running_time) {
2311 if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
2312 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
2313 (mq->high_time == GST_CLOCK_STIME_NONE
2314 || next_time > mq->high_time);
2316 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
2317 next_time > sq->group_high_time;
2320 should_wait = newid > mq->highid;
2323 /* Re-compute the high_id in case someone else pushed */
2324 compute_high_id (mq);
2325 compute_high_time (mq, sq->groupid);
2327 compute_high_id (mq);
2328 compute_high_time (mq, sq->groupid);
2329 /* Wake up all non-linked pads */
2330 wake_up_next_non_linked (mq);
2332 /* We're done waiting, we can clear the nextid and nexttime */
2334 sq->next_time = GST_CLOCK_STIME_NONE;
2336 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2341 GST_LOG_OBJECT (mq, "sq:%d BEFORE PUSHING sq->srcresult: %s", sq->id,
2342 gst_flow_get_name (sq->srcresult));
2344 /* Update time stats */
2345 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2346 next_time = get_running_time (&sq->src_segment, object, TRUE);
2347 if (GST_CLOCK_STIME_IS_VALID (next_time)) {
2348 if (sq->last_time == GST_CLOCK_STIME_NONE || sq->last_time < next_time)
2349 sq->last_time = next_time;
2350 if (mq->high_time == GST_CLOCK_STIME_NONE || mq->high_time <= next_time) {
2351 /* Wake up all non-linked pads now that we advanced the high time */
2352 mq->high_time = next_time;
2353 wake_up_next_non_linked (mq);
2356 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2358 /* Try to push out the new object */
2359 result = gst_single_queue_push_one (mq, sq, object, &dropping);
2362 /* Check if we pushed something already and if this is
2363 * now a switch from an active to a non-active stream.
2365 * If it is, we reset all the waiting streams, let them
2366 * push another buffer to see if they're now active again.
2367 * This allows faster switching between streams and prevents
2368 * deadlocks if downstream does any waiting too.
2370 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2371 if (sq->pushed && sq->srcresult == GST_FLOW_OK
2372 && result == GST_FLOW_NOT_LINKED) {
2375 GST_LOG_OBJECT (mq, "SingleQueue %d : Changed from active to non-active",
2378 compute_high_id (mq);
2379 compute_high_time (mq, sq->groupid);
2380 do_update_buffering = TRUE;
2382 /* maybe no-one is waiting */
2383 if (mq->numwaiting > 0) {
2384 /* Else figure out which singlequeue(s) need waking up */
2385 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2386 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
2388 if (sq2->srcresult == GST_FLOW_NOT_LINKED) {
2389 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq2->id);
2390 sq2->pushed = FALSE;
2391 sq2->srcresult = GST_FLOW_OK;
2392 g_cond_signal (&sq2->turn);
2401 /* now hold on a bit;
2402 * can not simply throw this result to upstream, because
2403 * that might already be onto another segment, so we have to make
2404 * sure we are relaying the correct info wrt proper segment */
2405 if (result == GST_FLOW_EOS && !dropping &&
2406 sq->srcresult != GST_FLOW_NOT_LINKED) {
2407 GST_DEBUG_OBJECT (mq, "starting EOS drop on sq %d", sq->id);
2409 /* pretend we have not seen EOS yet for upstream's sake */
2410 result = sq->srcresult;
2411 } else if (dropping && gst_data_queue_is_empty (sq->queue)) {
2412 /* queue empty, so stop dropping
2413 * we can commit the result we have now,
2414 * which is either OK after a segment, or EOS */
2415 GST_DEBUG_OBJECT (mq, "committed EOS drop on sq %d", sq->id);
2417 result = GST_FLOW_EOS;
2419 sq->srcresult = result;
2420 sq->last_oldid = newid;
2422 if (do_update_buffering)
2423 update_buffering (mq, sq);
2425 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2426 gst_multi_queue_post_buffering (mq);
2428 GST_LOG_OBJECT (mq, "sq:%d AFTER PUSHING sq->srcresult: %s (is_eos:%d)",
2429 sq->id, gst_flow_get_name (sq->srcresult), GST_PAD_IS_EOS (srcpad));
2431 /* Need to make sure wake up any sleeping pads when we exit */
2432 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2433 if (mq->numwaiting > 0 && (GST_PAD_IS_EOS (srcpad)
2434 || sq->srcresult == GST_FLOW_EOS)) {
2435 compute_high_time (mq, sq->groupid);
2436 compute_high_id (mq);
2437 wake_up_next_non_linked (mq);
2439 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Anything other than OK / NOT_LINKED / EOS is a real failure and takes
 * the error path below (the goto/label lines are outside this extract). */
2444 if (result != GST_FLOW_OK && result != GST_FLOW_NOT_LINKED
2445 && result != GST_FLOW_EOS)
2449 gst_clear_object (&mq);
2450 gst_clear_object (&srcpad);
/* --- error path: release the popped object (queries are not unreffed here,
 * their owner is still waiting on query_handled) and signal the waiter --- */
2456 if (object && !is_query)
2457 gst_mini_object_unref (object);
2459 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2460 sq->last_query = FALSE;
2461 g_cond_signal (&sq->query_handled);
2463 /* Post an error message if we got EOS while downstream
2464 * has returned an error flow return. After EOS there
2465 * will be no further buffer which could propagate the
2467 if ((sq->is_eos || sq->is_segment_done) && sq->srcresult < GST_FLOW_EOS) {
2468 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2469 GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
2471 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2474 /* upstream needs to see fatal result ASAP to shut things down,
2475 * but might be stuck in one of our other full queues;
2476 * so empty this one and trigger dynamic queue growth. At
2477 * this point the srcresult is not OK, NOT_LINKED
2478 * or EOS, i.e. a real failure */
2479 gst_single_queue_flush_queue (sq, FALSE);
2480 single_queue_underrun_cb (sq->queue, sq);
2481 gst_data_queue_set_flushing (sq->queue, TRUE);
2482 gst_pad_pause_task (srcpad);
2483 GST_CAT_LOG_OBJECT (multi_queue_debug, mq,
2484 "SingleQueue[%d] task paused, reason:%s",
2485 sq->id, gst_flow_get_name (sq->srcresult));
2491 * gst_multi_queue_chain:
2493 * This is similar to GstQueue's chain function, except:
2494 * _ we don't have leak behaviours,
2495 * _ we push with a unique id (curid)
 * Takes ownership of @buffer.  Returns the queue's current srcresult after
 * enqueuing, or GST_FLOW_EOS when the buffer is dropped because the queue
 * already saw EOS (visible drop path at the bottom).
2497 static GstFlowReturn
2498 gst_multi_queue_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
2502 GstMultiQueueItem *item = NULL;
2504 GstClockTime timestamp, duration;
2506 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2507 mq = g_weak_ref_get (&sq->mqueue);
2512 /* if eos, we are always full, so avoid hanging incoming indefinitely */
2518 /* Get a unique incrementing id */
2519 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2521 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
2522 duration = GST_BUFFER_DURATION (buffer);
2525 "SingleQueue %d : about to enqueue buffer %p with id %d (pts:%"
2526 GST_TIME_FORMAT " dts:%" GST_TIME_FORMAT " dur:%" GST_TIME_FORMAT ")",
2527 sq->id, buffer, curid, GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2528 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), GST_TIME_ARGS (duration));
2530 item = gst_multi_queue_buffer_item_new (GST_MINI_OBJECT_CAST (buffer), curid);
2532 /* Update interleave before pushing data into queue */
2533 if (mq->use_interleave) {
2534 GstClockTime val = timestamp;
2535 GstClockTimeDiff dval;
2537 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
/* Fall back to the segment position when the buffer has no timestamp. */
2538 if (val == GST_CLOCK_TIME_NONE)
2539 val = sq->sink_segment.position;
2540 if (duration != GST_CLOCK_TIME_NONE)
2543 dval = my_segment_to_running_time (&sq->sink_segment, val);
2544 if (GST_CLOCK_STIME_IS_VALID (dval)) {
2545 sq->cached_sinktime = dval;
2546 GST_DEBUG_OBJECT (mq,
2547 "Queue %d cached sink time now %" G_GINT64_FORMAT " %"
2548 GST_STIME_FORMAT, sq->id, sq->cached_sinktime,
2549 GST_STIME_ARGS (sq->cached_sinktime));
2550 calculate_interleave (mq, sq);
2552 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* The data queue takes ownership of item (and thus of the buffer). */
2555 if (!(gst_data_queue_push (sq->queue, (GstDataQueueItem *) item)))
2558 /* update time level, we must do this after pushing the data in the queue so
2559 * that we never end up filling the queue first. */
2560 apply_buffer (mq, sq, timestamp, duration, &sq->sink_segment);
2563 gst_clear_object (&mq);
2564 return sq->srcresult;
/* --- error/EOS exit paths (labels are outside this extract) --- */
2569 GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
2570 sq->id, gst_flow_get_name (sq->srcresult));
2572 gst_multi_queue_item_destroy (item);
2577 GST_DEBUG_OBJECT (mq, "we are EOS, dropping buffer, return EOS");
2578 gst_buffer_unref (buffer);
2579 gst_object_unref (mq);
2580 return GST_FLOW_EOS;
/* Sink pad activate/deactivate handler (PUSH mode): activation resets the
 * queue to GST_FLOW_OK and un-flushes the data queue; deactivation sets
 * FLUSHING, wakes any blocked serialized-query waiter, and drains the
 * streaming thread (via the pad stream lock) before flushing the queue. */
2585 gst_multi_queue_sink_activate_mode (GstPad * pad, GstObject * parent,
2586 GstPadMode mode, gboolean active)
2592 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2593 mq = (GstMultiQueue *) gst_pad_get_parent (pad);
2595 /* mq is NULL if the pad is activated/deactivated before being
2596 * added to the multiqueue */
2598 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2601 case GST_PAD_MODE_PUSH:
2603 /* All pads start off linked until they push one buffer */
2604 sq->srcresult = GST_FLOW_OK;
2606 gst_data_queue_set_flushing (sq->queue, FALSE);
/* deactivation branch: flag flushing and release the query waiter */
2608 sq->srcresult = GST_FLOW_FLUSHING;
2609 sq->last_query = FALSE;
2610 g_cond_signal (&sq->query_handled);
2611 gst_data_queue_set_flushing (sq->queue, TRUE);
2613 /* Wait until streaming thread has finished */
2615 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2616 GST_PAD_STREAM_LOCK (pad);
2618 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2619 gst_data_queue_flush (sq->queue);
2621 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2622 GST_PAD_STREAM_UNLOCK (pad);
2624 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2634 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* balances the gst_pad_get_parent() ref taken above */
2635 gst_object_unref (mq);
/* Sink pad event handler.  FLUSH_START/STOP are forwarded to the srcpad
 * directly and (re)start the single queue; other serialized events are
 * wrapped in a GstMultiQueueItem and enqueued like data.  SEGMENT and GAP
 * keep an extra ref (sref) so segment/interleave state can be updated after
 * the queue has taken ownership of the event.
 * NOTE(review): interior lines are missing from this extract; comments only
 * cover the visible code. */
2641 static GstFlowReturn
2642 gst_multi_queue_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
2647 GstMultiQueueItem *item;
2648 gboolean res = TRUE;
2649 GstFlowReturn flowret = GST_FLOW_OK;
2651 GstEvent *sref = NULL;
2655 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2656 mq = (GstMultiQueue *) parent;
2657 srcpad = g_weak_ref_get (&sq->srcpad);
2660 GST_INFO_OBJECT (pad, "Pushing while corresponding sourcepad has been"
2661 " removed already");
2663 return GST_FLOW_FLUSHING;
2666 type = GST_EVENT_TYPE (event);
2669 case GST_EVENT_STREAM_START:
/* Track the stream group id; a change is remembered for later handling. */
2672 if (gst_event_parse_group_id (event, &group_id)) {
2673 if (sq->sink_stream_gid == GST_GROUP_ID_INVALID) {
2674 sq->sink_stream_gid = group_id;
2675 } else if (group_id != sq->sink_stream_gid) {
2676 sq->sink_stream_gid = group_id;
2677 sq->sink_stream_gid_changed = TRUE;
2680 if (mq->sync_by_running_time) {
2681 GstStreamFlags stream_flags;
2682 gst_event_parse_stream_flags (event, &stream_flags);
2683 if ((stream_flags & GST_STREAM_FLAG_SPARSE)) {
2684 GST_INFO_OBJECT (mq, "SingleQueue %d is a sparse stream", sq->id);
2685 sq->is_sparse = TRUE;
2689 sq->thread = g_thread_self ();
2691 /* Remove EOS flag */
2695 case GST_EVENT_FLUSH_START:
2696 GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush start event",
2699 res = gst_pad_push_event (srcpad, event);
2701 gst_single_queue_flush (mq, sq, TRUE, FALSE);
2702 gst_single_queue_pause (mq, sq);
2705 case GST_EVENT_FLUSH_STOP:
2706 GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush stop event",
2709 res = gst_pad_push_event (srcpad, event);
2711 gst_single_queue_flush (mq, sq, FALSE, FALSE);
2712 gst_single_queue_start (mq, sq);
2715 case GST_EVENT_SEGMENT:
2716 sq->is_segment_done = FALSE;
2717 sref = gst_event_ref (event);
2720 /* take ref because the queue will take ownership and we need the event
2721 * afterwards to update the segment */
2722 sref = gst_event_ref (event);
/* GAP branch: use the gap position to refresh the cached sink time. */
2723 if (mq->use_interleave) {
2724 GstClockTime val, dur;
2726 gst_event_parse_gap (event, &val, &dur);
2727 if (GST_CLOCK_TIME_IS_VALID (val)) {
2728 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2729 if (GST_CLOCK_TIME_IS_VALID (dur))
2731 stime = my_segment_to_running_time (&sq->sink_segment, val);
2732 if (GST_CLOCK_STIME_IS_VALID (stime)) {
2733 sq->cached_sinktime = stime;
2734 calculate_interleave (mq, sq);
2736 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Non-serialized events bypass the queue and go straight downstream. */
2742 if (!(GST_EVENT_IS_SERIALIZED (event))) {
2743 res = gst_pad_push_event (srcpad, event);
2749 /* if eos, we are always full, so avoid hanging incoming indefinitely */
2753 /* Get an unique incrementing id. */
2754 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2756 item = gst_multi_queue_mo_item_new ((GstMiniObject *) event, curid);
2758 GST_DEBUG_OBJECT (mq,
2759 "SingleQueue %d : Enqueuing event %p of type %s with id %d",
2760 sq->id, event, GST_EVENT_TYPE_NAME (event), curid);
2762 if (!gst_data_queue_push (sq->queue, (GstDataQueueItem *) item))
2765 /* mark EOS when we received one, we must do that after putting the
2766 * buffer in the queue because EOS marks the buffer as filled. */
2768 case GST_EVENT_SEGMENT_DONE:
2769 sq->is_segment_done = TRUE;
2770 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2771 update_buffering (mq, sq);
2772 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2773 single_queue_overrun_cb (sq->queue, sq);
2774 gst_multi_queue_post_buffering (mq);
2777 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2780 /* Post an error message if we got EOS while downstream
2781 * has returned an error flow return. After EOS there
2782 * will be no further buffer which could propagate the
2784 if (sq->srcresult < GST_FLOW_EOS) {
2785 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2786 GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
2788 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2791 /* EOS affects the buffering state */
2792 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2793 update_buffering (mq, sq);
2794 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2795 single_queue_overrun_cb (sq->queue, sq);
2796 gst_multi_queue_post_buffering (mq);
/* post-enqueue handling: apply segment/gap using the extra ref taken above */
2798 case GST_EVENT_SEGMENT:
2799 apply_segment (mq, sq, sref, &sq->sink_segment);
2800 gst_event_unref (sref);
2801 /* a new segment allows us to accept more buffers if we got EOS
2802 * from downstream */
2803 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2804 if (sq->srcresult == GST_FLOW_EOS)
2805 sq->srcresult = GST_FLOW_OK;
2806 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2810 apply_gap (mq, sq, sref, &sq->sink_segment);
2811 gst_event_unref (sref);
/* --- exit paths (labels are outside this extract) --- */
2818 gst_object_unref (srcpad);
2820 flowret = GST_FLOW_ERROR;
2821 GST_DEBUG_OBJECT (mq, "SingleQueue %d : returning %s", sq->id,
2822 gst_flow_get_name (flowret));
2827 gst_object_unref (srcpad);
2828 GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
2829 sq->id, gst_flow_get_name (sq->srcresult));
2831 gst_event_unref (sref);
2832 gst_multi_queue_item_destroy (item);
2833 return sq->srcresult;
2837 gst_object_unref (srcpad);
2838 GST_DEBUG_OBJECT (mq, "we are EOS, dropping event, return GST_FLOW_EOS");
2839 gst_event_unref (event);
2840 return GST_FLOW_EOS;
/* Sink pad query handler: serialized queries are enqueued like data and the
 * caller blocks on sq->query_handled until the streaming thread has handled
 * the query — unless buffering mode would make that blocking a deadlock, in
 * which case the query is refused.  Non-serialized queries fall through to
 * the default handler. */
2845 gst_multi_queue_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
2851 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2852 mq = (GstMultiQueue *) parent;
2854 switch (GST_QUERY_TYPE (query)) {
2856 if (GST_QUERY_IS_SERIALIZED (query)) {
2858 GstMultiQueueItem *item;
2860 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2861 if (sq->srcresult != GST_FLOW_OK)
2864 /* serialized events go in the queue. We need to be certain that we
2865 * don't cause deadlocks waiting for the query return value. We check if
2866 * the queue is empty (nothing is blocking downstream and the query can
2867 * be pushed for sure) or we are not buffering. If we are buffering,
2868 * the pipeline waits to unblock downstream until our queue fills up
2869 * completely, which can not happen if we block on the query..
2870 * Therefore we only potentially block when we are not buffering. */
2871 if (!mq->use_buffering || gst_data_queue_is_empty (sq->queue)) {
2872 /* Get an unique incrementing id. */
2873 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2875 item = gst_multi_queue_mo_item_new ((GstMiniObject *) query, curid);
2877 GST_DEBUG_OBJECT (mq,
2878 "SingleQueue %d : Enqueuing query %p of type %s with id %d",
2879 sq->id, query, GST_QUERY_TYPE_NAME (query), curid);
/* Drop the lock while pushing: the data queue may block when full. */
2880 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2881 res = gst_data_queue_push (sq->queue, (GstDataQueueItem *) item);
2882 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2883 if (!res || sq->flushing)
2885 /* it might be that the query has been taken out of the queue
2886 * while we were unlocked. So, we need to check if the last
2887 * handled query is the same one than the one we just
2888 * pushed. If it is, we don't need to wait for the condition
2889 * variable, otherwise we wait for the condition variable to
2891 while (!sq->flushing && sq->srcresult == GST_FLOW_OK
2892 && sq->last_handled_query != query)
2893 g_cond_wait (&sq->query_handled, &mq->qlock);
2894 res = sq->last_query;
2895 sq->last_handled_query = NULL;
2897 GST_DEBUG_OBJECT (mq, "refusing query, we are buffering and the "
2898 "queue is not empty");
2901 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2903 /* default handling */
2904 res = gst_pad_query_default (pad, parent, query);
/* flushing exit path (label outside this extract) */
2912 GST_DEBUG_OBJECT (mq, "Flushing");
2913 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Source pad activate/deactivate handler (PUSH mode): activation un-flushes
 * the single queue and starts its streaming task (only when the pad already
 * has a parent); deactivation flushes and stops the task. */
2919 gst_multi_queue_src_activate_mode (GstPad * pad, GstObject * parent,
2920 GstPadMode mode, gboolean active)
2926 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2927 mq = g_weak_ref_get (&sq->mqueue);
2930 GST_ERROR_OBJECT (pad, "No multique set anymore, can't activate pad");
2935 GST_DEBUG_OBJECT (mq, "SingleQueue %d", sq->id);
2938 case GST_PAD_MODE_PUSH:
2940 gst_single_queue_flush (mq, sq, FALSE, TRUE);
2941 result = parent ? gst_single_queue_start (mq, sq) : TRUE;
2943 gst_single_queue_flush (mq, sq, TRUE, TRUE);
2944 result = gst_single_queue_stop (mq, sq);
/* balances the g_weak_ref_get() above */
2951 gst_object_unref (mq);
/* Source pad event handler: RECONFIGURE re-activates a NOT_LINKED queue
 * (downstream got relinked) and wakes its sleeping task; every event is then
 * forwarded upstream via the sink pad. */
2956 gst_multi_queue_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2958 GstSingleQueue *sq = GST_MULTIQUEUE_PAD (pad)->sq;
2959 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
2961 GstPad *sinkpad = g_weak_ref_get (&sq->sinkpad);
2963 if (!mq || !sinkpad) {
2964 gst_clear_object (&sinkpad);
2965 gst_clear_object (&mq);
2966 GST_INFO_OBJECT (pad, "No multique/sinkpad set anymore, flushing");
2971 switch (GST_EVENT_TYPE (event)) {
2972 case GST_EVENT_RECONFIGURE:
2973 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2974 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2975 sq->srcresult = GST_FLOW_OK;
2976 g_cond_signal (&sq->turn);
2978 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2980 ret = gst_pad_push_event (sinkpad, event);
2983 ret = gst_pad_push_event (sinkpad, event);
2987 gst_object_unref (sinkpad);
2988 gst_object_unref (mq);
/* Source pad query handler: currently delegates everything visible here to
 * the default pad query handling. */
2994 gst_multi_queue_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2998 /* FIXME, Handle position offset depending on queue size */
2999 switch (GST_QUERY_TYPE (query)) {
3001 /* default handling */
3002 res = gst_pad_query_default (pad, parent, query);
3009 * Next-non-linked functions
3012 /* WITH LOCK TAKEN */
/* Signal the turn condition of every NOT_LINKED single queue whose pending
 * item is now allowed through: by running time (next_time <= high time) when
 * syncing by running time, otherwise by item id (nextid <= highid). */
3014 wake_up_next_non_linked (GstMultiQueue * mq)
3018 /* maybe no-one is waiting */
3019 if (mq->numwaiting < 1)
3022 if (mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (mq->high_time)) {
3023 /* Else figure out which singlequeue(s) need waking up */
3024 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3025 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3026 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3027 GstClockTimeDiff high_time;
/* Prefer the per-group high time when one exists. */
3029 if (GST_CLOCK_STIME_IS_VALID (sq->group_high_time))
3030 high_time = sq->group_high_time;
3032 high_time = mq->high_time;
3034 if (GST_CLOCK_STIME_IS_VALID (sq->next_time) &&
3035 GST_CLOCK_STIME_IS_VALID (high_time)
3036 && sq->next_time <= high_time) {
3037 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
3038 g_cond_signal (&sq->turn);
/* id-based fallback when not syncing by running time */
3043 /* Else figure out which singlequeue(s) need waking up */
3044 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3045 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3046 if (sq->srcresult == GST_FLOW_NOT_LINKED &&
3047 sq->nextid != 0 && sq->nextid <= mq->highid) {
3048 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
3049 g_cond_signal (&sq->turn);
3055 /* WITH LOCK TAKEN */
/* Recompute mq->highid from all single queues: the highest id already output
 * by a linked, non-EOS queue, or — when the not-linked queues are behind —
 * the lowest id a not-linked queue is waiting on.  Queues whose srcpad is
 * gone or which are not waiting (nextid == 0) are skipped. */
3057 compute_high_id (GstMultiQueue * mq)
3059 /* The high-id is either the highest id among the linked pads, or if all
3060 * pads are not-linked, it's the lowest not-linked pad */
3062 guint32 lowest = G_MAXUINT32;
3063 guint32 highid = G_MAXUINT32;
3065 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3066 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3067 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3070 GST_INFO_OBJECT (mq,
3071 "srcpad has been removed already... ignoring single queue");
3076 GST_LOG_OBJECT (mq, "inspecting sq:%d , nextid:%d, oldid:%d, srcresult:%s",
3077 sq->id, sq->nextid, sq->oldid, gst_flow_get_name (sq->srcresult));
3079 /* No need to consider queues which are not waiting */
3080 if (sq->nextid == 0) {
3081 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
3082 gst_object_unref (srcpad);
3086 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3087 if (sq->nextid < lowest)
3088 lowest = sq->nextid;
3089 } else if (!GST_PAD_IS_EOS (srcpad) && sq->srcresult != GST_FLOW_EOS) {
3090 /* If we don't have a global highid, or the global highid is lower than
3091 * this single queue's last outputted id, store the queue's one,
3092 * unless the singlequeue output is at EOS */
3093 if ((highid == G_MAXUINT32) || (sq->oldid > highid))
3096 gst_object_unref (srcpad);
/* G_MAXUINT32 acts as the "unset" sentinel for both accumulators. */
3099 if (highid == G_MAXUINT32 || lowest < highid)
3100 mq->highid = lowest;
3102 mq->highid = highid;
3104 GST_LOG_OBJECT (mq, "Highid is now : %u, lowest non-linked %u", mq->highid,
3108 /* WITH LOCK TAKEN */
/* Recompute mq->high_time (global) and each queue's group_high_time for the
 * given @groupid.  Linked, non-EOS queues contribute their last output
 * running time (max wins); not-linked waiting queues contribute their
 * pending next_time (min wins).  A group with fewer than two members gets
 * GST_CLOCK_STIME_NONE, i.e. it falls back to the global high time.
 * Only meaningful when mq->sync_by_running_time is set (early-out above). */
3110 compute_high_time (GstMultiQueue * mq, guint groupid)
3112 /* The high-time is either the highest last time among the linked
3113 * pads, or if all pads are not-linked, it's the lowest nex time of
3116 GstClockTimeDiff highest = GST_CLOCK_STIME_NONE;
3117 GstClockTimeDiff lowest = GST_CLOCK_STIME_NONE;
3118 GstClockTimeDiff group_high = GST_CLOCK_STIME_NONE;
3119 GstClockTimeDiff group_low = GST_CLOCK_STIME_NONE;
3120 GstClockTimeDiff res;
3121 /* Number of streams which belong to groupid */
3122 guint group_count = 0;
3124 if (!mq->sync_by_running_time)
3125 /* return GST_CLOCK_STIME_NONE; */
3128 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3129 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3130 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3133 GST_INFO_OBJECT (mq,
3134 "srcpad has been removed already... ignoring single queue");
3140 "inspecting sq:%d (group:%d) , next_time:%" GST_STIME_FORMAT
3141 ", last_time:%" GST_STIME_FORMAT ", srcresult:%s", sq->id, sq->groupid,
3142 GST_STIME_ARGS (sq->next_time), GST_STIME_ARGS (sq->last_time),
3143 gst_flow_get_name (sq->srcresult));
3145 if (sq->groupid == groupid)
3148 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3149 /* No need to consider queues which are not waiting */
3150 if (!GST_CLOCK_STIME_IS_VALID (sq->next_time)) {
3151 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
3152 gst_object_unref (srcpad);
3156 if (lowest == GST_CLOCK_STIME_NONE || sq->next_time < lowest)
3157 lowest = sq->next_time;
3158 if (sq->groupid == groupid && (group_low == GST_CLOCK_STIME_NONE
3159 || sq->next_time < group_low))
3160 group_low = sq->next_time;
3161 } else if (!GST_PAD_IS_EOS (srcpad) && sq->srcresult != GST_FLOW_EOS) {
3162 /* If we don't have a global high time, or the global high time
3163 * is lower than this single queue's last outputted time, store
3164 * the queue's one, unless the singlequeue output is at EOS. */
3165 if (highest == GST_CLOCK_STIME_NONE
3166 || (sq->last_time != GST_CLOCK_STIME_NONE && sq->last_time > highest))
3167 highest = sq->last_time;
3168 if (sq->groupid == groupid && (group_high == GST_CLOCK_STIME_NONE
3169 || (sq->last_time != GST_CLOCK_STIME_NONE
3170 && sq->last_time > group_high)))
3171 group_high = sq->last_time;
3174 "highest now %" GST_STIME_FORMAT " lowest %" GST_STIME_FORMAT,
3175 GST_STIME_ARGS (highest), GST_STIME_ARGS (lowest));
3176 if (sq->groupid == groupid)
3178 "grouphigh %" GST_STIME_FORMAT " grouplow %" GST_STIME_FORMAT,
3179 GST_STIME_ARGS (group_high), GST_STIME_ARGS (group_low));
3181 gst_object_unref (srcpad);
3184 if (highest == GST_CLOCK_STIME_NONE)
3185 mq->high_time = lowest;
3187 mq->high_time = highest;
3189 /* If there's only one stream of a given type, use the global high */
3190 if (group_count < 2)
3191 res = GST_CLOCK_STIME_NONE;
3192 else if (group_high == GST_CLOCK_STIME_NONE)
3197 GST_LOG_OBJECT (mq, "group count %d for groupid %u", group_count, groupid);
3199 "MQ High time is now : %" GST_STIME_FORMAT ", group %d high time %"
3200 GST_STIME_FORMAT ", lowest non-linked %" GST_STIME_FORMAT,
3201 GST_STIME_ARGS (mq->high_time), groupid, GST_STIME_ARGS (mq->high_time),
3202 GST_STIME_ARGS (lowest));
/* Propagate the computed group high time to every member of the group. */
3204 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3205 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3206 if (groupid == sq->groupid)
3207 sq->group_high_time = res;
/* TRUE when the given limit dimension (visible/bytes/time) is enabled
 * (non-zero max) and @value has reached it. */
3211 #define IS_FILLED(q, format, value) (((q)->max_size.format) != 0 && \
3212 ((q)->max_size.format) <= (value))
3215 * GstSingleQueue functions
/* Data-queue "full" callback: when hard byte/time limits are not reached and
 * some other (non-sparse, linked) queue is empty, grow this queue's visible
 * limit by one so no stream starves; otherwise forward the overrun signal,
 * since a truly full queue blocks the upstream element. */
3218 single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
3221 GstDataQueueSize size;
3222 gboolean filled = TRUE;
3223 gboolean empty_found = FALSE;
3224 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3227 GST_ERROR ("No multique set anymore, not doing anything");
3232 gst_data_queue_get_level (sq->queue, &size);
3235 "Single Queue %d: EOS %d, visible %u/%u, bytes %u/%u, time %"
3236 G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, sq->id, sq->is_eos, size.visible,
3237 sq->max_size.visible, size.bytes, sq->max_size.bytes, sq->cur_time,
3240 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3242 /* check if we reached the hard time/bytes limits;
3243 time limit is only taken into account for non-sparse streams */
3244 if (sq->is_eos || IS_FILLED (sq, bytes, size.bytes) ||
3245 (!sq->is_sparse && IS_FILLED (sq, time, sq->cur_time))) {
3249 /* Search for empty queues */
3250 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3251 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
3256 if (oq->srcresult == GST_FLOW_NOT_LINKED) {
3257 GST_LOG_OBJECT (mq, "Queue %d is not-linked", oq->id);
3261 GST_LOG_OBJECT (mq, "Checking Queue %d", oq->id);
3262 if (gst_data_queue_is_empty (oq->queue) && !oq->is_sparse) {
3263 GST_LOG_OBJECT (mq, "Queue %d is empty", oq->id);
3269 /* if hard limits are not reached then we allow one more buffer in the full
3270 * queue, but only if any of the other singelqueues are empty */
3272 if (IS_FILLED (sq, visible, size.visible)) {
3273 sq->max_size.visible = size.visible + 1;
3274 GST_DEBUG_OBJECT (mq,
3275 "Bumping single queue %d max visible to %d",
3276 sq->id, sq->max_size.visible);
3282 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3283 gst_object_unref (mq);
3285 /* Overrun is always forwarded, since this is blocking the upstream element */
3287 GST_DEBUG_OBJECT (mq, "Queue %d is filled, signalling overrun", sq->id);
3288 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_OVERRUN], 0);
/* Data-queue "empty" callback: when a linked queue runs dry, bump the
 * visible limit of any other queue that is full (so it can keep feeding us),
 * and emit the underrun signal only if every queue turned out empty. */
3293 single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
3295 gboolean empty = TRUE;
3296 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3300 GST_ERROR ("No multique set anymore, not doing anything");
/* An unlinked queue running empty is expected; nothing to grow. */
3305 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3306 GST_LOG_OBJECT (mq, "Single Queue %d is empty but not-linked", sq->id);
3307 gst_object_unref (mq);
3311 "Single Queue %d is empty, Checking other single queues", sq->id);
3314 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3315 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3316 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
3318 if (gst_data_queue_is_full (oq->queue)) {
3319 GstDataQueueSize size;
3321 gst_data_queue_get_level (oq->queue, &size);
3322 if (IS_FILLED (oq, visible, size.visible)) {
3323 oq->max_size.visible = size.visible + 1;
3324 GST_DEBUG_OBJECT (mq,
3325 "queue %d is filled, bumping its max visible to %d", oq->id,
3326 oq->max_size.visible);
3327 gst_data_queue_limits_changed (oq->queue);
3330 if (!gst_data_queue_is_empty (oq->queue) || oq->is_sparse)
3333 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3334 gst_object_unref (mq);
3337 GST_DEBUG_OBJECT (mq, "All queues are empty, signalling it");
3338 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_UNDERRUN], 0);
/* GstDataQueue full-check callback: reports whether this single queue should
 * be considered full.  Always full on EOS/segment-done; visible-count limit
 * applies unless buffering; byte limit always applies; time limit applies
 * for non-sparse (or non-running-time-synced) streams, with extra slack of
 * unlinked_cache_time for NOT_LINKED pads. */
3343 single_queue_check_full (GstDataQueue * dataq, guint visible, guint bytes,
3344 guint64 time, GstSingleQueue * sq)
3347 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3350 GST_ERROR ("No multique set anymore, let's say we are full");
3355 GST_DEBUG_OBJECT (mq,
3356 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
3357 G_GUINT64_FORMAT, sq->id, visible, sq->max_size.visible, bytes,
3358 sq->max_size.bytes, sq->cur_time, sq->max_size.time);
3360 /* we are always filled on EOS */
3361 if (sq->is_eos || sq->is_segment_done) {
3366 /* we never go past the max visible items unless we are in buffering mode */
3367 if (!mq->use_buffering && IS_FILLED (sq, visible, visible)) {
3372 /* check time or bytes */
3373 res = IS_FILLED (sq, bytes, bytes);
3374 /* We only care about limits in time if we're not a sparse stream or
3375 * we're not syncing by running time */
3376 if (!sq->is_sparse || !mq->sync_by_running_time) {
3377 /* If unlinked, take into account the extra unlinked cache time */
3378 if (mq->sync_by_running_time && sq->srcresult == GST_FLOW_NOT_LINKED) {
3379 if (sq->cur_time > mq->unlinked_cache_time)
3380 res |= IS_FILLED (sq, time, sq->cur_time - mq->unlinked_cache_time)
;
3384 res |= IS_FILLED (sq, time, sq->cur_time);
3387 gst_object_unref (mq);
/* Drain all items from a single queue.  When @full is FALSE, sticky events
 * (except SEGMENT and EOS) are rescued onto the srcpad before their item is
 * destroyed.  Restores the data queue's flushing state if the pop failed
 * because the queue was already flushing, then updates/posts buffering. */
3393 gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full)
3395 GstDataQueueItem *sitem;
3396 GstMultiQueueItem *mitem;
3397 gboolean was_flushing = FALSE;
3398 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3399 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3401 while (!gst_data_queue_is_empty (sq->queue)) {
3402 GstMiniObject *data;
3404 /* FIXME: If this fails here although the queue is not empty,
3405 * we're flushing... but we want to rescue all sticky
3406 * events nonetheless.
3408 if (!gst_data_queue_pop (sq->queue, &sitem)) {
/* pop failed -> queue is flushing; temporarily un-flush so we can drain */
3409 was_flushing = TRUE;
3410 gst_data_queue_set_flushing (sq->queue, FALSE);
3414 mitem = (GstMultiQueueItem *) sitem;
3416 data = sitem->object;
3418 if (!full && !mitem->is_query && GST_IS_EVENT (data)
3419 && srcpad && GST_EVENT_IS_STICKY (data)
3420 && GST_EVENT_TYPE (data) != GST_EVENT_SEGMENT
3421 && GST_EVENT_TYPE (data) != GST_EVENT_EOS) {
3422 gst_pad_store_sticky_event (srcpad, GST_EVENT_CAST (data));
3425 sitem->destroy (sitem);
3427 gst_clear_object (&srcpad);
3429 gst_data_queue_flush (sq->queue);
/* re-apply the flushing flag we removed above (visible only on this path;
 * the surrounding condition lines are outside this extract) */
3431 gst_data_queue_set_flushing (sq->queue, TRUE);
3434 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3435 update_buffering (mq, sq);
3436 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3437 gst_multi_queue_post_buffering (mq);
3438 gst_object_unref (mq);
/* Drop one reference on @sq; when the last reference goes away, tear down
 * the single queue: flush and release the data queue, clear the condition
 * variables and the weak refs to the pads and the owning multiqueue.
 * NOTE(review): the final free of the sq struct itself is elided from this
 * listing — presumably g_free (sq); confirm against the full source. */
3443 gst_single_queue_unref (GstSingleQueue * sq)
3445 if (g_atomic_int_dec_and_test (&sq->refcount)) {
/* Last ref gone: release everything the queue owns. */
3447 gst_data_queue_flush (sq->queue);
3448 g_object_unref (sq->queue);
3449 g_cond_clear (&sq->turn);
3450 g_cond_clear (&sq->query_handled);
3451 g_weak_ref_clear (&sq->sinkpad);
3452 g_weak_ref_clear (&sq->srcpad);
3453 g_weak_ref_clear (&sq->mqueue);
/* Take an additional reference on @squeue (atomic refcount bump).
 * NOTE(review): the return statement is elided from this listing —
 * presumably it returns squeue for call chaining; confirm. */
static GstSingleQueue *
3460 gst_single_queue_ref (GstSingleQueue * squeue)
3462 g_atomic_int_inc (&squeue->refcount);
/* Create a new GstSingleQueue for @mqueue with id @id (or, when @id is
 * (guint) -1, the lowest currently unused id), create and configure its
 * request sink_%u pad and its src_%u pad, insert the queue into
 * mqueue->queues (kept sorted by ascending id), and add/activate the pads
 * on the element.
 * NOTE(review): this listing elides interior lines, and the function
 * continues past the last line shown here (the trailing GST_DEBUG_OBJECT
 * statement and the function's return are incomplete in this view). */
static GstSingleQueue *
3468 gst_single_queue_new (GstMultiQueue * mqueue, guint id)
3470 GstPad *srcpad, *sinkpad;
3472 GstPadTemplate *templ;
/* Start the id search at 0 for an automatic request, else at the caller's
 * requested id. */
3475 guint temp_id = (id == -1) ? 0 : id;
3477 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
3479 /* Find an unused queue ID, if possible the passed one */
3480 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
3481 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
3482 /* This works because the IDs are sorted in ascending order */
3483 if (sq2->id == temp_id) {
3484 /* If this ID was requested by the caller return NULL,
3485 * otherwise just get us the next one */
3487 temp_id = sq2->id + 1;
3489 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
/* Ids are ascending, so once we pass temp_id it is free; insert here. */
3492 } else if (sq2->id > temp_id) {
/* Allocate the queue, starting with one reference owned by the caller. */
3497 sq = g_new0 (GstSingleQueue, 1);
3498 g_atomic_int_set (&sq->refcount, 1);
3502 sq->groupid = DEFAULT_PAD_GROUP_ID;
3503 sq->group_high_time = GST_CLOCK_STIME_NONE;
/* Insert before 'tmp' to keep mqueue->queues sorted by id; bump the
 * cookie so iterators notice the list changed. */
3505 mqueue->queues = g_list_insert_before (mqueue->queues, tmp, sq);
3506 mqueue->queues_cookie++;
3508 /* copy over max_size and extra_size so we don't need to take the lock
3509 * any longer when checking if the queue is full. */
3510 sq->max_size.visible = mqueue->max_size.visible;
3511 sq->max_size.bytes = mqueue->max_size.bytes;
3512 sq->max_size.time = mqueue->max_size.time;
3514 sq->extra_size.visible = mqueue->extra_size.visible;
3515 sq->extra_size.bytes = mqueue->extra_size.bytes;
3516 sq->extra_size.time = mqueue->extra_size.time;
3518 GST_DEBUG_OBJECT (mqueue, "Creating GstSingleQueue id:%d", sq->id);
/* The queue only holds a weak ref back to its owner, avoiding a ref cycle
 * between the element and its queues. */
3520 g_weak_ref_init (&sq->mqueue, mqueue);
3521 sq->srcresult = GST_FLOW_FLUSHING;
/* The underlying data queue calls back into this file for its full-check
 * and its overrun/underrun notifications, passing sq as user data. */
3523 sq->queue = gst_data_queue_new ((GstDataQueueCheckFullFunction)
3524 single_queue_check_full,
3525 (GstDataQueueFullCallback) single_queue_overrun_cb,
3526 (GstDataQueueEmptyCallback) single_queue_underrun_cb, sq);
3528 sq->is_sparse = FALSE;
3529 sq->flushing = FALSE;
3531 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
3532 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
3536 sq->next_time = GST_CLOCK_STIME_NONE;
3537 sq->last_time = GST_CLOCK_STIME_NONE;
3538 g_cond_init (&sq->turn);
3539 g_cond_init (&sq->query_handled);
3541 sq->sinktime = GST_CLOCK_STIME_NONE;
3542 sq->srctime = GST_CLOCK_STIME_NONE;
/* Mark both cached running-times as needing recomputation. */
3543 sq->sink_tainted = TRUE;
3544 sq->src_tainted = TRUE;
3546 sq->sink_stream_gid = sq->src_stream_gid = GST_GROUP_ID_INVALID;
3547 sq->sink_stream_gid_changed = FALSE;
3548 sq->src_stream_gid_changed = FALSE;
/* Create the request sinkpad from the sink template and wire up all the
 * pad functions implemented in this file. */
3550 name = g_strdup_printf ("sink_%u", sq->id);
3551 templ = gst_static_pad_template_get (&sinktemplate);
3552 sinkpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
3553 "direction", templ->direction, "template", templ, NULL);
3554 g_weak_ref_init (&sq->sinkpad, sinkpad);
3555 gst_object_unref (templ);
3558 GST_MULTIQUEUE_PAD (sinkpad)->sq = sq;
3560 gst_pad_set_chain_function (sinkpad,
3561 GST_DEBUG_FUNCPTR (gst_multi_queue_chain));
3562 gst_pad_set_activatemode_function (sinkpad,
3563 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_activate_mode));
3564 gst_pad_set_event_full_function (sinkpad,
3565 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_event));
3566 gst_pad_set_query_function (sinkpad,
3567 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_query));
3568 gst_pad_set_iterate_internal_links_function (sinkpad,
3569 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
3570 GST_OBJECT_FLAG_SET (sinkpad, GST_PAD_FLAG_PROXY_CAPS);
/* Matching 'sometimes' srcpad with the same id. */
3572 name = g_strdup_printf ("src_%u", sq->id);
3573 templ = gst_static_pad_template_get (&srctemplate);
3574 srcpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
3575 "direction", templ->direction, "template", templ, NULL);
3576 g_weak_ref_init (&sq->srcpad, srcpad);
3577 gst_object_unref (templ);
/* Unlike the sinkpad above, the srcpad takes its own strong ref on sq. */
3580 GST_MULTIQUEUE_PAD (srcpad)->sq = gst_single_queue_ref (sq);
3582 gst_pad_set_activatemode_function (srcpad,
3583 GST_DEBUG_FUNCPTR (gst_multi_queue_src_activate_mode));
3584 gst_pad_set_event_function (srcpad,
3585 GST_DEBUG_FUNCPTR (gst_multi_queue_src_event));
3586 gst_pad_set_query_function (srcpad,
3587 GST_DEBUG_FUNCPTR (gst_multi_queue_src_query));
3588 gst_pad_set_iterate_internal_links_function (srcpad,
3589 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
3590 GST_OBJECT_FLAG_SET (srcpad, GST_PAD_FLAG_PROXY_CAPS);
3592 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
3594 /* only activate the pads when we are not in the NULL state
3595 * and add the pad under the state_lock to prevent state changes
3596 * between activating and adding */
3597 g_rec_mutex_lock (GST_STATE_GET_LOCK (mqueue));
3598 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
3599 gst_pad_set_active (srcpad, TRUE);
3600 gst_pad_set_active (sinkpad, TRUE);
3602 gst_element_add_pad (GST_ELEMENT (mqueue), srcpad);
3603 gst_element_add_pad (GST_ELEMENT (mqueue), sinkpad);
3604 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
3605 gst_single_queue_start (mqueue, sq);
3607 g_rec_mutex_unlock (GST_STATE_GET_LOCK (mqueue));
3609 GST_DEBUG_OBJECT (mqueue, "GstSingleQueue [%d] created and pads added",