2 * Copyright (C) 2006 Edward Hervey <edward@fluendo.com>
3 * Copyright (C) 2007 Jan Schmidt <jan@fluendo.com>
4 * Copyright (C) 2007 Wim Taymans <wim@fluendo.com>
5 * Copyright (C) 2011 Sebastian Dröge <sebastian.droege@collabora.co.uk>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:element-multiqueue
28 * @see_also: #GstQueue
30 * Multiqueue is similar to a normal #GstQueue with the following additional
33 * 1) Multiple stream handling
35 * * The element handles queueing data on more than one stream at once. To
36 * achieve this it has request sink pads (sink_%u) and
37 * 'sometimes' src pads (src_%u). When requesting a given sink pad with gst_element_request_pad(),
38 * the associated src pad for that stream will be created.
39 * Example: requesting sink_1 will generate src_1.
41 * 2) Non-starvation on multiple streams
43 * * If more than one stream is used with the element, the streams' queues
44 * will be dynamically grown (up to a limit), in order to ensure that no
45 * stream risks data starvation. This guarantees that at any given
46 * time there are at least N bytes queued and available for each individual
47 * stream. If an EOS event comes through a srcpad, the associated queue will be
48 * considered as 'not-empty' in the queue-size-growing algorithm.
50 * 3) Graceful handling of non-linked srcpads
52 * * In order to better support dynamic switching between streams, the multiqueue
53 * (unlike the current GStreamer queue) continues to push buffers on non-linked
54 * pads rather than shutting down. In addition, to prevent a non-linked stream from very quickly consuming all
55 * available buffers and thus 'racing ahead' of the other streams, the element
56 * must ensure that buffers and inlined events for a non-linked stream are pushed
57 * in the same order as they were received, relative to the other streams
58 * controlled by the element. This means that a buffer cannot be pushed to a
59 * non-linked pad any sooner than buffers in any other stream which were received before it.
62 * Data is queued until one of the limits specified by the
63 * #GstMultiQueue:max-size-buffers, #GstMultiQueue:max-size-bytes and/or
64 * #GstMultiQueue:max-size-time properties has been reached. Any attempt to push
65 * more buffers into the queue will block the pushing thread until more space
66 * becomes available. #GstMultiQueue:extra-size-buffers,
69 * #GstMultiQueue:extra-size-bytes and #GstMultiQueue:extra-size-time are currently unused.
72 * The default queue size limits are 5 buffers, 10 MB of data, or
73 * two seconds worth of data, whichever is reached first. Note that the number
74 * of buffers will dynamically grow depending on the fill level of the other queues.
77 * The #GstMultiQueue::underrun signal is emitted when all of the queues
78 * are empty. The #GstMultiQueue::overrun signal is emitted when one of the queues is filled.
80 * Both signals are emitted from the context of the streaming thread.
82 * When #GstMultiQueue:sync-by-running-time is enabled, the unlinked streams will
83 * be throttled by the highest running-time of the linked streams. This allows
84 * later relinking of those unlinked streams without their data being too far in the
85 * future (i.e. to achieve gapless playback).
86 * When dealing with streams which have different consumption requirements
87 * downstream (e.g. video decoders, which typically need more data buffered (in time) than
88 * audio decoders), it is recommended to group streams of the same type
89 * by using the pad "group-id" property. This will further throttle streams
90 * in time within that group.
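 *
 * A minimal sketch of requesting a stream and grouping it from application
 * code (illustrative only; `pipeline' is assumed to exist, the surrounding
 * links and error handling are omitted, and gst_element_request_pad_simple()
 * requires GStreamer >= 1.20):
 *
 * |[<!-- language="C" -->
 *   GstElement *mq = gst_element_factory_make ("multiqueue", NULL);
 *   GstPad *mq_sink, *mq_src;
 *
 *   gst_bin_add (GST_BIN (pipeline), mq);
 *
 *   // Requesting sink_%u creates the matching src_%u pad (here sink_0/src_0)
 *   mq_sink = gst_element_request_pad_simple (mq, "sink_%u");
 *   mq_src = gst_element_get_static_pad (mq, "src_0");
 *
 *   // Optionally group streams with similar downstream buffering needs
 *   g_object_set (mq_sink, "group-id", 1, NULL);
 * ]|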
98 #include <gst/glib-compat-private.h>
101 #include "gstmultiqueue.h"
102 #include "gstcoreelementselements.h"
105 * @sinkpad: associated sink #GstPad
106 * @srcpad: associated source #GstPad
108 * Structure containing all information and properties about
111 typedef struct _GstSingleQueue GstSingleQueue;
113 struct _GstSingleQueue
117 /* unique identifier of the queue */
119 /* group of streams to which this queue belongs to */
121 GstClockTimeDiff group_high_time;
127 /* flowreturn of previous srcpad push */
128 GstFlowReturn srcresult;
129 /* If something was actually pushed on
130 * this pad after flushing/pad activation
131 * and the srcresult corresponds to something
137 GstSegment sink_segment;
138 GstSegment src_segment;
139 gboolean has_src_segment; /* preferred over initializing the src_segment to
140 * UNDEFINED as this doesn't require adding ifs
141 * in every segment usage */
143 /* position of src/sink */
144 GstClockTimeDiff sinktime, srctime;
145 /* cached input value, used for interleave */
146 GstClockTimeDiff cached_sinktime;
147 /* TRUE if either position needs to be recalculated */
148 gboolean sink_tainted, src_tainted;
152 GstDataQueueSize max_size, extra_size;
153 GstClockTime cur_time;
155 gboolean is_segment_done;
160 /* Protected by global lock */
161 guint32 nextid; /* ID of the next object waiting to be pushed */
162 guint32 oldid; /* ID of the last object pushed (last in a series) */
163 guint32 last_oldid; /* Previously observed old_id, reset to MAXUINT32 on flush */
164 GstClockTimeDiff next_time; /* End running time of next buffer to be pushed */
165 GstClockTimeDiff last_time; /* Start running time of last pushed buffer */
166 GCond turn; /* SingleQueue turn waiting condition */
168 /* for serialized queries */
171 GstQuery *last_handled_query;
173 /* For interleave calculation */
174 GThread *thread; /* Streaming thread of SingleQueue */
175 GstClockTime interleave; /* Calculated interleave within the thread */
178 /* Extension of GstDataQueueItem structure for our usage */
179 typedef struct _GstMultiQueueItem GstMultiQueueItem;
181 struct _GstMultiQueueItem
183 GstMiniObject *object;
188 GDestroyNotify destroy;
194 static GstSingleQueue *gst_single_queue_new (GstMultiQueue * mqueue, guint id);
195 static void gst_single_queue_unref (GstSingleQueue * squeue);
196 static GstSingleQueue *gst_single_queue_ref (GstSingleQueue * squeue);
198 static void wake_up_next_non_linked (GstMultiQueue * mq);
199 static void compute_high_id (GstMultiQueue * mq);
200 static void compute_high_time (GstMultiQueue * mq, guint groupid);
201 static void single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
202 static void single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
204 static void update_buffering (GstMultiQueue * mq, GstSingleQueue * sq);
205 static void gst_multi_queue_post_buffering (GstMultiQueue * mq);
206 static void recheck_buffering_status (GstMultiQueue * mq);
208 static void gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full);
210 static void calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq);
212 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink_%u",
215 GST_STATIC_CAPS_ANY);
217 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src_%u",
220 GST_STATIC_CAPS_ANY);
222 GST_DEBUG_CATEGORY_STATIC (multi_queue_debug);
223 #define GST_CAT_DEFAULT (multi_queue_debug)
225 /* Signals and args */
233 /* default limits, we try to keep up to 2 seconds of data and, if no timing
234 * information is available, up to 10 MB. The number of buffers is dynamically scaled to make sure
235 * there is data in the queues. Normally, the byte and time limits are not hit
236 * in these conditions. */
237 #define DEFAULT_MAX_SIZE_BYTES (10 * 1024 * 1024)       /* 10 MB */
238 #define DEFAULT_MAX_SIZE_BUFFERS 5
239 #define DEFAULT_MAX_SIZE_TIME (2 * GST_SECOND)
241 /* secondary limits. When we hit one of the above limits we are probably dealing
242 * with a badly muxed file and we scale the limits to these emergency values.
243 * This is currently not yet implemented.
244 * Since we dynamically scale the queue buffer size up to the limits but avoid
245 * going above the max-size-buffers when we can, we don't really need this
246 * additional extra size. */
247 #define DEFAULT_EXTRA_SIZE_BYTES (10 * 1024 * 1024)     /* 10 MB */
248 #define DEFAULT_EXTRA_SIZE_BUFFERS 5
249 #define DEFAULT_EXTRA_SIZE_TIME (3 * GST_SECOND)
251 #define DEFAULT_USE_BUFFERING FALSE
252 #define DEFAULT_LOW_WATERMARK 0.01
253 #define DEFAULT_HIGH_WATERMARK 0.99
254 #define DEFAULT_SYNC_BY_RUNNING_TIME FALSE
255 #define DEFAULT_USE_INTERLEAVE FALSE
256 #define DEFAULT_UNLINKED_CACHE_TIME (250 * GST_MSECOND)
258 #define DEFAULT_MINIMUM_INTERLEAVE (250 * GST_MSECOND)
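
/* A minimal sketch of overriding these defaults from application code
 * (illustrative only; `mq' is assumed to be a multiqueue instance):
 *
 *   g_object_set (mq,
 *       "max-size-buffers", 0,
 *       "max-size-bytes", 0,
 *       "max-size-time", 5 * GST_SECOND,
 *       NULL);
 */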
263 PROP_EXTRA_SIZE_BYTES,
264 PROP_EXTRA_SIZE_BUFFERS,
265 PROP_EXTRA_SIZE_TIME,
267 PROP_MAX_SIZE_BUFFERS,
274 PROP_SYNC_BY_RUNNING_TIME,
276 PROP_UNLINKED_CACHE_TIME,
277 PROP_MINIMUM_INTERLEAVE,
282 /* Explanation for buffer levels and percentages:
284 * The buffering_level functions here return a value in a normalized range
285 * that specifies the current fill level of a queue. The range goes from 0 to
286 * MAX_BUFFERING_LEVEL. The low/high watermarks also use this same range.
288 * This is not to be confused with the buffering_percent value, which is
289 * a *relative* quantity - relative to the low/high watermarks.
290 * buffering_percent = 0% means overall buffering_level is at the low watermark.
291 * buffering_percent = 100% means overall buffering_level is at the high watermark.
292 * buffering_percent is used for determining if the fill level has reached
293 * the high watermark, and for producing BUFFERING messages. This value
294 * always uses a 0..100 range (since it is a percentage).
296 * To avoid future confusion, whenever "buffering level" is mentioned, it
297 * refers to the absolute level which is in the 0..MAX_BUFFERING_LEVEL
298 * range. Whenever "buffering_percent" is mentioned, it refers to the
299 * percentage value that is relative to the low/high watermark. */
301 /* Using a buffering level range of 0..1000000 to allow for a
302 * resolution in ppm (1 ppm = 0.0001%) */
303 #define MAX_BUFFERING_LEVEL 1000000
305 /* How much 1% makes up in the buffer level range */
306 #define BUF_LEVEL_PERCENT_FACTOR ((MAX_BUFFERING_LEVEL) / 100)
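
/* Illustrative example (not used by the code): with the default high-watermark
 * of 0.99 the high threshold is 990000 on this scale; a queue filled to a
 * buffering level of 495000 therefore reports a buffering_percent of
 * 495000 * 100 / 990000 = 50%. */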
308 /* GstMultiQueuePad */
310 #define DEFAULT_PAD_GROUP_ID 0
316 PROP_CURRENT_LEVEL_BUFFERS,
317 PROP_CURRENT_LEVEL_BYTES,
318 PROP_CURRENT_LEVEL_TIME,
321 #define GST_TYPE_MULTIQUEUE_PAD (gst_multiqueue_pad_get_type())
322 #define GST_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePad))
323 #define GST_IS_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_MULTIQUEUE_PAD))
324 #define GST_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
325 #define GST_IS_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_MULTIQUEUE_PAD))
326 #define GST_MULTIQUEUE_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
328 #define GST_MULTI_QUEUE_MUTEX_LOCK(q) G_STMT_START { \
329 g_mutex_lock (&q->qlock); \
332 #define GST_MULTI_QUEUE_MUTEX_UNLOCK(q) G_STMT_START { \
333 g_mutex_unlock (&q->qlock); \
336 #define SET_PERCENT(mq, perc) G_STMT_START { \
337 if (perc != mq->buffering_percent) { \
338 mq->buffering_percent = perc; \
339 mq->buffering_percent_changed = TRUE; \
340 GST_DEBUG_OBJECT (mq, "buffering %d percent", perc); \
344 struct _GstMultiQueuePad
351 struct _GstMultiQueuePadClass
353 GstPadClass parent_class;
356 GType gst_multiqueue_pad_get_type (void);
358 G_DEFINE_TYPE (GstMultiQueuePad, gst_multiqueue_pad, GST_TYPE_PAD);
361 gst_multiqueue_pad_get_group_id (GstMultiQueuePad * pad)
369 mq = g_weak_ref_get (&pad->sq->mqueue);
372 GST_OBJECT_LOCK (mq);
375 ret = pad->sq->groupid;
378 GST_OBJECT_UNLOCK (mq);
379 gst_object_unref (mq);
386 gst_multiqueue_pad_get_current_level_buffers (GstMultiQueuePad * pad)
388 GstSingleQueue *sq = pad->sq;
389 GstDataQueueSize level;
395 mq = g_weak_ref_get (&pad->sq->mqueue);
398 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
401 gst_data_queue_get_level (sq->queue, &level);
404 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
405 gst_object_unref (mq);
408 return level.visible;
412 gst_multiqueue_pad_get_current_level_bytes (GstMultiQueuePad * pad)
414 GstSingleQueue *sq = pad->sq;
415 GstDataQueueSize level;
421 mq = g_weak_ref_get (&pad->sq->mqueue);
424 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
427 gst_data_queue_get_level (sq->queue, &level);
430 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
431 gst_object_unref (mq);
438 gst_multiqueue_pad_get_current_level_time (GstMultiQueuePad * pad)
440 GstSingleQueue *sq = pad->sq;
447 mq = g_weak_ref_get (&pad->sq->mqueue);
450 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
456 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
457 gst_object_unref (mq);
464 gst_multiqueue_pad_get_property (GObject * object, guint prop_id,
465 GValue * value, GParamSpec * pspec)
467 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
470 case PROP_PAD_GROUP_ID:
471 g_value_set_uint (value, gst_multiqueue_pad_get_group_id (pad));
473 case PROP_CURRENT_LEVEL_BUFFERS:{
474 g_value_set_uint (value,
475 gst_multiqueue_pad_get_current_level_buffers (pad));
478 case PROP_CURRENT_LEVEL_BYTES:{
479 g_value_set_uint (value,
480 gst_multiqueue_pad_get_current_level_bytes (pad));
483 case PROP_CURRENT_LEVEL_TIME:{
484 g_value_set_uint64 (value,
485 gst_multiqueue_pad_get_current_level_time (pad));
489 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
495 gst_multiqueue_pad_set_property (GObject * object, guint prop_id,
496 const GValue * value, GParamSpec * pspec)
498 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
501 case PROP_PAD_GROUP_ID:
503 GstMultiQueue *mqueue = g_weak_ref_get (&pad->sq->mqueue);
506 GST_OBJECT_LOCK (mqueue);
508 pad->sq->groupid = g_value_get_uint (value);
511 GST_OBJECT_UNLOCK (mqueue);
512 gst_object_unref (mqueue);
517 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
523 gst_multiqueue_pad_finalize (GObject * object)
525 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
528 gst_single_queue_unref (pad->sq);
530 G_OBJECT_CLASS (gst_multiqueue_pad_parent_class)->finalize (object);
534 gst_multiqueue_pad_class_init (GstMultiQueuePadClass * klass)
536 GObjectClass *gobject_class = (GObjectClass *) klass;
538 gobject_class->set_property = gst_multiqueue_pad_set_property;
539 gobject_class->get_property = gst_multiqueue_pad_get_property;
540 gobject_class->finalize = gst_multiqueue_pad_finalize;
543 * GstMultiQueuePad:group-id:
545 * Group to which this pad belongs.
549 g_object_class_install_property (gobject_class, PROP_PAD_GROUP_ID,
550 g_param_spec_uint ("group-id", "Group ID",
551 "Group to which this pad belongs", 0, G_MAXUINT32,
552 DEFAULT_PAD_GROUP_ID, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
555 * GstMultiQueuePad:current-level-buffers:
557 * The corresponding queue's current level of buffers.
561 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_BUFFERS,
562 g_param_spec_uint ("current-level-buffers", "Current level buffers",
563 "Current level buffers", 0, G_MAXUINT32,
564 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
567 * GstMultiQueuePad:current-level-bytes:
569 * The corresponding queue's current level of bytes.
573 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_BYTES,
574 g_param_spec_uint ("current-level-bytes", "Current level bytes",
575 "Current level bytes", 0, G_MAXUINT32,
576 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
579 * GstMultiQueuePad:current-level-time:
581 * The corresponding queue's current level of time.
585 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_TIME,
586 g_param_spec_uint64 ("current-level-time", "Current level time",
587 "Current level time", 0, G_MAXUINT64,
588 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
592 gst_multiqueue_pad_init (GstMultiQueuePad * pad)
598 /* Convenience function */
599 static inline GstClockTimeDiff
600 my_segment_to_running_time (GstSegment * segment, GstClockTime val)
602 GstClockTimeDiff res = GST_CLOCK_STIME_NONE;
604 if (GST_CLOCK_TIME_IS_VALID (val)) {
606 gst_segment_to_running_time_full (segment, GST_FORMAT_TIME, val, &val);
615 static void gst_multi_queue_finalize (GObject * object);
616 static void gst_multi_queue_set_property (GObject * object,
617 guint prop_id, const GValue * value, GParamSpec * pspec);
618 static void gst_multi_queue_get_property (GObject * object,
619 guint prop_id, GValue * value, GParamSpec * pspec);
621 static GstPad *gst_multi_queue_request_new_pad (GstElement * element,
622 GstPadTemplate * temp, const gchar * name, const GstCaps * caps);
623 static void gst_multi_queue_release_pad (GstElement * element, GstPad * pad);
624 static GstStateChangeReturn gst_multi_queue_change_state (GstElement *
625 element, GstStateChange transition);
627 static void gst_multi_queue_loop (GstPad * pad);
630 GST_DEBUG_CATEGORY_INIT (multi_queue_debug, "multiqueue", 0, "multiqueue element");
631 #define gst_multi_queue_parent_class parent_class
632 G_DEFINE_TYPE_WITH_CODE (GstMultiQueue, gst_multi_queue, GST_TYPE_ELEMENT,
634 GST_ELEMENT_REGISTER_DEFINE (multiqueue, "multiqueue", GST_RANK_NONE,
635 GST_TYPE_MULTI_QUEUE);
637 static guint gst_multi_queue_signals[LAST_SIGNAL] = { 0 };
640 gst_multi_queue_class_init (GstMultiQueueClass * klass)
642 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
643 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
645 gobject_class->set_property = gst_multi_queue_set_property;
646 gobject_class->get_property = gst_multi_queue_get_property;
651 * GstMultiQueue::underrun:
652 * @multiqueue: the multiqueue instance
654 * This signal is emitted from the streaming thread when there is
655 * no data in any of the queues inside the multiqueue instance (underrun).
657 * This indicates either starvation or EOS from the upstream data sources.
659 gst_multi_queue_signals[SIGNAL_UNDERRUN] =
660 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
661 G_STRUCT_OFFSET (GstMultiQueueClass, underrun), NULL, NULL,
662 NULL, G_TYPE_NONE, 0);
665 * GstMultiQueue::overrun:
666 * @multiqueue: the multiqueue instance
668 * Reports that one of the queues in the multiqueue is full (overrun).
669 * A queue is full if the total amount of data inside it (num-buffers, time,
670 * size) is higher than the boundary values which can be set through the
671 * GObject properties.
673 * This can be used as an indicator of pre-roll.
675 gst_multi_queue_signals[SIGNAL_OVERRUN] =
676 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
677 G_STRUCT_OFFSET (GstMultiQueueClass, overrun), NULL, NULL,
678 NULL, G_TYPE_NONE, 0);
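
  /* A minimal sketch of listening to these signals from application code
   * (illustrative only; `on_underrun' and `on_overrun' are hypothetical
   * callbacks matching the `void (*) (GstElement *, gpointer)' signature):
   *
   *   g_signal_connect (mq, "underrun", G_CALLBACK (on_underrun), NULL);
   *   g_signal_connect (mq, "overrun", G_CALLBACK (on_overrun), NULL);
   *
   * Both callbacks are invoked from the streaming thread and must not block. */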
682 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BYTES,
683 g_param_spec_uint ("max-size-bytes", "Max. size (bytes)",
684 "Max. amount of data in the queue (bytes, 0=disable)",
685 0, G_MAXUINT, DEFAULT_MAX_SIZE_BYTES,
686 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
687 G_PARAM_STATIC_STRINGS));
688 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BUFFERS,
689 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
690 "Max. number of buffers in the queue (0=disable)", 0, G_MAXUINT,
691 DEFAULT_MAX_SIZE_BUFFERS,
692 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
693 G_PARAM_STATIC_STRINGS));
694 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_TIME,
695 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
696 "Max. amount of data in the queue (in ns, 0=disable)", 0, G_MAXUINT64,
697 DEFAULT_MAX_SIZE_TIME, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
698 G_PARAM_STATIC_STRINGS));
700 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BYTES,
701 g_param_spec_uint ("extra-size-bytes", "Extra Size (bytes)",
702 "Amount of data the queues can grow if one of them is empty (bytes, 0=disable)"
703 " (NOT IMPLEMENTED)",
704 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BYTES,
705 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
706 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BUFFERS,
707 g_param_spec_uint ("extra-size-buffers", "Extra Size (buffers)",
708 "Amount of buffers the queues can grow if one of them is empty (0=disable)"
709 " (NOT IMPLEMENTED)",
710 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BUFFERS,
711 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
712 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_TIME,
713 g_param_spec_uint64 ("extra-size-time", "Extra Size (ns)",
714 "Amount of time the queues can grow if one of them is empty (in ns, 0=disable)"
715 " (NOT IMPLEMENTED)",
716 0, G_MAXUINT64, DEFAULT_EXTRA_SIZE_TIME,
717 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
720 * GstMultiQueue:use-buffering:
722 * Enable the buffering option in multiqueue so that BUFFERING messages are
723 * emitted based on low-/high-percent thresholds.
725 g_object_class_install_property (gobject_class, PROP_USE_BUFFERING,
726 g_param_spec_boolean ("use-buffering", "Use buffering",
727 "Emit GST_MESSAGE_BUFFERING based on low-/high-percent thresholds "
728 "(0% = low-watermark, 100% = high-watermark)",
729 DEFAULT_USE_BUFFERING, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
730 G_PARAM_STATIC_STRINGS));
732 * GstMultiQueue:low-percent:
734 * Low threshold percent for buffering to start.
736 g_object_class_install_property (gobject_class, PROP_LOW_PERCENT,
737 g_param_spec_int ("low-percent", "Low percent",
738 "Low threshold for buffering to start. Only used if use-buffering is True "
739 "(Deprecated: use low-watermark instead)",
740 0, 100, DEFAULT_LOW_WATERMARK * 100,
741 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
743 * GstMultiQueue:high-percent:
745 * High threshold percent for buffering to finish.
747 g_object_class_install_property (gobject_class, PROP_HIGH_PERCENT,
748 g_param_spec_int ("high-percent", "High percent",
749 "High threshold for buffering to finish. Only used if use-buffering is True "
750 "(Deprecated: use high-watermark instead)",
751 0, 100, DEFAULT_HIGH_WATERMARK * 100,
752 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
754 * GstMultiQueue:low-watermark:
756 * Low threshold watermark for buffering to start.
760 g_object_class_install_property (gobject_class, PROP_LOW_WATERMARK,
761 g_param_spec_double ("low-watermark", "Low watermark",
762 "Low threshold for buffering to start. Only used if use-buffering is True",
763 0.0, 1.0, DEFAULT_LOW_WATERMARK,
764 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
766 * GstMultiQueue:high-watermark:
768 * High threshold watermark for buffering to finish.
772 g_object_class_install_property (gobject_class, PROP_HIGH_WATERMARK,
773 g_param_spec_double ("high-watermark", "High watermark",
774 "High threshold for buffering to finish. Only used if use-buffering is True",
775 0.0, 1.0, DEFAULT_HIGH_WATERMARK,
776 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
779 * GstMultiQueue:sync-by-running-time:
781 * If enabled, multiqueue will synchronize deactivated or not-linked streams
782 * to the activated and linked streams by taking the running time.
783 * Otherwise multiqueue will synchronize the deactivated or not-linked
784 * streams by keeping the order in which buffers and events arrived compared
785 * to active and linked streams.
787 g_object_class_install_property (gobject_class, PROP_SYNC_BY_RUNNING_TIME,
788 g_param_spec_boolean ("sync-by-running-time", "Sync By Running Time",
789 "Synchronize deactivated or not-linked streams by running time",
790 DEFAULT_SYNC_BY_RUNNING_TIME,
791 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
793 g_object_class_install_property (gobject_class, PROP_USE_INTERLEAVE,
794 g_param_spec_boolean ("use-interleave", "Use interleave",
795 "Adjust time limits based on input interleave",
796 DEFAULT_USE_INTERLEAVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
798 g_object_class_install_property (gobject_class, PROP_UNLINKED_CACHE_TIME,
799 g_param_spec_uint64 ("unlinked-cache-time", "Unlinked cache time (ns)",
800 "Extra buffering in time for unlinked streams (if 'sync-by-running-time')",
801 0, G_MAXUINT64, DEFAULT_UNLINKED_CACHE_TIME,
802 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
803 G_PARAM_STATIC_STRINGS));
805 g_object_class_install_property (gobject_class, PROP_MINIMUM_INTERLEAVE,
806 g_param_spec_uint64 ("min-interleave-time", "Minimum interleave time",
807 "Minimum extra buffering for deinterleaving (size of the queues) when use-interleave=true",
808 0, G_MAXUINT64, DEFAULT_MINIMUM_INTERLEAVE,
809 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
810 G_PARAM_STATIC_STRINGS));
813 * GstMultiQueue:stats:
815 * Various #GstMultiQueue statistics. This property returns a #GstStructure
816 * with name "application/x-gst-multi-queue-stats" with the following fields:
818 * - "queues" GST_TYPE_ARRAY Contains one GstStructure named "queue_%d"
819 * (where \%d is the queue's ID) per internal queue:
820 * - "buffers" G_TYPE_UINT The queue's current level of buffers
821 * - "bytes" G_TYPE_UINT The queue's current level of bytes
822 * - "time" G_TYPE_UINT64 The queue's current level of time
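 *
 * A minimal sketch of reading the property (illustrative only; `mq' is
 * assumed to be a multiqueue instance):
 *
 * |[<!-- language="C" -->
 *   GstStructure *stats = NULL;
 *
 *   g_object_get (mq, "stats", &stats, NULL);
 *   if (stats) {
 *     gchar *str = gst_structure_to_string (stats);
 *     g_print ("%s\n", str);
 *     g_free (str);
 *     gst_structure_free (stats);
 *   }
 * ]|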
826 g_object_class_install_property (gobject_class, PROP_STATS,
827 g_param_spec_boxed ("stats", "Stats",
828 "Multiqueue Statistics",
829 GST_TYPE_STRUCTURE, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
831 gobject_class->finalize = gst_multi_queue_finalize;
833 gst_element_class_set_static_metadata (gstelement_class,
835 "Generic", "Multiple data queue", "Edward Hervey <edward@fluendo.com>");
836 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
837 &sinktemplate, GST_TYPE_MULTIQUEUE_PAD);
838 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
839 &srctemplate, GST_TYPE_MULTIQUEUE_PAD);
841 gstelement_class->request_new_pad =
842 GST_DEBUG_FUNCPTR (gst_multi_queue_request_new_pad);
843 gstelement_class->release_pad =
844 GST_DEBUG_FUNCPTR (gst_multi_queue_release_pad);
845 gstelement_class->change_state =
846 GST_DEBUG_FUNCPTR (gst_multi_queue_change_state);
848 gst_type_mark_as_plugin_api (GST_TYPE_MULTIQUEUE_PAD, 0);
852 gst_multi_queue_init (GstMultiQueue * mqueue)
854 mqueue->nbqueues = 0;
855 mqueue->queues = NULL;
857 mqueue->max_size.bytes = DEFAULT_MAX_SIZE_BYTES;
858 mqueue->max_size.visible = DEFAULT_MAX_SIZE_BUFFERS;
859 mqueue->max_size.time = DEFAULT_MAX_SIZE_TIME;
861 mqueue->extra_size.bytes = DEFAULT_EXTRA_SIZE_BYTES;
862 mqueue->extra_size.visible = DEFAULT_EXTRA_SIZE_BUFFERS;
863 mqueue->extra_size.time = DEFAULT_EXTRA_SIZE_TIME;
865 mqueue->use_buffering = DEFAULT_USE_BUFFERING;
866 mqueue->low_watermark = DEFAULT_LOW_WATERMARK * MAX_BUFFERING_LEVEL;
867 mqueue->high_watermark = DEFAULT_HIGH_WATERMARK * MAX_BUFFERING_LEVEL;
869 mqueue->sync_by_running_time = DEFAULT_SYNC_BY_RUNNING_TIME;
870 mqueue->use_interleave = DEFAULT_USE_INTERLEAVE;
871 mqueue->min_interleave_time = DEFAULT_MINIMUM_INTERLEAVE;
872 mqueue->unlinked_cache_time = DEFAULT_UNLINKED_CACHE_TIME;
876 mqueue->high_time = GST_CLOCK_STIME_NONE;
878 g_mutex_init (&mqueue->qlock);
879 g_mutex_init (&mqueue->buffering_post_lock);
883 gst_multi_queue_finalize (GObject * object)
885 GstMultiQueue *mqueue = GST_MULTI_QUEUE (object);
887 g_list_free_full (mqueue->queues, (GDestroyNotify) gst_single_queue_unref);
888 mqueue->queues = NULL;
889 mqueue->queues_cookie++;
891 /* free/unref instance data */
892 g_mutex_clear (&mqueue->qlock);
893 g_mutex_clear (&mqueue->buffering_post_lock);
895 G_OBJECT_CLASS (parent_class)->finalize (object);
898 #define SET_CHILD_PROPERTY(mq,format) G_STMT_START { \
899 GList * tmp = mq->queues; \
901 GstSingleQueue *q = (GstSingleQueue*)tmp->data; \
902 q->max_size.format = mq->max_size.format; \
903 update_buffering (mq, q); \
904 gst_data_queue_limits_changed (q->queue); \
905 tmp = g_list_next(tmp); \
910 gst_multi_queue_set_property (GObject * object, guint prop_id,
911 const GValue * value, GParamSpec * pspec)
913 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
916 case PROP_MAX_SIZE_BYTES:
917 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
918 mq->max_size.bytes = g_value_get_uint (value);
919 SET_CHILD_PROPERTY (mq, bytes);
920 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
921 gst_multi_queue_post_buffering (mq);
923 case PROP_MAX_SIZE_BUFFERS:
926 gint new_size = g_value_get_uint (value);
928 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
930 mq->max_size.visible = new_size;
934 GstDataQueueSize size;
935 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
936 gst_data_queue_get_level (q->queue, &size);
938 GST_DEBUG_OBJECT (mq, "Queue %d: Requested buffers size: %d,"
939 " current: %d, current max %d", q->id, new_size, size.visible,
940 q->max_size.visible);
942 /* do not reduce max size below current level if the single queue
943 * has grown because of empty queue */
945 q->max_size.visible = new_size;
946 } else if (q->max_size.visible == 0) {
947 q->max_size.visible = MAX (new_size, size.visible);
948 } else if (new_size > size.visible) {
949 q->max_size.visible = new_size;
951 update_buffering (mq, q);
952 gst_data_queue_limits_changed (q->queue);
953 tmp = g_list_next (tmp);
956 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
957 gst_multi_queue_post_buffering (mq);
961 case PROP_MAX_SIZE_TIME:
962 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
963 mq->max_size.time = g_value_get_uint64 (value);
964 SET_CHILD_PROPERTY (mq, time);
965 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
966 gst_multi_queue_post_buffering (mq);
968 case PROP_EXTRA_SIZE_BYTES:
969 mq->extra_size.bytes = g_value_get_uint (value);
971 case PROP_EXTRA_SIZE_BUFFERS:
972 mq->extra_size.visible = g_value_get_uint (value);
974 case PROP_EXTRA_SIZE_TIME:
975 mq->extra_size.time = g_value_get_uint64 (value);
977 case PROP_USE_BUFFERING:
978 mq->use_buffering = g_value_get_boolean (value);
979 recheck_buffering_status (mq);
981 case PROP_LOW_PERCENT:
982 mq->low_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
983 /* Recheck buffering status - the new low_watermark value might
984 * be above the current fill level. If the old low_watermark one
985 * was below the current level, this means that mq->buffering is
986 * disabled and needs to be re-enabled. */
987 recheck_buffering_status (mq);
989 case PROP_HIGH_PERCENT:
990 mq->high_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
991 recheck_buffering_status (mq);
993 case PROP_LOW_WATERMARK:
994 mq->low_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
995 recheck_buffering_status (mq);
997 case PROP_HIGH_WATERMARK:
998 mq->high_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
999 recheck_buffering_status (mq);
1001 case PROP_SYNC_BY_RUNNING_TIME:
1002 mq->sync_by_running_time = g_value_get_boolean (value);
1004 case PROP_USE_INTERLEAVE:
1005 mq->use_interleave = g_value_get_boolean (value);
1007 case PROP_UNLINKED_CACHE_TIME:
1008 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1009 mq->unlinked_cache_time = g_value_get_uint64 (value);
1010 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1011 gst_multi_queue_post_buffering (mq);
1013 case PROP_MINIMUM_INTERLEAVE:
1014 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1015 mq->min_interleave_time = g_value_get_uint64 (value);
1016 if (mq->use_interleave)
1017 calculate_interleave (mq, NULL);
1018 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1021 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1026 /* Called with mutex held */
1027 static GstStructure *
1028 gst_multi_queue_get_stats (GstMultiQueue * mq)
1031 gst_structure_new_empty ("application/x-gst-multi-queue-stats");
1035 if (mq->queues != NULL) {
1036 GValue queues = G_VALUE_INIT;
1037 GValue v = G_VALUE_INIT;
1039 g_value_init (&queues, GST_TYPE_ARRAY);
1041 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
1042 GstDataQueueSize level;
1045 g_value_init (&v, GST_TYPE_STRUCTURE);
1047 sq = (GstSingleQueue *) tmp->data;
1048 gst_data_queue_get_level (sq->queue, &level);
1049 id = g_strdup_printf ("queue_%d", sq->id);
1050 s = gst_structure_new (id,
1051 "buffers", G_TYPE_UINT, level.visible,
1052 "bytes", G_TYPE_UINT, level.bytes,
1053 "time", G_TYPE_UINT64, sq->cur_time, NULL);
1054 g_value_take_boxed (&v, s);
1055 gst_value_array_append_and_take_value (&queues, &v);
1058 gst_structure_take_value (ret, "queues", &queues);
1065 gst_multi_queue_get_property (GObject * object, guint prop_id,
1066 GValue * value, GParamSpec * pspec)
1068 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
1070 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1073 case PROP_EXTRA_SIZE_BYTES:
1074 g_value_set_uint (value, mq->extra_size.bytes);
1076 case PROP_EXTRA_SIZE_BUFFERS:
1077 g_value_set_uint (value, mq->extra_size.visible);
1079 case PROP_EXTRA_SIZE_TIME:
1080 g_value_set_uint64 (value, mq->extra_size.time);
1082 case PROP_MAX_SIZE_BYTES:
1083 g_value_set_uint (value, mq->max_size.bytes);
1085 case PROP_MAX_SIZE_BUFFERS:
1086 g_value_set_uint (value, mq->max_size.visible);
1088 case PROP_MAX_SIZE_TIME:
1089 g_value_set_uint64 (value, mq->max_size.time);
1091 case PROP_USE_BUFFERING:
1092 g_value_set_boolean (value, mq->use_buffering);
1094 case PROP_LOW_PERCENT:
1095 g_value_set_int (value, mq->low_watermark / BUF_LEVEL_PERCENT_FACTOR);
1097 case PROP_HIGH_PERCENT:
1098 g_value_set_int (value, mq->high_watermark / BUF_LEVEL_PERCENT_FACTOR);
1100 case PROP_LOW_WATERMARK:
1101 g_value_set_double (value, mq->low_watermark /
1102 (gdouble) MAX_BUFFERING_LEVEL);
1104 case PROP_HIGH_WATERMARK:
1105 g_value_set_double (value, mq->high_watermark /
1106 (gdouble) MAX_BUFFERING_LEVEL);
1108 case PROP_SYNC_BY_RUNNING_TIME:
1109 g_value_set_boolean (value, mq->sync_by_running_time);
1111 case PROP_USE_INTERLEAVE:
1112 g_value_set_boolean (value, mq->use_interleave);
1114 case PROP_UNLINKED_CACHE_TIME:
1115 g_value_set_uint64 (value, mq->unlinked_cache_time);
1117 case PROP_MINIMUM_INTERLEAVE:
1118 g_value_set_uint64 (value, mq->min_interleave_time);
1121 g_value_take_boxed (value, gst_multi_queue_get_stats (mq));
1124 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1128 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1131 static GstIterator *
1132 gst_multi_queue_iterate_internal_links (GstPad * pad, GstObject * parent)
1134 GstIterator *it = NULL;
1135 GstPad *opad, *sinkpad, *srcpad;
1136 GstSingleQueue *squeue;
1137 GstMultiQueue *mq = GST_MULTI_QUEUE (parent);
1138 GValue val = { 0, };
1140 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1141 squeue = GST_MULTIQUEUE_PAD (pad)->sq;
1145 srcpad = g_weak_ref_get (&squeue->srcpad);
1146 sinkpad = g_weak_ref_get (&squeue->sinkpad);
1147 if (sinkpad == pad && srcpad) {
1149 gst_clear_object (&sinkpad);
1151 } else if (srcpad == pad && sinkpad) {
1153 gst_clear_object (&srcpad);
1156 gst_clear_object (&srcpad);
1157 gst_clear_object (&sinkpad);
1161 g_value_init (&val, GST_TYPE_PAD);
1162 g_value_set_object (&val, opad);
1163 it = gst_iterator_new_single (GST_TYPE_PAD, &val);
1164 g_value_unset (&val);
1166 gst_object_unref (opad);
1169 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1176 * GstElement methods
1180 gst_multi_queue_request_new_pad (GstElement * element, GstPadTemplate * temp,
1181 const gchar * name, const GstCaps * caps)
1183 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1184 GstSingleQueue *squeue;
1189 sscanf (name + 4, "_%u", &temp_id);
1190 GST_LOG_OBJECT (element, "name : %s (id %d)", GST_STR_NULL (name), temp_id);
1193 /* Create a new single queue, add the sink and source pad and return the sink pad */
1194 squeue = gst_single_queue_new (mqueue, temp_id);
1196 new_pad = squeue ? g_weak_ref_get (&squeue->sinkpad) : NULL;
1197 /* request pad assumes the element is owning the ref of the pad it returns */
1199 gst_object_unref (new_pad);
1201 GST_DEBUG_OBJECT (mqueue, "Returning pad %" GST_PTR_FORMAT, new_pad);
1207 gst_multi_queue_release_pad (GstElement * element, GstPad * pad)
1209 GstPad *sinkpad = NULL, *srcpad = NULL;
1210 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1211 GstSingleQueue *sq = NULL;
1214 GST_LOG_OBJECT (element, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
1216 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1217 /* Find which single queue it belongs to, knowing that it should be a sinkpad */
1218 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1219 sq = (GstSingleQueue *) tmp->data;
1220 sinkpad = g_weak_ref_get (&sq->sinkpad);
1222 if (sinkpad == pad) {
1223 srcpad = g_weak_ref_get (&sq->srcpad);
1227 gst_object_unref (sinkpad);
1231 gst_clear_object (&sinkpad);
1232 gst_clear_object (&srcpad);
1233 GST_WARNING_OBJECT (mqueue, "That pad doesn't belong to this element ???");
1234 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1238 /* FIXME: The removal of the singlequeue should probably not happen until it
1239 * finishes draining */
1241 /* remove it from the list */
1242 mqueue->queues = g_list_delete_link (mqueue->queues, tmp);
1243 mqueue->queues_cookie++;
1245 /* FIXME : recompute next-non-linked */
1246 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1248 /* delete SingleQueue */
1249 gst_data_queue_set_flushing (sq->queue, TRUE);
1251 gst_pad_set_active (srcpad, FALSE);
1252 gst_pad_set_active (sinkpad, FALSE);
1253 gst_element_remove_pad (element, srcpad);
1254 gst_element_remove_pad (element, sinkpad);
1255 gst_object_unref (srcpad);
1256 gst_object_unref (sinkpad);
1259 static GstStateChangeReturn
1260 gst_multi_queue_change_state (GstElement * element, GstStateChange transition)
1262 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1263 GstSingleQueue *sq = NULL;
1264 GstStateChangeReturn result;
1266 switch (transition) {
1267 case GST_STATE_CHANGE_READY_TO_PAUSED:{
1270 /* Set all pads to non-flushing */
1271 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1272 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1273 sq = (GstSingleQueue *) tmp->data;
1274 sq->flushing = FALSE;
1277 /* the visible limit might not have been set on single queues that have grown because other queues were empty */
1278 SET_CHILD_PROPERTY (mqueue, visible);
1280 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1281 gst_multi_queue_post_buffering (mqueue);
1285 case GST_STATE_CHANGE_PAUSED_TO_READY:{
1288 /* Un-wait all waiting pads */
1289 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1290 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1291 sq = (GstSingleQueue *) tmp->data;
1292 sq->flushing = TRUE;
1293 g_cond_signal (&sq->turn);
1295 sq->last_query = FALSE;
1296 g_cond_signal (&sq->query_handled);
1298 mqueue->interleave_incomplete = FALSE;
1299 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1306 result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
1308 switch (transition) {
1317 gst_single_queue_start (GstMultiQueue * mq, GstSingleQueue * sq)
1319 gboolean res = FALSE;
1320 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1322 GST_LOG_OBJECT (mq, "SingleQueue %d : starting task", sq->id);
1325 res = gst_pad_start_task (srcpad,
1326 (GstTaskFunction) gst_multi_queue_loop, srcpad, NULL);
1327 gst_object_unref (srcpad);
1334 gst_single_queue_pause (GstMultiQueue * mq, GstSingleQueue * sq)
1336 gboolean result = FALSE;
1337 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1339 GST_LOG_OBJECT (mq, "SingleQueue %d : pausing task", sq->id);
1341 result = gst_pad_pause_task (srcpad);
1342 gst_object_unref (srcpad);
1345 sq->sink_tainted = sq->src_tainted = TRUE;
1350 gst_single_queue_stop (GstMultiQueue * mq, GstSingleQueue * sq)
1352 gboolean result = FALSE;
1353 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1355 GST_LOG_OBJECT (mq, "SingleQueue %d : stopping task", sq->id);
1357 result = gst_pad_stop_task (srcpad);
1358 gst_object_unref (srcpad);
1360 sq->sink_tainted = sq->src_tainted = TRUE;
1365 gst_single_queue_flush (GstMultiQueue * mq, GstSingleQueue * sq, gboolean flush,
1368 GST_DEBUG_OBJECT (mq, "flush %s queue %d", (flush ? "start" : "stop"),
1372 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1373 sq->srcresult = GST_FLOW_FLUSHING;
1374 gst_data_queue_set_flushing (sq->queue, TRUE);
1376 sq->flushing = TRUE;
1378 /* wake up non-linked task */
1379 GST_LOG_OBJECT (mq, "SingleQueue %d : waking up any waiting task",
1381 g_cond_signal (&sq->turn);
1382 sq->last_query = FALSE;
1383 g_cond_signal (&sq->query_handled);
1384 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1386 gst_single_queue_flush_queue (sq, full);
1388 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1389 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
1390 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
1391 sq->has_src_segment = FALSE;
1392 /* All pads start off OK for a smooth kick-off */
1393 sq->srcresult = GST_FLOW_OK;
1396 sq->max_size.visible = mq->max_size.visible;
1398 sq->is_segment_done = FALSE;
1401 sq->last_oldid = G_MAXUINT32;
1402 sq->next_time = GST_CLOCK_STIME_NONE;
1403 sq->last_time = GST_CLOCK_STIME_NONE;
1404 sq->cached_sinktime = GST_CLOCK_STIME_NONE;
1405 sq->group_high_time = GST_CLOCK_STIME_NONE;
1406 gst_data_queue_set_flushing (sq->queue, FALSE);
1408 /* We will become active again on the next buffer/gap */
1411 /* Reset high time to be recomputed next */
1412 mq->high_time = GST_CLOCK_STIME_NONE;
1414 sq->flushing = FALSE;
1415 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1419 /* WITH LOCK TAKEN */
1421 get_buffering_level (GstMultiQueue * mq, GstSingleQueue * sq)
1423 GstDataQueueSize size;
1424 gint buffering_level, tmp;
1426 gst_data_queue_get_level (sq->queue, &size);
1428 GST_DEBUG_OBJECT (mq,
1429 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
1430 G_GUINT64_FORMAT, sq->id, size.visible, sq->max_size.visible,
1431 size.bytes, sq->max_size.bytes, sq->cur_time, sq->max_size.time);
1433 /* get bytes and time buffer levels and take the max */
1434 if (sq->is_eos || sq->is_segment_done || sq->srcresult == GST_FLOW_NOT_LINKED
1436 buffering_level = MAX_BUFFERING_LEVEL;
1438 buffering_level = 0;
1439 if (sq->max_size.time > 0) {
1441 gst_util_uint64_scale (sq->cur_time,
1442 MAX_BUFFERING_LEVEL, sq->max_size.time);
1443 buffering_level = MAX (buffering_level, tmp);
1445 if (sq->max_size.bytes > 0) {
1447 gst_util_uint64_scale_int (size.bytes,
1448 MAX_BUFFERING_LEVEL, sq->max_size.bytes);
1449 buffering_level = MAX (buffering_level, tmp);
1453 return buffering_level;
1456 /* WITH LOCK TAKEN */
1458 update_buffering (GstMultiQueue * mq, GstSingleQueue * sq)
1460 gint buffering_level, percent;
1462 /* nothing to do when we are not in buffering mode */
1463 if (!mq->use_buffering)
1466 buffering_level = get_buffering_level (mq, sq);
1468 /* scale so that if buffering_level equals the high watermark,
1469 * the percentage is 100% */
1470 percent = gst_util_uint64_scale (buffering_level, 100, mq->high_watermark);
1475 if (mq->buffering) {
1476 if (buffering_level >= mq->high_watermark) {
1477 mq->buffering = FALSE;
1479 /* make sure it increases */
1480 percent = MAX (mq->buffering_percent, percent);
1482 SET_PERCENT (mq, percent);
1485 gboolean is_buffering = TRUE;
1487 for (iter = mq->queues; iter; iter = g_list_next (iter)) {
1488 GstSingleQueue *oq = (GstSingleQueue *) iter->data;
1490 if (get_buffering_level (mq, oq) >= mq->high_watermark) {
1491 is_buffering = FALSE;
1497 if (is_buffering && buffering_level < mq->low_watermark) {
1498 mq->buffering = TRUE;
1499 SET_PERCENT (mq, percent);
1505 gst_multi_queue_post_buffering (GstMultiQueue * mq)
1507 GstMessage *msg = NULL;
1509 g_mutex_lock (&mq->buffering_post_lock);
1510 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1511 if (mq->buffering_percent_changed) {
1512 gint percent = mq->buffering_percent;
1514 mq->buffering_percent_changed = FALSE;
1516 GST_DEBUG_OBJECT (mq, "Going to post buffering: %d%%", percent);
1517 msg = gst_message_new_buffering (GST_OBJECT_CAST (mq), percent);
1519 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1522 gst_element_post_message (GST_ELEMENT_CAST (mq), msg);
1524 g_mutex_unlock (&mq->buffering_post_lock);
1528 recheck_buffering_status (GstMultiQueue * mq)
1530 if (!mq->use_buffering && mq->buffering) {
1531 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1532 mq->buffering = FALSE;
1533 GST_DEBUG_OBJECT (mq,
1534 "Buffering property disabled, but queue was still buffering; "
1535 "setting buffering percentage to 100%%");
1536 SET_PERCENT (mq, 100);
1537 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1540 if (mq->use_buffering) {
1544 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1546 /* force buffering percentage to be recalculated */
1547 old_perc = mq->buffering_percent;
1548 mq->buffering_percent = 0;
1552 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
1553 update_buffering (mq, q);
1554 gst_data_queue_limits_changed (q->queue);
1555 tmp = g_list_next (tmp);
1558 GST_DEBUG_OBJECT (mq,
1559 "Recalculated buffering percentage: old: %d%% new: %d%%",
1560 old_perc, mq->buffering_percent);
1562 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1565 gst_multi_queue_post_buffering (mq);
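
/* Illustrative example (not part of the code): if the lowest and highest
 * cached sink running times across the active queues are 1.0s and 1.4s, the
 * raw interleave is 0.4s; after the 150% padding and the default 250ms
 * minimum below, this becomes 0.6s + 0.25s = 0.85s, which is then used as the
 * new max-size-time when use-interleave is enabled. */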
1569 calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq)
1571 GstClockTimeDiff low, high;
1572 GstClockTime interleave, other_interleave = 0;
1573 gboolean some_inactive = FALSE;
1576 low = high = GST_CLOCK_STIME_NONE;
1577 interleave = mq->interleave;
1578 /* Go over all single queues and calculate lowest/highest value */
1579 for (tmp = mq->queues; tmp; tmp = tmp->next) {
1580 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
1581 /* Ignore sparse streams for interleave calculation */
1585 /* If some streams aren't active yet (haven't received any buffers), we will
1586 * grow interleave accordingly */
1588 some_inactive = TRUE;
1592 /* Calculate within each streaming thread */
1593 if (sq && sq->thread != oq->thread) {
1594 if (oq->interleave > other_interleave)
1595 other_interleave = oq->interleave;
1599 /* If the stream isn't EOS, update the low/high input value */
1600 if (GST_CLOCK_STIME_IS_VALID (oq->cached_sinktime) && !oq->is_eos) {
1601 if (low == GST_CLOCK_STIME_NONE || oq->cached_sinktime < low)
1602 low = oq->cached_sinktime;
1603 if (high == GST_CLOCK_STIME_NONE || oq->cached_sinktime > high)
1604 high = oq->cached_sinktime;
1607 "queue %d , sinktime:%" GST_STIME_FORMAT " low:%" GST_STIME_FORMAT
1608 " high:%" GST_STIME_FORMAT, oq->id,
1609 GST_STIME_ARGS (oq->cached_sinktime), GST_STIME_ARGS (low),
1610 GST_STIME_ARGS (high));
1613 if (GST_CLOCK_STIME_IS_VALID (low) && GST_CLOCK_STIME_IS_VALID (high)) {
1614 gboolean do_update = high == low;
1615 interleave = high - low;
1616 /* Padding of interleave and minimum value */
1617 interleave = (150 * interleave / 100) + mq->min_interleave_time;
1619 sq->interleave = interleave;
1621 interleave = MAX (interleave, other_interleave);
1623 /* Progressively grow the interleave up to 5s if some streams were inactive */
1624 if (some_inactive && interleave <= mq->interleave) {
1625 interleave = MIN (5 * GST_SECOND, mq->interleave + 100 * GST_MSECOND);
1629 /* We force the interleave update if:
1630 * * the interleave was previously set while some streams were not active
1631 * yet but they now all are
1632 * * OR the interleave was previously based on all streams being active
1633 * whereas some now aren't
1635 if (mq->interleave_incomplete != some_inactive)
1638 mq->interleave_incomplete = some_inactive;
1640 /* Update the stored interleave if:
1641 * * No data has arrived yet (high == low)
1642 * * Or it went higher
1643 * * Or it went lower and we've gone past the previous interleave needed */
1644 if (do_update || interleave > mq->interleave ||
1645 ((mq->last_interleave_update + (2 * MIN (GST_SECOND,
1646 mq->interleave)) < low)
1647 && interleave < (mq->interleave * 3 / 4))) {
1648 /* Update the interleave */
1649 mq->interleave = interleave;
1650 mq->last_interleave_update = high;
1651 /* Update max-size time */
1652 mq->max_size.time = mq->interleave;
1653 SET_CHILD_PROPERTY (mq, time);
1657 GST_DEBUG_OBJECT (mq,
1658 "low:%" GST_STIME_FORMAT " high:%" GST_STIME_FORMAT " interleave:%"
1659 GST_TIME_FORMAT " mq->interleave:%" GST_TIME_FORMAT
1660 " last_interleave_update:%" GST_STIME_FORMAT, GST_STIME_ARGS (low),
1661 GST_STIME_ARGS (high), GST_TIME_ARGS (interleave),
1662 GST_TIME_ARGS (mq->interleave),
1663 GST_STIME_ARGS (mq->last_interleave_update));
1667 /* calculate the diff between running time on the sink and src of the queue.
1668 * This is the total amount of time in the queue.
1669 * WITH LOCK TAKEN */
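
/* Illustrative example (not part of the code): if the sink side of a queue
 * has advanced to a running time of 5s while its src side is at 3s, cur_time
 * becomes 2s, i.e. roughly two seconds of data are currently queued. */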
1671 update_time_level (GstMultiQueue * mq, GstSingleQueue * sq)
1673 GstClockTimeDiff sink_time, src_time;
1675 if (sq->sink_tainted) {
1676 sink_time = sq->sinktime = my_segment_to_running_time (&sq->sink_segment,
1677 sq->sink_segment.position);
1679 GST_DEBUG_OBJECT (mq,
1680 "queue %d sink_segment.position:%" GST_TIME_FORMAT ", sink_time:%"
1681 GST_STIME_FORMAT, sq->id, GST_TIME_ARGS (sq->sink_segment.position),
1682 GST_STIME_ARGS (sink_time));
1684 if (G_UNLIKELY (sq->last_time == GST_CLOCK_STIME_NONE)) {
1685 /* If the single queue still doesn't have a last_time set, this means
1686 * that nothing has been pushed out yet.
1687 * In order for the high_time computation to be as efficient as possible,
1688 * we set the last_time */
1689 sq->last_time = sink_time;
1691 if (G_UNLIKELY (sink_time != GST_CLOCK_STIME_NONE)) {
1692 /* if we have a time, we become untainted and use the time */
1693 sq->sink_tainted = FALSE;
1694 if (mq->use_interleave) {
1695 sq->cached_sinktime = sink_time;
1696 calculate_interleave (mq, sq);
1700 sink_time = sq->sinktime;
1702 if (sq->src_tainted) {
1703 GstSegment *segment;
1706 if (sq->has_src_segment) {
1707 segment = &sq->src_segment;
1708 position = sq->src_segment.position;
1711 * If the src pad had no segment yet, use the sink segment
1712 * to avoid signalling overrun if the received sink segment has a
1713 * position > max-size-time while the src pad time would be the default=0
1715 * This can happen when switching pads on chained/adaptive streams and the
1716 * new chain has a segment with a much larger position
1718 segment = &sq->sink_segment;
1719 position = sq->sink_segment.position;
1722 src_time = sq->srctime = my_segment_to_running_time (segment, position);
1723 /* if we have a time, we become untainted and use the time */
1724 if (G_UNLIKELY (src_time != GST_CLOCK_STIME_NONE)) {
1725 sq->src_tainted = FALSE;
1728 src_time = sq->srctime;
1730 GST_DEBUG_OBJECT (mq,
1731 "queue %d, sink %" GST_STIME_FORMAT ", src %" GST_STIME_FORMAT, sq->id,
1732 GST_STIME_ARGS (sink_time), GST_STIME_ARGS (src_time));
1734 /* This allows for streams with out of order timestamping - sometimes the
1735 * emerging timestamp is later than the arriving one(s) */
1736 if (G_LIKELY (GST_CLOCK_STIME_IS_VALID (sink_time) &&
1737 GST_CLOCK_STIME_IS_VALID (src_time) && sink_time > src_time))
1738 sq->cur_time = sink_time - src_time;
1742 /* updating the time level can change the buffering state */
1743 update_buffering (mq, sq);
1748 /* take a SEGMENT event and apply the values to segment, updating the time
1749 * level of queue. */
1751 apply_segment (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1752 GstSegment * segment)
1754 gst_event_copy_segment (event, segment);
1756 /* now configure the values, we use these to track timestamps on the
1758 if (segment->format != GST_FORMAT_TIME) {
1759 /* non-time format, pretend the current time segment is closed with a
1760 * 0 start and unknown stop time. */
1761 segment->format = GST_FORMAT_TIME;
1766 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1768 /* Make sure we have a valid initial segment position (and not garbage
1770 if (segment->rate > 0.0)
1771 segment->position = segment->start;
1773 segment->position = segment->stop;
1774 if (segment == &sq->sink_segment)
1775 sq->sink_tainted = TRUE;
1777 sq->has_src_segment = TRUE;
1778 sq->src_tainted = TRUE;
1781 GST_DEBUG_OBJECT (mq,
1782 "queue %d, configured SEGMENT %" GST_SEGMENT_FORMAT, sq->id, segment);
1784 /* segment can update the time level of the queue */
1785 update_time_level (mq, sq);
1787 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1788 gst_multi_queue_post_buffering (mq);
1791 /* take a buffer and update segment, updating the time level of the queue. */
1793 apply_buffer (GstMultiQueue * mq, GstSingleQueue * sq, GstClockTime timestamp,
1794 GstClockTime duration, GstSegment * segment)
1796 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1798 /* if no timestamp is set, assume it's continuous with the previous
1800 if (timestamp == GST_CLOCK_TIME_NONE)
1801 timestamp = segment->position;
1804 if (duration != GST_CLOCK_TIME_NONE)
1805 timestamp += duration;
1807 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1808 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1809 GST_TIME_ARGS (timestamp));
1811 segment->position = timestamp;
1813 if (segment == &sq->sink_segment)
1814 sq->sink_tainted = TRUE;
1816 sq->src_tainted = TRUE;
1818 /* calc diff with other end */
1819 update_time_level (mq, sq);
1820 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1821 gst_multi_queue_post_buffering (mq);
1825 apply_gap (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1826 GstSegment * segment)
1828 GstClockTime timestamp;
1829 GstClockTime duration;
1831 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1833 gst_event_parse_gap (event, &timestamp, &duration);
1835 if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
1837 if (GST_CLOCK_TIME_IS_VALID (duration)) {
1838 timestamp += duration;
1841 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1842 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1843 GST_TIME_ARGS (timestamp));
1845 segment->position = timestamp;
1847 if (segment == &sq->sink_segment)
1848 sq->sink_tainted = TRUE;
1850 sq->src_tainted = TRUE;
1852 /* calc diff with other end */
1853 update_time_level (mq, sq);
1856 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1857 gst_multi_queue_post_buffering (mq);
1860 static GstClockTimeDiff
1861 get_running_time (GstSegment * segment, GstMiniObject * object, gboolean end)
1863 GstClockTimeDiff time = GST_CLOCK_STIME_NONE;
1865 if (GST_IS_BUFFER (object)) {
1866 GstBuffer *buf = GST_BUFFER_CAST (object);
1867 GstClockTime btime = GST_BUFFER_DTS_OR_PTS (buf);
1869 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1870 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1871 btime += GST_BUFFER_DURATION (buf);
1872 time = my_segment_to_running_time (segment, btime);
1874 } else if (GST_IS_BUFFER_LIST (object)) {
1875 GstBufferList *list = GST_BUFFER_LIST_CAST (object);
1879 n = gst_buffer_list_length (list);
1880 for (i = 0; i < n; i++) {
1882 buf = gst_buffer_list_get (list, i);
1883 btime = GST_BUFFER_DTS_OR_PTS (buf);
1884 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1885 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1886 btime += GST_BUFFER_DURATION (buf);
1887 time = my_segment_to_running_time (segment, btime);
1894 } else if (GST_IS_EVENT (object)) {
1895 GstEvent *event = GST_EVENT_CAST (object);
1897 /* For newsegment events return the running time of the start position */
1898 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
1899 const GstSegment *new_segment;
1901 gst_event_parse_segment (event, &new_segment);
1902 if (new_segment->format == GST_FORMAT_TIME) {
1904 my_segment_to_running_time ((GstSegment *) new_segment,
1905 new_segment->start);
1907 } else if (GST_EVENT_TYPE (event) == GST_EVENT_GAP) {
1908 GstClockTime ts, dur;
1909 gst_event_parse_gap (event, &ts, &dur);
1910 if (GST_CLOCK_TIME_IS_VALID (ts)) {
1911 if (GST_CLOCK_TIME_IS_VALID (dur))
1913 time = my_segment_to_running_time (segment, ts);
1922 static GstFlowReturn
1923 gst_single_queue_push_one (GstMultiQueue * mq, GstSingleQueue * sq,
1924 GstMiniObject * object, gboolean * allow_drop)
1926 GstFlowReturn result = sq->srcresult;
1927 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1930 GST_INFO_OBJECT (mq,
1931 "Pushing while corresponding sourcepad has been cleared");
1932 return GST_FLOW_FLUSHING;
1935 if (GST_IS_BUFFER (object)) {
1937 GstClockTime timestamp, duration;
1939 buffer = GST_BUFFER_CAST (object);
1940 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
1941 duration = GST_BUFFER_DURATION (buffer);
1943 apply_buffer (mq, sq, timestamp, duration, &sq->src_segment);
1945 /* Applying the buffer may have made the queue non-full again, unblock it if needed */
1946 gst_data_queue_limits_changed (sq->queue);
1948 if (G_UNLIKELY (*allow_drop)) {
1949 GST_DEBUG_OBJECT (mq,
1950 "SingleQueue %d : Dropping EOS buffer %p with ts %" GST_TIME_FORMAT,
1951 sq->id, buffer, GST_TIME_ARGS (timestamp));
1952 gst_buffer_unref (buffer);
1954 GST_DEBUG_OBJECT (mq,
1955 "SingleQueue %d : Pushing buffer %p with ts %" GST_TIME_FORMAT,
1956 sq->id, buffer, GST_TIME_ARGS (timestamp));
1957 result = gst_pad_push (srcpad, buffer);
1959 } else if (GST_IS_EVENT (object)) {
1962 event = GST_EVENT_CAST (object);
1964 switch (GST_EVENT_TYPE (event)) {
1965 case GST_EVENT_SEGMENT_DONE:
1966 *allow_drop = FALSE;
1969 result = GST_FLOW_EOS;
1970 if (G_UNLIKELY (*allow_drop))
1971 *allow_drop = FALSE;
1973 case GST_EVENT_STREAM_START:
1974 result = GST_FLOW_OK;
1975 if (G_UNLIKELY (*allow_drop))
1976 *allow_drop = FALSE;
1978 case GST_EVENT_SEGMENT:
1979 apply_segment (mq, sq, event, &sq->src_segment);
1980 /* Applying the segment may have made the queue non-full again, unblock it if needed */
1981 gst_data_queue_limits_changed (sq->queue);
1982 if (G_UNLIKELY (*allow_drop)) {
1983 result = GST_FLOW_OK;
1984 *allow_drop = FALSE;
1988 apply_gap (mq, sq, event, &sq->src_segment);
1989 /* Applying the gap may have made the queue non-full again, unblock it if needed */
1990 gst_data_queue_limits_changed (sq->queue);
1996 if (G_UNLIKELY (*allow_drop)) {
1997 GST_DEBUG_OBJECT (mq,
1998 "SingleQueue %d : Dropping EOS event %p of type %s",
1999 sq->id, event, GST_EVENT_TYPE_NAME (event));
2000 gst_event_unref (event);
2002 GST_DEBUG_OBJECT (mq,
2003 "SingleQueue %d : Pushing event %p of type %s",
2004 sq->id, event, GST_EVENT_TYPE_NAME (event));
2006 gst_pad_push_event (srcpad, event);
2008 } else if (GST_IS_QUERY (object)) {
2012 query = GST_QUERY_CAST (object);
2014 if (G_UNLIKELY (*allow_drop)) {
2015 GST_DEBUG_OBJECT (mq,
2016 "SingleQueue %d : Dropping EOS query %p", sq->id, query);
2017 gst_query_unref (query);
2020 res = gst_pad_peer_query (srcpad, query);
2023 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2024 sq->last_query = res;
2025 sq->last_handled_query = query;
2026 g_cond_signal (&sq->query_handled);
2027 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2029 g_warning ("Unexpected object in singlequeue %u (refcounting problem?)",
2033 gst_object_unref (srcpad);
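/* Descriptive note: detach the mini object from a queue item so that
 * destroying the item will no longer unref it; ownership passes to the
 * caller. */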
2039 static GstMiniObject *
2040 gst_multi_queue_item_steal_object (GstMultiQueueItem * item)
2045 item->object = NULL;
2051 gst_multi_queue_item_destroy (GstMultiQueueItem * item)
2053 if (!item->is_query && item->object)
2054 gst_mini_object_unref (item->object);
2055 g_slice_free (GstMultiQueueItem, item);
2058 /* takes ownership of passed mini object! */
2059 static GstMultiQueueItem *
2060 gst_multi_queue_buffer_item_new (GstMiniObject * object, guint32 curid)
2062 GstMultiQueueItem *item;
2064 item = g_slice_new (GstMultiQueueItem);
2065 item->object = object;
2066 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
2067 item->posid = curid;
2068 item->is_query = GST_IS_QUERY (object);
2070 item->size = gst_buffer_get_size (GST_BUFFER_CAST (object));
2071 item->duration = GST_BUFFER_DURATION (object);
2072 if (item->duration == GST_CLOCK_TIME_NONE)
2074 item->visible = TRUE;
2078 static GstMultiQueueItem *
2079 gst_multi_queue_mo_item_new (GstMiniObject * object, guint32 curid)
2081 GstMultiQueueItem *item;
2083 item = g_slice_new (GstMultiQueueItem);
2084 item->object = object;
2085 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
2086 item->posid = curid;
2087 item->is_query = GST_IS_QUERY (object);
2091 item->visible = FALSE;
2095 /* Each singlequeue's task loop attempts to push buffers until the return value
2096 * is not-linked. Not-linked pads are not allowed to push data beyond
2097 * any linked pads, so they don't 'rush ahead of the pack'.
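/* Simplified sketch of the wait decision made below for a not-linked pad
 * (the real code also handles flushing, wakeups and re-checks after each
 * sleep):
 *
 *   if (mq->sync_by_running_time)
 *     sleep while the item's running time (+ unlinked-cache-time) is ahead
 *     of the group/global high time produced by the linked pads;
 *   else
 *     sleep while the item's id (newid) is ahead of mq->highid;
 *
 * so a not-linked stream only outputs an item once some linked stream has
 * already advanced at least that far. */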
2100 gst_multi_queue_loop (GstPad * pad)
2103 GstMultiQueueItem *item;
2104 GstDataQueueItem *sitem;
2106 GstMiniObject *object = NULL;
2108 GstFlowReturn result;
2109 GstClockTimeDiff next_time;
2110 gboolean is_buffer, is_query;
2111 gboolean do_update_buffering = FALSE;
2112 gboolean dropping = FALSE;
2113 GstPad *srcpad = NULL;
2115 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2116 mq = g_weak_ref_get (&sq->mqueue);
2117 srcpad = g_weak_ref_get (&sq->srcpad);
2123 GST_DEBUG_OBJECT (mq, "SingleQueue %d : trying to pop an object", sq->id);
2128 /* Get something from the queue, blocking until that happens, or we get
2130 if (!(gst_data_queue_pop (sq->queue, &sitem)))
2133 item = (GstMultiQueueItem *) sitem;
2134 newid = item->posid;
2136 is_query = item->is_query;
2138 /* steal the object and destroy the item */
2139 object = gst_multi_queue_item_steal_object (item);
2140 gst_multi_queue_item_destroy (item);
2142 is_buffer = GST_IS_BUFFER (object);
2144 /* Get running time of the item. Events other than SEGMENT/GAP will have GST_CLOCK_STIME_NONE */
2145 next_time = get_running_time (&sq->src_segment, object, FALSE);
2147 GST_LOG_OBJECT (mq, "SingleQueue %d : newid:%d , oldid:%d",
2148 sq->id, newid, sq->last_oldid);
2150 /* If we're not-linked, we do some extra work because we might need to
2151 * wait before pushing. If we're linked but there's a gap in the IDs,
2152 * or it's the first loop, or we just passed the previous highid,
2153 * we might need to wake some sleeping pad up, so there's extra work to do there too. */
2155 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2156 if (sq->srcresult == GST_FLOW_NOT_LINKED
2157 || (sq->last_oldid == G_MAXUINT32) || (newid != (sq->last_oldid + 1))
2158 || sq->last_oldid > mq->highid) {
2159 GST_LOG_OBJECT (mq, "CHECKING sq->srcresult: %s",
2160 gst_flow_get_name (sq->srcresult));
2162 /* Check again if we're flushing after the lock is taken,
2163 * the flush flag might have been changed in the meantime */
2165 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2169 /* Update the nextid so other threads know when to wake us up */
2171 /* Take into account the extra cache time since we're unlinked */
2172 if (GST_CLOCK_STIME_IS_VALID (next_time))
2173 next_time += mq->unlinked_cache_time;
2174 sq->next_time = next_time;
2176 /* Update the oldid (the last ID we output) for highid tracking */
2177 if (sq->last_oldid != G_MAXUINT32)
2178 sq->oldid = sq->last_oldid;
2180 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2181 gboolean should_wait;
2182 /* Go to sleep until it's time to push this buffer */
2184 /* Recompute the highid */
2185 compute_high_id (mq);
2186 /* Recompute the high time */
2187 compute_high_time (mq, sq->groupid);
2189 GST_DEBUG_OBJECT (mq,
2190 "groupid %d high_time %" GST_STIME_FORMAT " next_time %"
2191 GST_STIME_FORMAT, sq->groupid, GST_STIME_ARGS (sq->group_high_time),
2192 GST_STIME_ARGS (next_time));
2194 if (mq->sync_by_running_time) {
2195 if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
2196 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
2197 (mq->high_time == GST_CLOCK_STIME_NONE
2198 || next_time > mq->high_time);
2200 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
2201 next_time > sq->group_high_time;
2204 should_wait = newid > mq->highid;
2206 while (should_wait && sq->srcresult == GST_FLOW_NOT_LINKED) {
2208 GST_DEBUG_OBJECT (mq,
2209 "queue %d sleeping for not-linked wakeup with "
2210 "newid %u, highid %u, next_time %" GST_STIME_FORMAT
2211 ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
2212 GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time));
2214 /* Wake up all non-linked pads before we sleep */
2215 wake_up_next_non_linked (mq);
2218 g_cond_wait (&sq->turn, &mq->qlock);
2222 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2226 /* Recompute the high time and ID */
2227 compute_high_time (mq, sq->groupid);
2228 compute_high_id (mq);
2230 GST_DEBUG_OBJECT (mq, "queue %d woken from sleeping for not-linked "
2231 "wakeup with newid %u, highid %u, next_time %" GST_STIME_FORMAT
2232 ", high_time %" GST_STIME_FORMAT " mq high_time %" GST_STIME_FORMAT,
2233 sq->id, newid, mq->highid,
2234 GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time),
2235 GST_STIME_ARGS (mq->high_time));
2237 if (mq->sync_by_running_time) {
2238 if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
2239 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
2240 (mq->high_time == GST_CLOCK_STIME_NONE
2241 || next_time > mq->high_time);
2243 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
2244 next_time > sq->group_high_time;
2247 should_wait = newid > mq->highid;
2250 /* Re-compute the high_id in case someone else pushed */
2251 compute_high_id (mq);
2252 compute_high_time (mq, sq->groupid);
2254 compute_high_id (mq);
2255 compute_high_time (mq, sq->groupid);
2256 /* Wake up all non-linked pads */
2257 wake_up_next_non_linked (mq);
2259 /* We're done waiting, we can clear the nextid and nexttime */
2261 sq->next_time = GST_CLOCK_STIME_NONE;
2263 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2268 GST_LOG_OBJECT (mq, "sq:%d BEFORE PUSHING sq->srcresult: %s", sq->id,
2269 gst_flow_get_name (sq->srcresult));
2271 /* Update time stats */
2272 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2273 next_time = get_running_time (&sq->src_segment, object, TRUE);
2274 if (GST_CLOCK_STIME_IS_VALID (next_time)) {
2275 if (sq->last_time == GST_CLOCK_STIME_NONE || sq->last_time < next_time)
2276 sq->last_time = next_time;
2277 if (mq->high_time == GST_CLOCK_STIME_NONE || mq->high_time <= next_time) {
2278 /* Wake up all non-linked pads now that we advanced the high time */
2279 mq->high_time = next_time;
2280 wake_up_next_non_linked (mq);
2283 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2285 /* Try to push out the new object */
2286 result = gst_single_queue_push_one (mq, sq, object, &dropping);
2289 /* Check if we pushed something already and if this is
2290 * now a switch from an active to a non-active stream.
2292 * If it is, we reset all the waiting streams, let them
2293 * push another buffer to see if they're now active again.
2294 * This allows faster switching between streams and prevents
2295 * deadlocks if downstream does any waiting too.
2297 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2298 if (sq->pushed && sq->srcresult == GST_FLOW_OK
2299 && result == GST_FLOW_NOT_LINKED) {
2302 GST_LOG_OBJECT (mq, "SingleQueue %d : Changed from active to non-active",
2305 compute_high_id (mq);
2306 compute_high_time (mq, sq->groupid);
2307 do_update_buffering = TRUE;
2309 /* maybe no-one is waiting */
2310 if (mq->numwaiting > 0) {
2311 /* Else figure out which singlequeue(s) need waking up */
2312 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2313 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
2315 if (sq2->srcresult == GST_FLOW_NOT_LINKED) {
2316 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq2->id);
2317 sq2->pushed = FALSE;
2318 sq2->srcresult = GST_FLOW_OK;
2319 g_cond_signal (&sq2->turn);
2328 /* now hold on a bit;
2329 * we cannot simply relay this result to upstream, because upstream
2330 * might already be on another segment, so we have to make
2331 * sure we are relaying the correct info w.r.t. the proper segment */
2332 if (result == GST_FLOW_EOS && !dropping &&
2333 sq->srcresult != GST_FLOW_NOT_LINKED) {
2334 GST_DEBUG_OBJECT (mq, "starting EOS drop on sq %d", sq->id);
2336 /* pretend we have not seen EOS yet for upstream's sake */
2337 result = sq->srcresult;
2338 } else if (dropping && gst_data_queue_is_empty (sq->queue)) {
2339 /* queue empty, so stop dropping
2340 * we can commit the result we have now,
2341 * which is either OK after a segment, or EOS */
2342 GST_DEBUG_OBJECT (mq, "committed EOS drop on sq %d", sq->id);
2344 result = GST_FLOW_EOS;
2346 sq->srcresult = result;
2347 sq->last_oldid = newid;
2349 if (do_update_buffering)
2350 update_buffering (mq, sq);
2352 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2353 gst_multi_queue_post_buffering (mq);
2355 GST_LOG_OBJECT (mq, "sq:%d AFTER PUSHING sq->srcresult: %s (is_eos:%d)",
2356 sq->id, gst_flow_get_name (sq->srcresult), GST_PAD_IS_EOS (srcpad));
2358 /* Need to make sure we wake up any sleeping pads when we exit */
2359 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2360 if (mq->numwaiting > 0 && (GST_PAD_IS_EOS (srcpad)
2361 || sq->srcresult == GST_FLOW_EOS)) {
2362 compute_high_time (mq, sq->groupid);
2363 compute_high_id (mq);
2364 wake_up_next_non_linked (mq);
2366 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2371 if (result != GST_FLOW_OK && result != GST_FLOW_NOT_LINKED
2372 && result != GST_FLOW_EOS)
2376 gst_clear_object (&mq);
2377 gst_clear_object (&srcpad);
2383 if (object && !is_query)
2384 gst_mini_object_unref (object);
2386 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2387 sq->last_query = FALSE;
2388 g_cond_signal (&sq->query_handled);
2390 /* Post an error message if we got EOS while downstream
2391 * has returned an error flow return. After EOS there
2392 * will be no further buffer which could propagate the error. */
2394 if ((sq->is_eos || sq->is_segment_done) && sq->srcresult < GST_FLOW_EOS) {
2395 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2396 GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
2398 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2401 /* upstream needs to see fatal result ASAP to shut things down,
2402 * but might be stuck in one of our other full queues;
2403 * so empty this one and trigger dynamic queue growth. At
2404 * this point the srcresult is not OK, NOT_LINKED
2405 * or EOS, i.e. a real failure */
2406 gst_single_queue_flush_queue (sq, FALSE);
2407 single_queue_underrun_cb (sq->queue, sq);
2408 gst_data_queue_set_flushing (sq->queue, TRUE);
2409 gst_pad_pause_task (srcpad);
2410 GST_CAT_LOG_OBJECT (multi_queue_debug, mq,
2411 "SingleQueue[%d] task paused, reason:%s",
2412 sq->id, gst_flow_get_name (sq->srcresult));
2418 * gst_multi_queue_chain:
2420 * This is similar to GstQueue's chain function, except:
2421 * _ we don't have leak behaviours,
2422 * _ we push with a unique id (curid)
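/* Sketch of the flow below: take a unique id from mq->counter, wrap the
 * buffer in a GstMultiQueueItem, optionally update the interleave
 * bookkeeping, push the item into the GstDataQueue (which may block when
 * full), and only then account the buffer in the sink segment so the time
 * level never counts data that was not actually queued. */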
2424 static GstFlowReturn
2425 gst_multi_queue_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
2429 GstMultiQueueItem *item = NULL;
2431 GstClockTime timestamp, duration;
2433 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2434 mq = g_weak_ref_get (&sq->mqueue);
2439 /* if eos, we are always full, so avoid hanging incoming indefinitely */
2445 /* Get a unique incrementing id */
2446 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2448 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
2449 duration = GST_BUFFER_DURATION (buffer);
2452 "SingleQueue %d : about to enqueue buffer %p with id %d (pts:%"
2453 GST_TIME_FORMAT " dts:%" GST_TIME_FORMAT " dur:%" GST_TIME_FORMAT ")",
2454 sq->id, buffer, curid, GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2455 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), GST_TIME_ARGS (duration));
2457 item = gst_multi_queue_buffer_item_new (GST_MINI_OBJECT_CAST (buffer), curid);
2459 /* Update interleave before pushing data into queue */
2460 if (mq->use_interleave) {
2461 GstClockTime val = timestamp;
2462 GstClockTimeDiff dval;
2464 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2465 if (val == GST_CLOCK_TIME_NONE)
2466 val = sq->sink_segment.position;
2467 if (duration != GST_CLOCK_TIME_NONE)
2470 dval = my_segment_to_running_time (&sq->sink_segment, val);
2471 if (GST_CLOCK_STIME_IS_VALID (dval)) {
2472 sq->cached_sinktime = dval;
2473 GST_DEBUG_OBJECT (mq,
2474 "Queue %d cached sink time now %" G_GINT64_FORMAT " %"
2475 GST_STIME_FORMAT, sq->id, sq->cached_sinktime,
2476 GST_STIME_ARGS (sq->cached_sinktime));
2477 calculate_interleave (mq, sq);
2479 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2482 if (!(gst_data_queue_push (sq->queue, (GstDataQueueItem *) item)))
2485 /* update time level, we must do this after pushing the data in the queue so
2486 * that we never end up filling the queue first. */
2487 apply_buffer (mq, sq, timestamp, duration, &sq->sink_segment);
2490 gst_clear_object (&mq);
2491 return sq->srcresult;
2496 GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
2497 sq->id, gst_flow_get_name (sq->srcresult));
2499 gst_multi_queue_item_destroy (item);
2504 GST_DEBUG_OBJECT (mq, "we are EOS, dropping buffer, return EOS");
2505 gst_buffer_unref (buffer);
2506 gst_object_unref (mq);
2507 return GST_FLOW_EOS;
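/* Descriptive note: sink pad activation in push mode. On activation the
 * singlequeue starts off with GST_FLOW_OK and a non-flushing data queue; on
 * deactivation srcresult becomes FLUSHING, any waiting serialized query is
 * released, and the data queue is set to flushing and flushed once the
 * streaming thread has finished (stream lock taken and released). */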
2512 gst_multi_queue_sink_activate_mode (GstPad * pad, GstObject * parent,
2513 GstPadMode mode, gboolean active)
2519 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2520 mq = (GstMultiQueue *) gst_pad_get_parent (pad);
2522 /* mq is NULL if the pad is activated/deactivated before being
2523 * added to the multiqueue */
2525 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2528 case GST_PAD_MODE_PUSH:
2530 /* All pads start off linked until they push one buffer */
2531 sq->srcresult = GST_FLOW_OK;
2533 gst_data_queue_set_flushing (sq->queue, FALSE);
2535 sq->srcresult = GST_FLOW_FLUSHING;
2536 sq->last_query = FALSE;
2537 g_cond_signal (&sq->query_handled);
2538 gst_data_queue_set_flushing (sq->queue, TRUE);
2540 /* Wait until streaming thread has finished */
2542 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2543 GST_PAD_STREAM_LOCK (pad);
2545 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2546 gst_data_queue_flush (sq->queue);
2548 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2549 GST_PAD_STREAM_UNLOCK (pad);
2551 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2561 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2562 gst_object_unref (mq);
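/* Descriptive note: events arriving on the sink pad. Flush-start/stop are
 * forwarded directly to the source pad and (un)flush the queue; other
 * non-serialized events are also forwarded directly, while serialized events
 * are wrapped in a GstMultiQueueItem with a unique id and pushed through the
 * data queue so they stay ordered with the buffers. Segment and gap events
 * additionally update the sink segment (and the interleave calculation when
 * use-interleave is enabled); EOS and segment-done mark the queue as filled. */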
2568 static GstFlowReturn
2569 gst_multi_queue_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
2574 GstMultiQueueItem *item;
2575 gboolean res = TRUE;
2576 GstFlowReturn flowret = GST_FLOW_OK;
2578 GstEvent *sref = NULL;
2582 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2583 mq = (GstMultiQueue *) parent;
2584 srcpad = g_weak_ref_get (&sq->srcpad);
2587 GST_INFO_OBJECT (pad, "Pushing while corresponding sourcepad has been"
2588 " removed already");
2590 return GST_FLOW_FLUSHING;
2593 type = GST_EVENT_TYPE (event);
2596 case GST_EVENT_STREAM_START:
2598 if (mq->sync_by_running_time) {
2599 GstStreamFlags stream_flags;
2600 gst_event_parse_stream_flags (event, &stream_flags);
2601 if ((stream_flags & GST_STREAM_FLAG_SPARSE)) {
2602 GST_INFO_OBJECT (mq, "SingleQueue %d is a sparse stream", sq->id);
2603 sq->is_sparse = TRUE;
2607 sq->thread = g_thread_self ();
2609 /* Remove EOS flag */
2613 case GST_EVENT_FLUSH_START:
2614 GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush start event",
2617 res = gst_pad_push_event (srcpad, event);
2619 gst_single_queue_flush (mq, sq, TRUE, FALSE);
2620 gst_single_queue_pause (mq, sq);
2623 case GST_EVENT_FLUSH_STOP:
2624 GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush stop event",
2627 res = gst_pad_push_event (srcpad, event);
2629 gst_single_queue_flush (mq, sq, FALSE, FALSE);
2630 gst_single_queue_start (mq, sq);
2633 case GST_EVENT_SEGMENT:
2634 sq->is_segment_done = FALSE;
2635 sref = gst_event_ref (event);
2638 /* take ref because the queue will take ownership and we need the event
2639 * afterwards to update the segment */
2640 sref = gst_event_ref (event);
2641 if (mq->use_interleave) {
2642 GstClockTime val, dur;
2644 gst_event_parse_gap (event, &val, &dur);
2645 if (GST_CLOCK_TIME_IS_VALID (val)) {
2646 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2647 if (GST_CLOCK_TIME_IS_VALID (dur))
2649 stime = my_segment_to_running_time (&sq->sink_segment, val);
2650 if (GST_CLOCK_STIME_IS_VALID (stime)) {
2651 sq->cached_sinktime = stime;
2652 calculate_interleave (mq, sq);
2654 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2660 if (!(GST_EVENT_IS_SERIALIZED (event))) {
2661 res = gst_pad_push_event (srcpad, event);
2667 /* if eos, we are always full, so avoid hanging incoming indefinitely */
2671 /* Get a unique incrementing id. */
2672 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2674 item = gst_multi_queue_mo_item_new ((GstMiniObject *) event, curid);
2676 GST_DEBUG_OBJECT (mq,
2677 "SingleQueue %d : Enqueuing event %p of type %s with id %d",
2678 sq->id, event, GST_EVENT_TYPE_NAME (event), curid);
2680 if (!gst_data_queue_push (sq->queue, (GstDataQueueItem *) item))
2683 /* mark EOS when we receive one; we must do that after putting the
2684 * item in the queue because EOS marks the queue as filled. */
2686 case GST_EVENT_SEGMENT_DONE:
2687 sq->is_segment_done = TRUE;
2688 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2689 update_buffering (mq, sq);
2690 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2691 single_queue_overrun_cb (sq->queue, sq);
2692 gst_multi_queue_post_buffering (mq);
2695 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2698 /* Post an error message if we got EOS while downstream
2699 * has returned an error flow return. After EOS there
2700 * will be no further buffer which could propagate the error. */
2702 if (sq->srcresult < GST_FLOW_EOS) {
2703 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2704 GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
2706 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2709 /* EOS affects the buffering state */
2710 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2711 update_buffering (mq, sq);
2712 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2713 single_queue_overrun_cb (sq->queue, sq);
2714 gst_multi_queue_post_buffering (mq);
2716 case GST_EVENT_SEGMENT:
2717 apply_segment (mq, sq, sref, &sq->sink_segment);
2718 gst_event_unref (sref);
2719 /* a new segment allows us to accept more buffers if we got EOS
2720 * from downstream */
2721 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2722 if (sq->srcresult == GST_FLOW_EOS)
2723 sq->srcresult = GST_FLOW_OK;
2724 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2728 apply_gap (mq, sq, sref, &sq->sink_segment);
2729 gst_event_unref (sref);
2736 gst_object_unref (srcpad);
2738 flowret = GST_FLOW_ERROR;
2739 GST_DEBUG_OBJECT (mq, "SingleQueue %d : returning %s", sq->id,
2740 gst_flow_get_name (flowret));
2745 gst_object_unref (srcpad);
2746 GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
2747 sq->id, gst_flow_get_name (sq->srcresult));
2749 gst_event_unref (sref);
2750 gst_multi_queue_item_destroy (item);
2751 return sq->srcresult;
2755 gst_object_unref (srcpad);
2756 GST_DEBUG_OBJECT (mq, "we are EOS, dropping event, return GST_FLOW_EOS");
2757 gst_event_unref (event);
2758 return GST_FLOW_EOS;
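/* Descriptive note: serialized queries are pushed through the data queue
 * like serialized events, and the sinkpad thread then waits on the
 * query_handled condition until the streaming thread has performed the query
 * downstream (or the queue starts flushing). Non-serialized queries use the
 * default handler. */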
2763 gst_multi_queue_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
2769 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2770 mq = (GstMultiQueue *) parent;
2772 switch (GST_QUERY_TYPE (query)) {
2774 if (GST_QUERY_IS_SERIALIZED (query)) {
2776 GstMultiQueueItem *item;
2778 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2779 if (sq->srcresult != GST_FLOW_OK)
2782 /* serialized queries go in the queue, like serialized events. We need to be certain that we
2783 * don't cause deadlocks waiting for the query return value. We check if
2784 * the queue is empty (nothing is blocking downstream and the query can
2785 * be pushed for sure) or we are not buffering. If we are buffering,
2786 * the pipeline waits to unblock downstream until our queue fills up
2787 * completely, which cannot happen if we block on the query.
2788 * Therefore we only potentially block when we are not buffering. */
2789 if (!mq->use_buffering || gst_data_queue_is_empty (sq->queue)) {
2790 /* Get a unique incrementing id. */
2791 curid = g_atomic_int_add ((gint *) & mq->counter, 1);
2793 item = gst_multi_queue_mo_item_new ((GstMiniObject *) query, curid);
2795 GST_DEBUG_OBJECT (mq,
2796 "SingleQueue %d : Enqueuing query %p of type %s with id %d",
2797 sq->id, query, GST_QUERY_TYPE_NAME (query), curid);
2798 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2799 res = gst_data_queue_push (sq->queue, (GstDataQueueItem *) item);
2800 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2801 if (!res || sq->flushing)
2803 /* it might be that the query has been taken out of the queue
2804 * while we were unlocked. So, we need to check if the last
2805 * handled query is the same one as the one we just
2806 * pushed. If it is, we don't need to wait for the condition
2807 * variable, otherwise we wait for the condition variable to be signaled. */
2809 while (!sq->flushing && sq->srcresult == GST_FLOW_OK
2810 && sq->last_handled_query != query)
2811 g_cond_wait (&sq->query_handled, &mq->qlock);
2812 res = sq->last_query;
2813 sq->last_handled_query = NULL;
2815 GST_DEBUG_OBJECT (mq, "refusing query, we are buffering and the "
2816 "queue is not empty");
2819 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2821 /* default handling */
2822 res = gst_pad_query_default (pad, parent, query);
2830 GST_DEBUG_OBJECT (mq, "Flushing");
2831 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2837 gst_multi_queue_src_activate_mode (GstPad * pad, GstObject * parent,
2838 GstPadMode mode, gboolean active)
2844 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2845 mq = g_weak_ref_get (&sq->mqueue);
2848 GST_ERROR_OBJECT (pad, "No multiqueue set anymore, can't activate pad");
2853 GST_DEBUG_OBJECT (mq, "SingleQueue %d", sq->id);
2856 case GST_PAD_MODE_PUSH:
2858 gst_single_queue_flush (mq, sq, FALSE, TRUE);
2859 result = parent ? gst_single_queue_start (mq, sq) : TRUE;
2861 gst_single_queue_flush (mq, sq, TRUE, TRUE);
2862 result = gst_single_queue_stop (mq, sq);
2869 gst_object_unref (mq);
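/* Descriptive note: a RECONFIGURE event on a src pad that previously
 * returned NOT_LINKED means downstream may have been relinked; reset
 * srcresult to GST_FLOW_OK and signal the streaming thread so it retries
 * pushing. All events are then forwarded to the corresponding sink pad. */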
2874 gst_multi_queue_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2876 GstSingleQueue *sq = GST_MULTIQUEUE_PAD (pad)->sq;
2877 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
2879 GstPad *sinkpad = g_weak_ref_get (&sq->sinkpad);
2881 if (!mq || !sinkpad) {
2882 gst_clear_object (&sinkpad);
2883 gst_clear_object (&mq);
2884 GST_INFO_OBJECT (pad, "No multiqueue/sinkpad set anymore, flushing");
2889 switch (GST_EVENT_TYPE (event)) {
2890 case GST_EVENT_RECONFIGURE:
2891 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2892 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2893 sq->srcresult = GST_FLOW_OK;
2894 g_cond_signal (&sq->turn);
2896 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2898 ret = gst_pad_push_event (sinkpad, event);
2901 ret = gst_pad_push_event (sinkpad, event);
2905 gst_object_unref (sinkpad);
2906 gst_object_unref (mq);
2912 gst_multi_queue_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2916 /* FIXME, Handle position offset depending on queue size */
2917 switch (GST_QUERY_TYPE (query)) {
2919 /* default handling */
2920 res = gst_pad_query_default (pad, parent, query);
2927 * Next-non-linked functions
2930 /* WITH LOCK TAKEN */
2932 wake_up_next_non_linked (GstMultiQueue * mq)
2936 /* maybe no-one is waiting */
2937 if (mq->numwaiting < 1)
2940 if (mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (mq->high_time)) {
2941 /* Else figure out which singlequeue(s) need waking up */
2942 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2943 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2944 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2945 GstClockTimeDiff high_time;
2947 if (GST_CLOCK_STIME_IS_VALID (sq->group_high_time))
2948 high_time = sq->group_high_time;
2950 high_time = mq->high_time;
2952 if (GST_CLOCK_STIME_IS_VALID (sq->next_time) &&
2953 GST_CLOCK_STIME_IS_VALID (high_time)
2954 && sq->next_time <= high_time) {
2955 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
2956 g_cond_signal (&sq->turn);
2961 /* Else figure out which singlequeue(s) need waking up */
2962 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2963 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2964 if (sq->srcresult == GST_FLOW_NOT_LINKED &&
2965 sq->nextid != 0 && sq->nextid <= mq->highid) {
2966 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
2967 g_cond_signal (&sq->turn);
2973 /* WITH LOCK TAKEN */
2975 compute_high_id (GstMultiQueue * mq)
2977 /* The high-id is either the highest id among the linked pads, or if all
2978 * pads are not-linked, it's the lowest id (nextid) among the not-linked pads */
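/* Hypothetical example: a not-linked queue waiting to output id 15 and a
 * linked queue whose last output id (oldid) is 12 give highid = 12; the
 * not-linked queue keeps sleeping until some linked queue has output an id
 * of at least 15 (or until all remaining pads are not-linked, in which case
 * the lowest waiting id is used so at least one of them can make progress). */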
2980 guint32 lowest = G_MAXUINT32;
2981 guint32 highid = G_MAXUINT32;
2983 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2984 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2985 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
2988 GST_INFO_OBJECT (mq,
2989 "srcpad has been removed already... ignoring single queue");
2994 GST_LOG_OBJECT (mq, "inspecting sq:%d , nextid:%d, oldid:%d, srcresult:%s",
2995 sq->id, sq->nextid, sq->oldid, gst_flow_get_name (sq->srcresult));
2997 /* No need to consider queues which are not waiting */
2998 if (sq->nextid == 0) {
2999 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
3000 gst_object_unref (srcpad);
3004 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3005 if (sq->nextid < lowest)
3006 lowest = sq->nextid;
3007 } else if (!GST_PAD_IS_EOS (srcpad) && sq->srcresult != GST_FLOW_EOS) {
3008 /* If we don't have a global highid, or the global highid is lower than
3009 * this single queue's last outputted id, store the queue's one,
3010 * unless the singlequeue output is at EOS */
3011 if ((highid == G_MAXUINT32) || (sq->oldid > highid))
3014 gst_object_unref (srcpad);
3017 if (highid == G_MAXUINT32 || lowest < highid)
3018 mq->highid = lowest;
3020 mq->highid = highid;
3022 GST_LOG_OBJECT (mq, "Highid is now : %u, lowest non-linked %u", mq->highid,
3026 /* WITH LOCK TAKEN */
3028 compute_high_time (GstMultiQueue * mq, guint groupid)
3030 /* The high-time is either the highest last time among the linked
3031 * pads, or if all pads are not-linked, it's the lowest next time of the not-linked pads */
3034 GstClockTimeDiff highest = GST_CLOCK_STIME_NONE;
3035 GstClockTimeDiff lowest = GST_CLOCK_STIME_NONE;
3036 GstClockTimeDiff group_high = GST_CLOCK_STIME_NONE;
3037 GstClockTimeDiff group_low = GST_CLOCK_STIME_NONE;
3038 GstClockTimeDiff res;
3039 /* Number of streams which belong to groupid */
3040 guint group_count = 0;
3042 if (!mq->sync_by_running_time)
3043 /* return GST_CLOCK_STIME_NONE; */
3046 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3047 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3048 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3051 GST_INFO_OBJECT (mq,
3052 "srcpad has been removed already... ignoring single queue");
3058 "inspecting sq:%d (group:%d) , next_time:%" GST_STIME_FORMAT
3059 ", last_time:%" GST_STIME_FORMAT ", srcresult:%s", sq->id, sq->groupid,
3060 GST_STIME_ARGS (sq->next_time), GST_STIME_ARGS (sq->last_time),
3061 gst_flow_get_name (sq->srcresult));
3063 if (sq->groupid == groupid)
3066 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3067 /* No need to consider queues which are not waiting */
3068 if (!GST_CLOCK_STIME_IS_VALID (sq->next_time)) {
3069 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
3070 gst_object_unref (srcpad);
3074 if (lowest == GST_CLOCK_STIME_NONE || sq->next_time < lowest)
3075 lowest = sq->next_time;
3076 if (sq->groupid == groupid && (group_low == GST_CLOCK_STIME_NONE
3077 || sq->next_time < group_low))
3078 group_low = sq->next_time;
3079 } else if (!GST_PAD_IS_EOS (srcpad) && sq->srcresult != GST_FLOW_EOS) {
3080 /* If we don't have a global high time, or the global high time
3081 * is lower than this single queue's last outputted time, store
3082 * the queue's one, unless the singlequeue output is at EOS. */
3083 if (highest == GST_CLOCK_STIME_NONE
3084 || (sq->last_time != GST_CLOCK_STIME_NONE && sq->last_time > highest))
3085 highest = sq->last_time;
3086 if (sq->groupid == groupid && (group_high == GST_CLOCK_STIME_NONE
3087 || (sq->last_time != GST_CLOCK_STIME_NONE
3088 && sq->last_time > group_high)))
3089 group_high = sq->last_time;
3092 "highest now %" GST_STIME_FORMAT " lowest %" GST_STIME_FORMAT,
3093 GST_STIME_ARGS (highest), GST_STIME_ARGS (lowest));
3094 if (sq->groupid == groupid)
3096 "grouphigh %" GST_STIME_FORMAT " grouplow %" GST_STIME_FORMAT,
3097 GST_STIME_ARGS (group_high), GST_STIME_ARGS (group_low));
3099 gst_object_unref (srcpad);
3102 if (highest == GST_CLOCK_STIME_NONE)
3103 mq->high_time = lowest;
3105 mq->high_time = highest;
3107 /* If there's only one stream of a given type, use the global high */
3108 if (group_count < 2)
3109 res = GST_CLOCK_STIME_NONE;
3110 else if (group_high == GST_CLOCK_STIME_NONE)
3115 GST_LOG_OBJECT (mq, "group count %d for groupid %u", group_count, groupid);
3117 "MQ High time is now : %" GST_STIME_FORMAT ", group %d high time %"
3118 GST_STIME_FORMAT ", lowest non-linked %" GST_STIME_FORMAT,
3119 GST_STIME_ARGS (mq->high_time), groupid, GST_STIME_ARGS (res),
3120 GST_STIME_ARGS (lowest));
3122 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3123 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3124 if (groupid == sq->groupid)
3125 sq->group_high_time = res;
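/* Descriptive note for the macro below: a limit of 0 in max_size means
 * "unlimited" for that unit; otherwise the queue counts as filled once the
 * current value reaches the configured maximum for that unit (visible
 * buffers, bytes or time). */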
3129 #define IS_FILLED(q, format, value) (((q)->max_size.format) != 0 && \
3130 ((q)->max_size.format) <= (value))
3133 * GstSingleQueue functions
3136 single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
3139 GstDataQueueSize size;
3140 gboolean filled = TRUE;
3141 gboolean empty_found = FALSE;
3142 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3145 GST_ERROR ("No multiqueue set anymore, not doing anything");
3150 gst_data_queue_get_level (sq->queue, &size);
3153 "Single Queue %d: EOS %d, visible %u/%u, bytes %u/%u, time %"
3154 G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, sq->id, sq->is_eos, size.visible,
3155 sq->max_size.visible, size.bytes, sq->max_size.bytes, sq->cur_time,
3158 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3160 /* check if we reached the hard time/bytes limits;
3161 time limit is only taken into account for non-sparse streams */
3162 if (sq->is_eos || IS_FILLED (sq, bytes, size.bytes) ||
3163 (!sq->is_sparse && IS_FILLED (sq, time, sq->cur_time))) {
3167 /* Search for empty queues */
3168 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3169 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
3174 if (oq->srcresult == GST_FLOW_NOT_LINKED) {
3175 GST_LOG_OBJECT (mq, "Queue %d is not-linked", oq->id);
3179 GST_LOG_OBJECT (mq, "Checking Queue %d", oq->id);
3180 if (gst_data_queue_is_empty (oq->queue) && !oq->is_sparse) {
3181 GST_LOG_OBJECT (mq, "Queue %d is empty", oq->id);
3187 /* if hard limits are not reached then we allow one more buffer in the full
3188 * queue, but only if any of the other singlequeues are empty */
3190 if (IS_FILLED (sq, visible, size.visible)) {
3191 sq->max_size.visible = size.visible + 1;
3192 GST_DEBUG_OBJECT (mq,
3193 "Bumping single queue %d max visible to %d",
3194 sq->id, sq->max_size.visible);
3200 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3201 gst_object_unref (mq);
3203 /* Overrun is always forwarded, since this is blocking the upstream element */
3205 GST_DEBUG_OBJECT (mq, "Queue %d is filled, signalling overrun", sq->id);
3206 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_OVERRUN], 0);
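/* Descriptive note: called by the GstDataQueue when this singlequeue becomes
 * empty. Bump the max visible limit of any sibling queue that is currently
 * full so data can keep flowing, and emit the 'underrun' signal once all
 * queues are empty. */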
3211 single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
3213 gboolean empty = TRUE;
3214 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3218 GST_ERROR ("No multiqueue set anymore, not doing anything");
3223 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3224 GST_LOG_OBJECT (mq, "Single Queue %d is empty but not-linked", sq->id);
3225 gst_object_unref (mq);
3229 "Single Queue %d is empty, Checking other single queues", sq->id);
3232 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3233 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3234 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
3236 if (gst_data_queue_is_full (oq->queue)) {
3237 GstDataQueueSize size;
3239 gst_data_queue_get_level (oq->queue, &size);
3240 if (IS_FILLED (oq, visible, size.visible)) {
3241 oq->max_size.visible = size.visible + 1;
3242 GST_DEBUG_OBJECT (mq,
3243 "queue %d is filled, bumping its max visible to %d", oq->id,
3244 oq->max_size.visible);
3245 gst_data_queue_limits_changed (oq->queue);
3248 if (!gst_data_queue_is_empty (oq->queue) || oq->is_sparse)
3251 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3252 gst_object_unref (mq);
3255 GST_DEBUG_OBJECT (mq, "All queues are empty, signalling it");
3256 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_UNDERRUN], 0);
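/* Descriptive note: GstDataQueueCheckFullFunction for a singlequeue. The
 * queue is always considered full on EOS or segment-done, full when the
 * visible-buffers limit is hit (unless buffering mode is active), and
 * otherwise full when the byte or time limits are reached. Not-linked queues
 * get the extra unlinked-cache-time headroom before the time limit applies
 * when syncing by running time. */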
3261 single_queue_check_full (GstDataQueue * dataq, guint visible, guint bytes,
3262 guint64 time, GstSingleQueue * sq)
3265 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3268 GST_ERROR ("No multiqueue set anymore, let's say we are full");
3273 GST_DEBUG_OBJECT (mq,
3274 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
3275 G_GUINT64_FORMAT, sq->id, visible, sq->max_size.visible, bytes,
3276 sq->max_size.bytes, sq->cur_time, sq->max_size.time);
3278 /* we are always filled on EOS */
3279 if (sq->is_eos || sq->is_segment_done) {
3284 /* we never go past the max visible items unless we are in buffering mode */
3285 if (!mq->use_buffering && IS_FILLED (sq, visible, visible)) {
3290 /* check time or bytes */
3291 res = IS_FILLED (sq, bytes, bytes);
3292 /* We only care about limits in time if we're not a sparse stream or
3293 * we're not syncing by running time */
3294 if (!sq->is_sparse || !mq->sync_by_running_time) {
3295 /* If unlinked, take into account the extra unlinked cache time */
3296 if (mq->sync_by_running_time && sq->srcresult == GST_FLOW_NOT_LINKED) {
3297 if (sq->cur_time > mq->unlinked_cache_time)
3298 res |= IS_FILLED (sq, time, sq->cur_time - mq->unlinked_cache_time);
3302 res |= IS_FILLED (sq, time, sq->cur_time);
3305 gst_object_unref (mq);
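/* Descriptive note: drain every item still stored in the singlequeue. Unless
 * this is a 'full' flush, sticky events other than SEGMENT and EOS are
 * re-stored on the source pad so a later relink still sees them. Buffering
 * state is updated afterwards. */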
3311 gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full)
3313 GstDataQueueItem *sitem;
3314 GstMultiQueueItem *mitem;
3315 gboolean was_flushing = FALSE;
3316 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3317 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3319 while (!gst_data_queue_is_empty (sq->queue)) {
3320 GstMiniObject *data;
3322 /* FIXME: If this fails here although the queue is not empty,
3323 * we're flushing... but we want to rescue all sticky
3324 * events nonetheless.
3326 if (!gst_data_queue_pop (sq->queue, &sitem)) {
3327 was_flushing = TRUE;
3328 gst_data_queue_set_flushing (sq->queue, FALSE);
3332 mitem = (GstMultiQueueItem *) sitem;
3334 data = sitem->object;
3336 if (!full && !mitem->is_query && GST_IS_EVENT (data)
3337 && srcpad && GST_EVENT_IS_STICKY (data)
3338 && GST_EVENT_TYPE (data) != GST_EVENT_SEGMENT
3339 && GST_EVENT_TYPE (data) != GST_EVENT_EOS) {
3340 gst_pad_store_sticky_event (srcpad, GST_EVENT_CAST (data));
3343 sitem->destroy (sitem);
3345 gst_clear_object (&srcpad);
3347 gst_data_queue_flush (sq->queue);
3349 gst_data_queue_set_flushing (sq->queue, TRUE);
3352 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3353 update_buffering (mq, sq);
3354 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3355 gst_multi_queue_post_buffering (mq);
3356 gst_object_unref (mq);
3361 gst_single_queue_unref (GstSingleQueue * sq)
3363 if (g_atomic_int_dec_and_test (&sq->refcount)) {
3365 gst_data_queue_flush (sq->queue);
3366 g_object_unref (sq->queue);
3367 g_cond_clear (&sq->turn);
3368 g_cond_clear (&sq->query_handled);
3369 g_weak_ref_clear (&sq->sinkpad);
3370 g_weak_ref_clear (&sq->srcpad);
3371 g_weak_ref_clear (&sq->mqueue);
3377 static GstSingleQueue *
3378 gst_single_queue_ref (GstSingleQueue * squeue)
3380 g_atomic_int_inc (&squeue->refcount);
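/* Descriptive note: create a new singlequeue with the requested id (or the
 * next free one when id is -1), copy the current size limits from the
 * multiqueue, create the matching sink_%u/src_%u pads and add them to the
 * element, activating them immediately unless the element is in the NULL
 * state. Returns NULL if the explicitly requested id is already in use. */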
3385 static GstSingleQueue *
3386 gst_single_queue_new (GstMultiQueue * mqueue, guint id)
3388 GstPad *srcpad, *sinkpad;
3390 GstPadTemplate *templ;
3393 guint temp_id = (id == -1) ? 0 : id;
3395 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
3397 /* Find an unused queue ID, if possible the passed one */
3398 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
3399 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
3400 /* This works because the IDs are sorted in ascending order */
3401 if (sq2->id == temp_id) {
3402 /* If this ID was requested by the caller return NULL,
3403 * otherwise just get us the next one */
3405 temp_id = sq2->id + 1;
3407 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
3410 } else if (sq2->id > temp_id) {
3415 sq = g_new0 (GstSingleQueue, 1);
3416 g_atomic_int_set (&sq->refcount, 1);
3420 sq->groupid = DEFAULT_PAD_GROUP_ID;
3421 sq->group_high_time = GST_CLOCK_STIME_NONE;
3423 mqueue->queues = g_list_insert_before (mqueue->queues, tmp, sq);
3424 mqueue->queues_cookie++;
3426 /* copy over max_size and extra_size so we don't need to take the lock
3427 * any longer when checking if the queue is full. */
3428 sq->max_size.visible = mqueue->max_size.visible;
3429 sq->max_size.bytes = mqueue->max_size.bytes;
3430 sq->max_size.time = mqueue->max_size.time;
3432 sq->extra_size.visible = mqueue->extra_size.visible;
3433 sq->extra_size.bytes = mqueue->extra_size.bytes;
3434 sq->extra_size.time = mqueue->extra_size.time;
3436 GST_DEBUG_OBJECT (mqueue, "Creating GstSingleQueue id:%d", sq->id);
3438 g_weak_ref_init (&sq->mqueue, mqueue);
3439 sq->srcresult = GST_FLOW_FLUSHING;
3441 sq->queue = gst_data_queue_new ((GstDataQueueCheckFullFunction)
3442 single_queue_check_full,
3443 (GstDataQueueFullCallback) single_queue_overrun_cb,
3444 (GstDataQueueEmptyCallback) single_queue_underrun_cb, sq);
3446 sq->is_sparse = FALSE;
3447 sq->flushing = FALSE;
3449 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
3450 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
3454 sq->next_time = GST_CLOCK_STIME_NONE;
3455 sq->last_time = GST_CLOCK_STIME_NONE;
3456 g_cond_init (&sq->turn);
3457 g_cond_init (&sq->query_handled);
3459 sq->sinktime = GST_CLOCK_STIME_NONE;
3460 sq->srctime = GST_CLOCK_STIME_NONE;
3461 sq->sink_tainted = TRUE;
3462 sq->src_tainted = TRUE;
3464 name = g_strdup_printf ("sink_%u", sq->id);
3465 templ = gst_static_pad_template_get (&sinktemplate);
3466 sinkpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
3467 "direction", templ->direction, "template", templ, NULL);
3468 g_weak_ref_init (&sq->sinkpad, sinkpad);
3469 gst_object_unref (templ);
3472 GST_MULTIQUEUE_PAD (sinkpad)->sq = sq;
3474 gst_pad_set_chain_function (sinkpad,
3475 GST_DEBUG_FUNCPTR (gst_multi_queue_chain));
3476 gst_pad_set_activatemode_function (sinkpad,
3477 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_activate_mode));
3478 gst_pad_set_event_full_function (sinkpad,
3479 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_event));
3480 gst_pad_set_query_function (sinkpad,
3481 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_query));
3482 gst_pad_set_iterate_internal_links_function (sinkpad,
3483 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
3484 GST_OBJECT_FLAG_SET (sinkpad, GST_PAD_FLAG_PROXY_CAPS);
3486 name = g_strdup_printf ("src_%u", sq->id);
3487 templ = gst_static_pad_template_get (&srctemplate);
3488 srcpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
3489 "direction", templ->direction, "template", templ, NULL);
3490 g_weak_ref_init (&sq->srcpad, srcpad);
3491 gst_object_unref (templ);
3494 GST_MULTIQUEUE_PAD (srcpad)->sq = gst_single_queue_ref (sq);
3496 gst_pad_set_activatemode_function (srcpad,
3497 GST_DEBUG_FUNCPTR (gst_multi_queue_src_activate_mode));
3498 gst_pad_set_event_function (srcpad,
3499 GST_DEBUG_FUNCPTR (gst_multi_queue_src_event));
3500 gst_pad_set_query_function (srcpad,
3501 GST_DEBUG_FUNCPTR (gst_multi_queue_src_query));
3502 gst_pad_set_iterate_internal_links_function (srcpad,
3503 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
3504 GST_OBJECT_FLAG_SET (srcpad, GST_PAD_FLAG_PROXY_CAPS);
3506 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
3508 /* only activate the pads when we are not in the NULL state
3509 * and add the pad under the state_lock to prevent state changes
3510 * between activating and adding */
3511 g_rec_mutex_lock (GST_STATE_GET_LOCK (mqueue));
3512 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
3513 gst_pad_set_active (srcpad, TRUE);
3514 gst_pad_set_active (sinkpad, TRUE);
3516 gst_element_add_pad (GST_ELEMENT (mqueue), srcpad);
3517 gst_element_add_pad (GST_ELEMENT (mqueue), sinkpad);
3518 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
3519 gst_single_queue_start (mqueue, sq);
3521 g_rec_mutex_unlock (GST_STATE_GET_LOCK (mqueue));
3523 GST_DEBUG_OBJECT (mqueue, "GstSingleQueue [%d] created and pads added",