2 * Copyright (C) 2006 Edward Hervey <edward@fluendo.com>
3 * Copyright (C) 2007 Jan Schmidt <jan@fluendo.com>
4 * Copyright (C) 2007 Wim Taymans <wim@fluendo.com>
5 * Copyright (C) 2011 Sebastian Dröge <sebastian.droege@collabora.co.uk>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:element-multiqueue
28 * @see_also: #GstQueue
30 * Multiqueue is similar to a normal #GstQueue with the following additional
33 * 1) Multiple streamhandling
35 * * The element handles queueing data on more than one stream at once. To
36 * achieve such a feature it has request sink pads (sink%u) and
37 * 'sometimes' src pads (src%u). When requesting a given sinkpad with gst_element_request_pad(),
38 * the associated srcpad for that stream will be created.
39 * Example: requesting sink1 will generate src1.
41 * 2) Non-starvation on multiple streams
43 * * If more than one stream is used with the element, the streams' queues
44 * will be dynamically grown (up to a limit), in order to ensure that no
45 * stream is risking data starvation. This guarantees that at any given
46 * time there are at least N bytes queued and available for each individual
47 * stream. If an EOS event comes through a srcpad, the associated queue will be
48 * considered as 'not-empty' in the queue-size-growing algorithm.
50 * 3) Non-linked srcpads graceful handling
52 * * In order to better support dynamic switching between streams, the multiqueue
53 * (unlike the current GStreamer queue) continues to push buffers on non-linked
54 * pads rather than shutting down. In addition, to prevent a non-linked stream from very quickly consuming all
55 * available buffers and thus 'racing ahead' of the other streams, the element
56 * must ensure that buffers and inlined events for a non-linked stream are pushed
57 * in the same order as they were received, relative to the other streams
58 * controlled by the element. This means that a buffer cannot be pushed to a
59 * non-linked pad any sooner than buffers in any other stream which were received
62 * Data is queued until one of the limits specified by the
63 * #GstMultiQueue:max-size-buffers, #GstMultiQueue:max-size-bytes and/or
64 * #GstMultiQueue:max-size-time properties has been reached. Any attempt to push
65 * more buffers into the queue will block the pushing thread until more space
66 * becomes available. #GstMultiQueue:extra-size-buffers,
69 * #GstMultiQueue:extra-size-bytes and #GstMultiQueue:extra-size-time are
72 * The default queue size limits are 5 buffers, 10MB of data, or
73 * two seconds' worth of data, whichever is reached first. Note that the number
74 * of buffers will dynamically grow depending on the fill level of
77 * The #GstMultiQueue::underrun signal is emitted when all of the queues
78 * are empty. The #GstMultiQueue::overrun signal is emitted when one of the
80 * Both signals are emitted from the context of the streaming thread.
82 * When using #GstMultiQueue:sync-by-running-time the unlinked streams will
83 * be throttled by the highest running-time of linked streams. This allows
84 * further relinking of those unlinked streams without them being in the
85 * future (i.e. to achieve gapless playback).
86 * When dealing with streams which have got different consumption requirements
87 * downstream (ex: video decoders which will consume more buffer (in time) than
88 * audio decoders), it is recommended to group streams of the same type
89 * by using the pad "group-id" property. This will further throttle streams
90 * in time within that group.
98 #include <gst/glib-compat-private.h>
101 #include "gstmultiqueue.h"
102 #include "gstcoreelementselements.h"
105 * @sinkpad: associated sink #GstPad
106 * @srcpad: associated source #GstPad
108 * Structure containing all information and properties about
111 typedef struct _GstSingleQueue GstSingleQueue;
113 struct _GstSingleQueue
117 /* unique identifier of the queue */
119 /* group of streams to which this queue belongs to */
122 #ifndef GST_DISABLE_GST_DEBUG
123 /* debug identifier */
126 GstClockTimeDiff group_high_time;
132 /* flowreturn of previous srcpad push */
133 GstFlowReturn srcresult;
134 /* If something was actually pushed on
135 * this pad after flushing/pad activation
136 * and the srcresult corresponds to something
142 GstSegment sink_segment;
143 GstSegment src_segment;
144 gboolean has_src_segment; /* preferred over initializing the src_segment to
145 * UNDEFINED as this doesn't requires adding ifs
146 * in every segment usage */
148 /* position of src/sink */
149 GstClockTimeDiff sinktime, srctime;
150 /* cached input value, used for interleave */
151 GstClockTimeDiff cached_sinktime;
152 /* TRUE if either position needs to be recalculated */
153 gboolean sink_tainted, src_tainted;
155 /* stream group id */
156 guint32 sink_stream_gid;
157 guint32 src_stream_gid;
159 /* TRUE if the stream group-id changed. Resetted to FALSE the next time the
160 * segment is calculated */
161 gboolean sink_stream_gid_changed;
162 gboolean src_stream_gid_changed;
166 GstDataQueueSize max_size, extra_size;
167 GstClockTime cur_time;
169 gboolean is_segment_done;
174 /* Protected by global lock */
175 guint32 nextid; /* ID of the next object waiting to be pushed */
176 guint32 oldid; /* ID of the last object pushed (last in a series) */
177 guint32 last_oldid; /* Previously observed old_id, reset to MAXUINT32 on flush */
178 GstClockTimeDiff next_time; /* End running time of next buffer to be pushed */
179 GstClockTimeDiff last_time; /* Start running time of last pushed buffer */
180 GCond turn; /* SingleQueue turn waiting conditional */
182 /* for serialized queries */
185 GstQuery *last_handled_query;
187 /* For interleave calculation */
188 GThread *thread; /* Streaming thread of SingleQueue */
189 GstClockTime interleave; /* Calculated interleve within the thread */
192 /* Extension of GstDataQueueItem structure for our usage */
193 typedef struct _GstMultiQueueItem GstMultiQueueItem;
195 struct _GstMultiQueueItem
197 GstMiniObject *object;
202 GDestroyNotify destroy;
208 static GstSingleQueue *gst_single_queue_new (GstMultiQueue * mqueue, guint id);
209 static void gst_single_queue_unref (GstSingleQueue * squeue);
210 static GstSingleQueue *gst_single_queue_ref (GstSingleQueue * squeue);
212 static void wake_up_next_non_linked (GstMultiQueue * mq);
213 static void compute_high_id (GstMultiQueue * mq);
214 static void compute_high_time (GstMultiQueue * mq, guint groupid);
215 static void single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
216 static void single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
218 static void update_buffering (GstMultiQueue * mq, GstSingleQueue * sq);
219 static void gst_multi_queue_post_buffering (GstMultiQueue * mq);
220 static void recheck_buffering_status (GstMultiQueue * mq);
222 static void gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full);
224 static void calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq);
226 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink_%u",
229 GST_STATIC_CAPS_ANY);
231 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src_%u",
234 GST_STATIC_CAPS_ANY);
236 GST_DEBUG_CATEGORY_STATIC (multi_queue_debug);
237 #define GST_CAT_DEFAULT (multi_queue_debug)
239 /* Signals and args */
247 /* default limits, we try to keep up to 2 seconds of data and if there is not
248 * time, up to 10 MB. The number of buffers is dynamically scaled to make sure
249 * there is data in the queues. Normally, the byte and time limits are not hit
250 * in these conditions. */
251 #define DEFAULT_MAX_SIZE_BYTES 10 * 1024 * 1024 /* 10 MB */
252 #define DEFAULT_MAX_SIZE_BUFFERS 5
253 #define DEFAULT_MAX_SIZE_TIME 2 * GST_SECOND
255 /* second limits. When we hit one of the above limits we are probably dealing
256 * with a badly muxed file and we scale the limits to these emergency values.
257 * This is currently not yet implemented.
258 * Since we dynamically scale the queue buffer size up to the limits but avoid
259 * going above the max-size-buffers when we can, we don't really need this
260 * additional extra size. */
261 #define DEFAULT_EXTRA_SIZE_BYTES 10 * 1024 * 1024 /* 10 MB */
262 #define DEFAULT_EXTRA_SIZE_BUFFERS 5
263 #ifdef TIZEN_FEATURE_MQ_MODIFICATION_EXTRA_SIZE_TIME
264 #define DEFAULT_EXTRA_SIZE_TIME 10 * GST_SECOND
266 #define DEFAULT_EXTRA_SIZE_TIME 3 * GST_SECOND
269 #define DEFAULT_USE_BUFFERING FALSE
270 #define DEFAULT_LOW_WATERMARK 0.01
271 #define DEFAULT_HIGH_WATERMARK 0.99
272 #define DEFAULT_SYNC_BY_RUNNING_TIME FALSE
273 #define DEFAULT_USE_INTERLEAVE FALSE
274 #define DEFAULT_UNLINKED_CACHE_TIME 250 * GST_MSECOND
276 #define DEFAULT_MINIMUM_INTERLEAVE (250 * GST_MSECOND)
281 PROP_EXTRA_SIZE_BYTES,
282 PROP_EXTRA_SIZE_BUFFERS,
283 PROP_EXTRA_SIZE_TIME,
285 PROP_MAX_SIZE_BUFFERS,
287 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
288 PROP_CURR_SIZE_BYTES,
295 PROP_SYNC_BY_RUNNING_TIME,
297 PROP_UNLINKED_CACHE_TIME,
298 PROP_MINIMUM_INTERLEAVE,
303 /* Explanation for buffer levels and percentages:
305 * The buffering_level functions here return a value in a normalized range
306 * that specifies the current fill level of a queue. The range goes from 0 to
307 * MAX_BUFFERING_LEVEL. The low/high watermarks also use this same range.
309 * This is not to be confused with the buffering_percent value, which is
310 * a *relative* quantity - relative to the low/high watermarks.
311 * buffering_percent = 0% means overall buffering_level is at the low watermark.
312 * buffering_percent = 100% means overall buffering_level is at the high watermark.
313 * buffering_percent is used for determining if the fill level has reached
314 * the high watermark, and for producing BUFFERING messages. This value
315 * always uses a 0..100 range (since it is a percentage).
317 * To avoid future confusions, whenever "buffering level" is mentioned, it
318 * refers to the absolute level which is in the 0..MAX_BUFFERING_LEVEL
319 * range. Whenever "buffering_percent" is mentioned, it refers to the
320 * percentage value that is relative to the low/high watermark. */
322 /* Using a buffering level range of 0..1000000 to allow for a
323 * resolution in ppm (1 ppm = 0.0001%) */
324 #define MAX_BUFFERING_LEVEL 1000000
326 /* How much 1% makes up in the buffer level range */
327 #define BUF_LEVEL_PERCENT_FACTOR ((MAX_BUFFERING_LEVEL) / 100)
329 /* GstMultiQueuePad */
331 #define DEFAULT_PAD_GROUP_ID 0
337 PROP_CURRENT_LEVEL_BUFFERS,
338 PROP_CURRENT_LEVEL_BYTES,
339 PROP_CURRENT_LEVEL_TIME,
342 #define GST_TYPE_MULTIQUEUE_PAD (gst_multiqueue_pad_get_type())
343 #define GST_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePad))
344 #define GST_IS_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_MULTIQUEUE_PAD))
345 #define GST_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
346 #define GST_IS_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_MULTIQUEUE_PAD))
347 #define GST_MULTIQUEUE_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
349 #define GST_MULTI_QUEUE_MUTEX_LOCK(q) G_STMT_START { \
350 g_mutex_lock (&q->qlock); \
353 #define GST_MULTI_QUEUE_MUTEX_UNLOCK(q) G_STMT_START { \
354 g_mutex_unlock (&q->qlock); \
357 #define SET_PERCENT(mq, perc) G_STMT_START { \
358 if (perc != mq->buffering_percent) { \
359 mq->buffering_percent = perc; \
360 mq->buffering_percent_changed = TRUE; \
361 GST_DEBUG_OBJECT (mq, "buffering %d percent", perc); \
365 struct _GstMultiQueuePad
372 struct _GstMultiQueuePadClass
374 GstPadClass parent_class;
377 GType gst_multiqueue_pad_get_type (void);
379 G_DEFINE_TYPE (GstMultiQueuePad, gst_multiqueue_pad, GST_TYPE_PAD);
382 gst_multiqueue_pad_get_group_id (GstMultiQueuePad * pad)
390 mq = g_weak_ref_get (&pad->sq->mqueue);
393 GST_OBJECT_LOCK (mq);
396 ret = pad->sq->groupid;
399 GST_OBJECT_UNLOCK (mq);
400 gst_object_unref (mq);
407 gst_multiqueue_pad_get_current_level_buffers (GstMultiQueuePad * pad)
409 GstSingleQueue *sq = pad->sq;
410 GstDataQueueSize level;
416 mq = g_weak_ref_get (&pad->sq->mqueue);
419 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
422 gst_data_queue_get_level (sq->queue, &level);
425 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
426 gst_object_unref (mq);
429 return level.visible;
433 gst_multiqueue_pad_get_current_level_bytes (GstMultiQueuePad * pad)
435 GstSingleQueue *sq = pad->sq;
436 GstDataQueueSize level;
442 mq = g_weak_ref_get (&pad->sq->mqueue);
445 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
448 gst_data_queue_get_level (sq->queue, &level);
451 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
452 gst_object_unref (mq);
459 gst_multiqueue_pad_get_current_level_time (GstMultiQueuePad * pad)
461 GstSingleQueue *sq = pad->sq;
468 mq = g_weak_ref_get (&pad->sq->mqueue);
471 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
477 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
478 gst_object_unref (mq);
485 gst_multiqueue_pad_get_property (GObject * object, guint prop_id,
486 GValue * value, GParamSpec * pspec)
488 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
491 case PROP_PAD_GROUP_ID:
492 g_value_set_uint (value, gst_multiqueue_pad_get_group_id (pad));
494 case PROP_CURRENT_LEVEL_BUFFERS:{
495 g_value_set_uint (value,
496 gst_multiqueue_pad_get_current_level_buffers (pad));
499 case PROP_CURRENT_LEVEL_BYTES:{
500 g_value_set_uint (value,
501 gst_multiqueue_pad_get_current_level_bytes (pad));
504 case PROP_CURRENT_LEVEL_TIME:{
505 g_value_set_uint64 (value,
506 gst_multiqueue_pad_get_current_level_time (pad));
510 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
516 gst_multiqueue_pad_set_property (GObject * object, guint prop_id,
517 const GValue * value, GParamSpec * pspec)
519 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
522 case PROP_PAD_GROUP_ID:
524 GstMultiQueue *mqueue = g_weak_ref_get (&pad->sq->mqueue);
527 GST_OBJECT_LOCK (mqueue);
529 pad->sq->groupid = g_value_get_uint (value);
532 GST_OBJECT_UNLOCK (mqueue);
533 gst_object_unref (mqueue);
538 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
544 gst_multiqueue_pad_finalize (GObject * object)
546 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
549 gst_single_queue_unref (pad->sq);
551 G_OBJECT_CLASS (gst_multiqueue_pad_parent_class)->finalize (object);
555 gst_multiqueue_pad_class_init (GstMultiQueuePadClass * klass)
557 GObjectClass *gobject_class = (GObjectClass *) klass;
559 gobject_class->set_property = gst_multiqueue_pad_set_property;
560 gobject_class->get_property = gst_multiqueue_pad_get_property;
561 gobject_class->finalize = gst_multiqueue_pad_finalize;
564 * GstMultiQueuePad:group-id:
566 * Group to which this pad belongs.
570 g_object_class_install_property (gobject_class, PROP_PAD_GROUP_ID,
571 g_param_spec_uint ("group-id", "Group ID",
572 "Group to which this pad belongs", 0, G_MAXUINT32,
573 DEFAULT_PAD_GROUP_ID, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
576 * GstMultiQueuePad:current-level-buffers:
578 * The corresponding queue's current level of buffers.
582 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_BUFFERS,
583 g_param_spec_uint ("current-level-buffers", "Current level buffers",
584 "Current level buffers", 0, G_MAXUINT32,
585 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
588 * GstMultiQueuePad:current-level-bytes:
590 * The corresponding queue's current level of bytes.
594 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_BYTES,
595 g_param_spec_uint ("current-level-bytes", "Current level bytes",
596 "Current level bytes", 0, G_MAXUINT32,
597 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
600 * GstMultiQueuePad:current-level-time:
602 * The corresponding queue's current level of time.
606 g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_TIME,
607 g_param_spec_uint64 ("current-level-time", "Current level time",
608 "Current level time", 0, G_MAXUINT64,
609 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
613 gst_multiqueue_pad_init (GstMultiQueuePad * pad)
619 /* Convenience function */
620 static inline GstClockTimeDiff
621 my_segment_to_running_time (GstSegment * segment, GstClockTime val)
623 GstClockTimeDiff res = GST_CLOCK_STIME_NONE;
625 if (GST_CLOCK_TIME_IS_VALID (val)) {
627 gst_segment_to_running_time_full (segment, GST_FORMAT_TIME, val, &val);
636 static void gst_multi_queue_finalize (GObject * object);
637 static void gst_multi_queue_set_property (GObject * object,
638 guint prop_id, const GValue * value, GParamSpec * pspec);
639 static void gst_multi_queue_get_property (GObject * object,
640 guint prop_id, GValue * value, GParamSpec * pspec);
642 static GstPad *gst_multi_queue_request_new_pad (GstElement * element,
643 GstPadTemplate * temp, const gchar * name, const GstCaps * caps);
644 static void gst_multi_queue_release_pad (GstElement * element, GstPad * pad);
645 static GstStateChangeReturn gst_multi_queue_change_state (GstElement *
646 element, GstStateChange transition);
648 static void gst_multi_queue_loop (GstPad * pad);
651 GST_DEBUG_CATEGORY_INIT (multi_queue_debug, "multiqueue", 0, "multiqueue element");
652 #define gst_multi_queue_parent_class parent_class
653 G_DEFINE_TYPE_WITH_CODE (GstMultiQueue, gst_multi_queue, GST_TYPE_ELEMENT,
655 GST_ELEMENT_REGISTER_DEFINE (multiqueue, "multiqueue", GST_RANK_NONE,
656 GST_TYPE_MULTI_QUEUE);
658 static guint gst_multi_queue_signals[LAST_SIGNAL] = { 0 };
661 gst_multi_queue_class_init (GstMultiQueueClass * klass)
663 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
664 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
666 gobject_class->set_property = gst_multi_queue_set_property;
667 gobject_class->get_property = gst_multi_queue_get_property;
672 * GstMultiQueue::underrun:
673 * @multiqueue: the multiqueue instance
675 * This signal is emitted from the streaming thread when there is
676 * no data in any of the queues inside the multiqueue instance (underrun).
678 * This indicates either starvation or EOS from the upstream data sources.
680 gst_multi_queue_signals[SIGNAL_UNDERRUN] =
681 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
682 G_STRUCT_OFFSET (GstMultiQueueClass, underrun), NULL, NULL,
683 NULL, G_TYPE_NONE, 0);
686 * GstMultiQueue::overrun:
687 * @multiqueue: the multiqueue instance
689 * Reports that one of the queues in the multiqueue is full (overrun).
690 * A queue is full if the total amount of data inside it (num-buffers, time,
691 * size) is higher than the boundary values which can be set through the
692 * GObject properties.
694 * This can be used as an indicator of pre-roll.
696 gst_multi_queue_signals[SIGNAL_OVERRUN] =
697 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
698 G_STRUCT_OFFSET (GstMultiQueueClass, overrun), NULL, NULL,
699 NULL, G_TYPE_NONE, 0);
703 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BYTES,
704 g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
705 "Max. amount of data in the queue (bytes, 0=disable)",
706 0, G_MAXUINT, DEFAULT_MAX_SIZE_BYTES,
707 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
708 G_PARAM_STATIC_STRINGS));
709 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BUFFERS,
710 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
711 "Max. number of buffers in the queue (0=disable)", 0, G_MAXUINT,
712 DEFAULT_MAX_SIZE_BUFFERS,
713 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
714 G_PARAM_STATIC_STRINGS));
715 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_TIME,
716 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
717 "Max. amount of data in the queue (in ns, 0=disable)", 0, G_MAXUINT64,
718 DEFAULT_MAX_SIZE_TIME, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
719 G_PARAM_STATIC_STRINGS));
720 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
721 g_object_class_install_property (gobject_class, PROP_CURR_SIZE_BYTES,
722 g_param_spec_uint ("curr-size-bytes", "Current buffered size (kB)",
723 "buffered amount of data in the queue (bytes)", 0, G_MAXUINT,
724 0, G_PARAM_READABLE | GST_PARAM_MUTABLE_PLAYING |
725 G_PARAM_STATIC_STRINGS));
727 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BYTES,
728 g_param_spec_uint ("extra-size-bytes", "Extra Size (kB)",
729 "Amount of data the queues can grow if one of them is empty (bytes, 0=disable)"
730 " (NOT IMPLEMENTED)",
731 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BYTES,
732 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
733 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BUFFERS,
734 g_param_spec_uint ("extra-size-buffers", "Extra Size (buffers)",
735 "Amount of buffers the queues can grow if one of them is empty (0=disable)"
736 " (NOT IMPLEMENTED)",
737 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BUFFERS,
738 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
739 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_TIME,
740 g_param_spec_uint64 ("extra-size-time", "Extra Size (ns)",
741 "Amount of time the queues can grow if one of them is empty (in ns, 0=disable)"
742 " (NOT IMPLEMENTED)",
743 0, G_MAXUINT64, DEFAULT_EXTRA_SIZE_TIME,
744 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
747 * GstMultiQueue:use-buffering:
749 * Enable the buffering option in multiqueue so that BUFFERING messages are
750 * emitted based on low-/high-percent thresholds.
752 g_object_class_install_property (gobject_class, PROP_USE_BUFFERING,
753 g_param_spec_boolean ("use-buffering", "Use buffering",
754 "Emit GST_MESSAGE_BUFFERING based on low-/high-percent thresholds "
755 "(0% = low-watermark, 100% = high-watermark)",
756 DEFAULT_USE_BUFFERING, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
757 G_PARAM_STATIC_STRINGS));
759 * GstMultiQueue:low-percent:
761 * Low threshold percent for buffering to start.
763 g_object_class_install_property (gobject_class, PROP_LOW_PERCENT,
764 g_param_spec_int ("low-percent", "Low percent",
765 "Low threshold for buffering to start. Only used if use-buffering is True "
766 "(Deprecated: use low-watermark instead)",
767 0, 100, DEFAULT_LOW_WATERMARK * 100,
768 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
770 * GstMultiQueue:high-percent:
772 * High threshold percent for buffering to finish.
774 g_object_class_install_property (gobject_class, PROP_HIGH_PERCENT,
775 g_param_spec_int ("high-percent", "High percent",
776 "High threshold for buffering to finish. Only used if use-buffering is True "
777 "(Deprecated: use high-watermark instead)",
778 0, 100, DEFAULT_HIGH_WATERMARK * 100,
779 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
781 * GstMultiQueue:low-watermark:
783 * Low threshold watermark for buffering to start.
787 g_object_class_install_property (gobject_class, PROP_LOW_WATERMARK,
788 g_param_spec_double ("low-watermark", "Low watermark",
789 "Low threshold for buffering to start. Only used if use-buffering is True",
790 0.0, 1.0, DEFAULT_LOW_WATERMARK,
791 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
793 * GstMultiQueue:high-watermark:
795 * High threshold watermark for buffering to finish.
799 g_object_class_install_property (gobject_class, PROP_HIGH_WATERMARK,
800 g_param_spec_double ("high-watermark", "High watermark",
801 "High threshold for buffering to finish. Only used if use-buffering is True",
802 0.0, 1.0, DEFAULT_HIGH_WATERMARK,
803 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
806 * GstMultiQueue:sync-by-running-time:
808 * If enabled multiqueue will synchronize deactivated or not-linked streams
809 * to the activated and linked streams by taking the running time.
810 * Otherwise multiqueue will synchronize the deactivated or not-linked
811 * streams by keeping the order in which buffers and events arrived compared
812 * to active and linked streams.
814 g_object_class_install_property (gobject_class, PROP_SYNC_BY_RUNNING_TIME,
815 g_param_spec_boolean ("sync-by-running-time", "Sync By Running Time",
816 "Synchronize deactivated or not-linked streams by running time",
817 DEFAULT_SYNC_BY_RUNNING_TIME,
818 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
820 g_object_class_install_property (gobject_class, PROP_USE_INTERLEAVE,
821 g_param_spec_boolean ("use-interleave", "Use interleave",
822 "Adjust time limits based on input interleave",
823 DEFAULT_USE_INTERLEAVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
825 g_object_class_install_property (gobject_class, PROP_UNLINKED_CACHE_TIME,
826 g_param_spec_uint64 ("unlinked-cache-time", "Unlinked cache time (ns)",
827 "Extra buffering in time for unlinked streams (if 'sync-by-running-time')",
828 0, G_MAXUINT64, DEFAULT_UNLINKED_CACHE_TIME,
829 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
830 G_PARAM_STATIC_STRINGS));
832 g_object_class_install_property (gobject_class, PROP_MINIMUM_INTERLEAVE,
833 g_param_spec_uint64 ("min-interleave-time", "Minimum interleave time",
834 "Minimum extra buffering for deinterleaving (size of the queues) when use-interleave=true",
835 0, G_MAXUINT64, DEFAULT_MINIMUM_INTERLEAVE,
836 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
837 G_PARAM_STATIC_STRINGS));
840 * GstMultiQueue:stats:
842 * Various #GstMultiQueue statistics. This property returns a #GstStructure
843 * with name "application/x-gst-multi-queue-stats" with the following fields:
845 * - "queues" GST_TYPE_ARRAY Contains one GstStructure named "queue_%d"
846 * (where \%d is the queue's ID) per internal queue:
847 * - "buffers" G_TYPE_UINT The queue's current level of buffers
848 * - "bytes" G_TYPE_UINT The queue's current level of bytes
849 * - "time" G_TYPE_UINT64 The queue's current level of time
853 g_object_class_install_property (gobject_class, PROP_STATS,
854 g_param_spec_boxed ("stats", "Stats",
855 "Multiqueue Statistics",
856 GST_TYPE_STRUCTURE, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
858 gobject_class->finalize = gst_multi_queue_finalize;
860 gst_element_class_set_static_metadata (gstelement_class,
862 "Generic", "Multiple data queue", "Edward Hervey <edward@fluendo.com>");
863 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
864 &sinktemplate, GST_TYPE_MULTIQUEUE_PAD);
865 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
866 &srctemplate, GST_TYPE_MULTIQUEUE_PAD);
868 gstelement_class->request_new_pad =
869 GST_DEBUG_FUNCPTR (gst_multi_queue_request_new_pad);
870 gstelement_class->release_pad =
871 GST_DEBUG_FUNCPTR (gst_multi_queue_release_pad);
872 gstelement_class->change_state =
873 GST_DEBUG_FUNCPTR (gst_multi_queue_change_state);
875 gst_type_mark_as_plugin_api (GST_TYPE_MULTIQUEUE_PAD, 0);
879 gst_multi_queue_init (GstMultiQueue * mqueue)
881 mqueue->nbqueues = 0;
882 mqueue->queues = NULL;
884 mqueue->max_size.bytes = DEFAULT_MAX_SIZE_BYTES;
885 mqueue->max_size.visible = DEFAULT_MAX_SIZE_BUFFERS;
886 mqueue->max_size.time = DEFAULT_MAX_SIZE_TIME;
888 mqueue->extra_size.bytes = DEFAULT_EXTRA_SIZE_BYTES;
889 mqueue->extra_size.visible = DEFAULT_EXTRA_SIZE_BUFFERS;
890 mqueue->extra_size.time = DEFAULT_EXTRA_SIZE_TIME;
892 mqueue->use_buffering = DEFAULT_USE_BUFFERING;
893 mqueue->low_watermark = DEFAULT_LOW_WATERMARK * MAX_BUFFERING_LEVEL;
894 mqueue->high_watermark = DEFAULT_HIGH_WATERMARK * MAX_BUFFERING_LEVEL;
896 mqueue->sync_by_running_time = DEFAULT_SYNC_BY_RUNNING_TIME;
897 mqueue->use_interleave = DEFAULT_USE_INTERLEAVE;
898 mqueue->min_interleave_time = DEFAULT_MINIMUM_INTERLEAVE;
899 mqueue->unlinked_cache_time = DEFAULT_UNLINKED_CACHE_TIME;
903 mqueue->high_time = GST_CLOCK_STIME_NONE;
905 g_mutex_init (&mqueue->qlock);
906 g_mutex_init (&mqueue->reconf_lock);
907 g_mutex_init (&mqueue->buffering_post_lock);
911 gst_multi_queue_finalize (GObject * object)
913 GstMultiQueue *mqueue = GST_MULTI_QUEUE (object);
915 g_list_free_full (mqueue->queues, (GDestroyNotify) gst_single_queue_unref);
916 mqueue->queues = NULL;
917 mqueue->queues_cookie++;
919 /* free/unref instance data */
920 g_mutex_clear (&mqueue->qlock);
921 g_mutex_clear (&mqueue->reconf_lock);
922 g_mutex_clear (&mqueue->buffering_post_lock);
924 G_OBJECT_CLASS (parent_class)->finalize (object);
927 #define SET_CHILD_PROPERTY(mq,format) G_STMT_START { \
928 GList * tmp = mq->queues; \
930 GstSingleQueue *q = (GstSingleQueue*)tmp->data; \
931 q->max_size.format = mq->max_size.format; \
932 update_buffering (mq, q); \
933 gst_data_queue_limits_changed (q->queue); \
934 tmp = g_list_next(tmp); \
939 gst_multi_queue_set_property (GObject * object, guint prop_id,
940 const GValue * value, GParamSpec * pspec)
942 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
945 case PROP_MAX_SIZE_BYTES:
946 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
947 mq->max_size.bytes = g_value_get_uint (value);
948 SET_CHILD_PROPERTY (mq, bytes);
949 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
950 gst_multi_queue_post_buffering (mq);
952 case PROP_MAX_SIZE_BUFFERS:
955 gint new_size = g_value_get_uint (value);
957 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
959 mq->max_size.visible = new_size;
963 GstDataQueueSize size;
964 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
965 gst_data_queue_get_level (q->queue, &size);
967 GST_DEBUG_ID (q->debug_id, "Requested buffers size: %d,"
968 " current: %d, current max %d", new_size, size.visible,
969 q->max_size.visible);
971 /* do not reduce max size below current level if the single queue
972 * has grown because of empty queue */
974 q->max_size.visible = new_size;
975 } else if (q->max_size.visible == 0) {
976 q->max_size.visible = MAX (new_size, size.visible);
977 } else if (new_size > size.visible) {
978 q->max_size.visible = new_size;
980 update_buffering (mq, q);
981 gst_data_queue_limits_changed (q->queue);
982 tmp = g_list_next (tmp);
985 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
986 gst_multi_queue_post_buffering (mq);
990 case PROP_MAX_SIZE_TIME:
991 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
992 mq->max_size.time = g_value_get_uint64 (value);
993 SET_CHILD_PROPERTY (mq, time);
994 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
995 gst_multi_queue_post_buffering (mq);
997 case PROP_EXTRA_SIZE_BYTES:
998 mq->extra_size.bytes = g_value_get_uint (value);
1000 case PROP_EXTRA_SIZE_BUFFERS:
1001 mq->extra_size.visible = g_value_get_uint (value);
1003 case PROP_EXTRA_SIZE_TIME:
1004 mq->extra_size.time = g_value_get_uint64 (value);
1006 case PROP_USE_BUFFERING:
1007 mq->use_buffering = g_value_get_boolean (value);
1008 recheck_buffering_status (mq);
1010 case PROP_LOW_PERCENT:
1011 mq->low_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
1012 /* Recheck buffering status - the new low_watermark value might
1013 * be above the current fill level. If the old low_watermark one
1014 * was below the current level, this means that mq->buffering is
1015 * disabled and needs to be re-enabled. */
1016 recheck_buffering_status (mq);
1018 case PROP_HIGH_PERCENT:
1019 mq->high_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
1020 recheck_buffering_status (mq);
1022 case PROP_LOW_WATERMARK:
1023 mq->low_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
1024 recheck_buffering_status (mq);
1026 case PROP_HIGH_WATERMARK:
1027 mq->high_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
1028 recheck_buffering_status (mq);
1030 case PROP_SYNC_BY_RUNNING_TIME:
1031 mq->sync_by_running_time = g_value_get_boolean (value);
1033 case PROP_USE_INTERLEAVE:
1034 mq->use_interleave = g_value_get_boolean (value);
1036 case PROP_UNLINKED_CACHE_TIME:
1037 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1038 mq->unlinked_cache_time = g_value_get_uint64 (value);
1039 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1040 gst_multi_queue_post_buffering (mq);
1042 case PROP_MINIMUM_INTERLEAVE:
1043 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1044 mq->min_interleave_time = g_value_get_uint64 (value);
1045 if (mq->use_interleave)
1046 calculate_interleave (mq, NULL);
1047 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1050 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1055 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
/* Tizen-specific helper: returns the total number of bytes currently queued
 * across all single queues, by summing each queue's data-queue byte level.
 * NOTE(review): return-type line and braces are elided in this extract;
 * presumably called with the multiqueue mutex held -- confirm at call sites. */
1057 get_current_size_bytes (GstMultiQueue * mq)
1060 guint current_size_bytes = 0;
1062 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
1063 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
1064 GstDataQueueSize size;
/* Query current visible/byte levels of this single queue */
1066 gst_data_queue_get_level (sq->queue, &size);
1068 current_size_bytes += size.bytes;
1070 GST_DEBUG_OBJECT (mq,
1071 "queue %d: bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
1072 G_GUINT64_FORMAT, sq->id, size.bytes, sq->max_size.bytes,
1073 sq->cur_time, sq->max_size.time);
1076 GST_INFO_OBJECT (mq, "current_size_bytes : %u", current_size_bytes);
1078 return current_size_bytes;
1082 /* Called with mutex held */
/* Builds an "application/x-gst-multi-queue-stats" GstStructure containing,
 * for every single queue, a sub-structure with its current buffer count,
 * byte count and queued time (sq->cur_time). The per-queue structures are
 * collected into a GST_TYPE_ARRAY field named "queues".
 * Ownership of the returned structure passes to the caller. */
1083 static GstStructure *
1084 gst_multi_queue_get_stats (GstMultiQueue * mq)
1087 gst_structure_new_empty ("application/x-gst-multi-queue-stats");
1091 if (mq->queues != NULL) {
1092 GValue queues = G_VALUE_INIT;
1093 GValue v = G_VALUE_INIT;
1095 g_value_init (&queues, GST_TYPE_ARRAY);
1097 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
1098 GstDataQueueSize level;
1101 g_value_init (&v, GST_TYPE_STRUCTURE);
1103 sq = (GstSingleQueue *) tmp->data;
1104 gst_data_queue_get_level (sq->queue, &level);
/* Name each sub-structure after the queue id, e.g. "queue_0" */
1105 id = g_strdup_printf ("queue_%d", sq->id);
1106 s = gst_structure_new (id,
1107 "buffers", G_TYPE_UINT, level.visible,
1108 "bytes", G_TYPE_UINT, level.bytes,
1109 "time", G_TYPE_UINT64, sq->cur_time, NULL);
/* take_boxed/take_value transfer ownership; no extra unrefs needed */
1110 g_value_take_boxed (&v, s);
1111 gst_value_array_append_and_take_value (&queues, &v);
1114 gst_structure_take_value (ret, "queues", &queues);
/* GObject get_property vfunc: reads the requested property into @value.
 * All reads are performed under the multiqueue mutex so they are consistent
 * with concurrent set_property / streaming-thread updates.
 * NOTE(review): the switch statement and per-case break lines are elided in
 * this extract. */
1121 gst_multi_queue_get_property (GObject * object, guint prop_id,
1122 GValue * value, GParamSpec * pspec)
1124 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
1126 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1129 case PROP_EXTRA_SIZE_BYTES:
1130 g_value_set_uint (value, mq->extra_size.bytes);
1132 case PROP_EXTRA_SIZE_BUFFERS:
1133 g_value_set_uint (value, mq->extra_size.visible);
1135 case PROP_EXTRA_SIZE_TIME:
1136 g_value_set_uint64 (value, mq->extra_size.time);
1138 case PROP_MAX_SIZE_BYTES:
1139 g_value_set_uint (value, mq->max_size.bytes);
1141 case PROP_MAX_SIZE_BUFFERS:
1142 g_value_set_uint (value, mq->max_size.visible);
1144 case PROP_MAX_SIZE_TIME:
1145 g_value_set_uint64 (value, mq->max_size.time);
1147 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
1148 case PROP_CURR_SIZE_BYTES:
1149 g_value_set_uint (value, get_current_size_bytes(mq));
1152 case PROP_USE_BUFFERING:
1153 g_value_set_boolean (value, mq->use_buffering);
/* Watermarks are stored scaled by BUF_LEVEL_PERCENT_FACTOR; the *-percent
 * properties expose them back as plain integers */
1155 case PROP_LOW_PERCENT:
1156 g_value_set_int (value, mq->low_watermark / BUF_LEVEL_PERCENT_FACTOR);
1158 case PROP_HIGH_PERCENT:
1159 g_value_set_int (value, mq->high_watermark / BUF_LEVEL_PERCENT_FACTOR);
/* The *-watermark properties expose the same values as a 0.0-1.0 fraction */
1161 case PROP_LOW_WATERMARK:
1162 g_value_set_double (value, mq->low_watermark /
1163 (gdouble) MAX_BUFFERING_LEVEL);
1165 case PROP_HIGH_WATERMARK:
1166 g_value_set_double (value, mq->high_watermark /
1167 (gdouble) MAX_BUFFERING_LEVEL);
1169 case PROP_SYNC_BY_RUNNING_TIME:
1170 g_value_set_boolean (value, mq->sync_by_running_time);
1172 case PROP_USE_INTERLEAVE:
1173 g_value_set_boolean (value, mq->use_interleave);
1175 case PROP_UNLINKED_CACHE_TIME:
1176 g_value_set_uint64 (value, mq->unlinked_cache_time);
1178 case PROP_MINIMUM_INTERLEAVE:
1179 g_value_set_uint64 (value, mq->min_interleave_time);
/* stats property: caller takes ownership of the boxed structure */
1182 g_value_take_boxed (value, gst_multi_queue_get_stats (mq));
1185 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1189 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Pad iterate-internal-links vfunc: given one pad of a single queue, returns
 * a single-element iterator containing the opposite pad (sinkN <-> srcN).
 * Pads are held via weak refs, so either side may already be gone; in that
 * case both refs are cleared and (presumably) an empty/NULL iterator results
 * -- the assignment of `opad` is elided in this extract. */
1192 static GstIterator *
1193 gst_multi_queue_iterate_internal_links (GstPad * pad, GstObject * parent)
1195 GstIterator *it = NULL;
1196 GstPad *opad, *sinkpad, *srcpad;
1197 GstSingleQueue *squeue;
1198 GstMultiQueue *mq = GST_MULTI_QUEUE (parent);
1199 GValue val = { 0, };
1201 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1202 squeue = GST_MULTIQUEUE_PAD (pad)->sq;
1206 srcpad = g_weak_ref_get (&squeue->srcpad);
1207 sinkpad = g_weak_ref_get (&squeue->sinkpad);
1208 if (sinkpad == pad && srcpad) {
/* queried from the sink side: the internal link is the srcpad */
1210 gst_clear_object (&sinkpad);
1212 } else if (srcpad == pad && sinkpad) {
/* queried from the src side: the internal link is the sinkpad */
1214 gst_clear_object (&srcpad);
1217 gst_clear_object (&srcpad);
1218 gst_clear_object (&sinkpad);
1222 g_value_init (&val, GST_TYPE_PAD);
1223 g_value_set_object (&val, opad);
1224 it = gst_iterator_new_single (GST_TYPE_PAD, &val);
1225 g_value_unset (&val);
1227 gst_object_unref (opad);
1230 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1237 * GstElement methods
/* Element request_new_pad vfunc: creates a new single queue (sinkN/srcN pad
 * pair) and returns the sink pad. The numeric id is parsed from the
 * requested name (e.g. "sink_3" -> 3); reconf_lock serializes creation
 * against concurrent release_pad calls. */
1241 gst_multi_queue_request_new_pad (GstElement * element, GstPadTemplate * temp,
1242 const gchar * name, const GstCaps * caps)
1244 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1245 GstSingleQueue *squeue;
/* name + 4 skips the "sink" prefix; sscanf leaves temp_id untouched if the
 * "_%u" suffix is absent (NOTE(review): temp_id's initialisation is elided) */
1250 sscanf (name + 4, "_%u", &temp_id);
1251 GST_LOG_OBJECT (element, "name : %s (id %d)", GST_STR_NULL (name), temp_id);
1254 g_mutex_lock (&mqueue->reconf_lock);
1255 /* Create a new single queue, add the sink and source pad and return the sink pad */
1256 squeue = gst_single_queue_new (mqueue, temp_id);
1257 g_mutex_unlock (&mqueue->reconf_lock);
1259 new_pad = squeue ? g_weak_ref_get (&squeue->sinkpad) : NULL;
1260 /* request pad assumes the element is owning the ref of the pad it returns */
1262 gst_object_unref (new_pad);
1264 GST_DEBUG_OBJECT (mqueue, "Returning pad %" GST_PTR_FORMAT, new_pad);
/* Element release_pad vfunc: finds the single queue owning @pad (a sink pad),
 * removes it from mqueue->queues, deactivates and removes both its pads, and
 * tears the queue down. Lock order is reconf_lock before the queue mutex to
 * avoid deadlocks between concurrent release/request operations. */
1270 gst_multi_queue_release_pad (GstElement * element, GstPad * pad)
1272 GstPad *sinkpad = NULL, *srcpad = NULL;
1273 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1274 GstSingleQueue *sq = NULL;
1277 GST_LOG_OBJECT (element, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
1279 /* Take the reconfiguration lock before the qlock to avoid deadlocks
1280 * from two release_pad running in parallel on different mqueue slots.
1281 * We need reconf_lock for removing the singlequeue from the list, to
1282 * prevent overlapping release/request from causing problems */
1283 g_mutex_lock (&mqueue->reconf_lock);
1285 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1286 /* Find which single queue it belongs to, knowing that it should be a sinkpad */
1287 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1288 sq = (GstSingleQueue *) tmp->data;
1289 sinkpad = g_weak_ref_get (&sq->sinkpad);
1291 if (sinkpad == pad) {
1292 srcpad = g_weak_ref_get (&sq->srcpad);
1296 gst_object_unref (sinkpad);
/* Not found: warn, drop locks and refs, and bail out (return is elided) */
1300 gst_clear_object (&sinkpad);
1301 gst_clear_object (&srcpad);
1302 GST_WARNING_OBJECT (mqueue, "That pad doesn't belong to this element ???");
1303 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1304 g_mutex_unlock (&mqueue->reconf_lock);
1308 /* FIXME: The removal of the singlequeue should probably not happen until it
1309 * finishes draining */
1311 /* remove it from the list */
1312 mqueue->queues = g_list_delete_link (mqueue->queues, tmp);
1313 mqueue->queues_cookie++;
1315 /* FIXME : recompute next-non-linked */
1316 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1318 /* delete SingleQueue */
/* Flushing wakes up any thread blocked on the data queue before teardown */
1319 gst_data_queue_set_flushing (sq->queue, TRUE);
1321 gst_pad_set_active (srcpad, FALSE);
1322 gst_pad_set_active (sinkpad, FALSE);
1323 gst_element_remove_pad (element, srcpad);
1324 gst_element_remove_pad (element, sinkpad);
1325 gst_object_unref (srcpad);
1326 gst_object_unref (sinkpad);
1328 g_mutex_unlock (&mqueue->reconf_lock);
/* Element change_state vfunc. Before chaining up:
 *  - READY->PAUSED: clears per-queue flushing flags and resets stream group
 *    ids so streaming can start.
 *  - PAUSED->PLAYING (Tizen only): force-stops buffering reporting.
 *  - PAUSED->READY: sets flushing on every queue and signals waiting
 *    threads so blocked tasks can exit.
 * NOTE(review): the post-chain-up switch body is elided in this extract. */
1331 static GstStateChangeReturn
1332 gst_multi_queue_change_state (GstElement * element, GstStateChange transition)
1334 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1335 GstSingleQueue *sq = NULL;
1336 GstStateChangeReturn result;
1338 switch (transition) {
1339 case GST_STATE_CHANGE_READY_TO_PAUSED:{
1342 /* Set all pads to non-flushing */
1343 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1344 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1345 sq = (GstSingleQueue *) tmp->data;
1346 sq->flushing = FALSE;
1347 sq->sink_stream_gid = sq->src_stream_gid = GST_GROUP_ID_INVALID;
1350 /* the visible limit might not have been set on single queues that have grown because of other queueus were empty */
1351 SET_CHILD_PROPERTY (mqueue, visible);
1353 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1354 gst_multi_queue_post_buffering (mqueue);
1358 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
1359 /* to stop buffering during playing state */
1360 case GST_STATE_CHANGE_PAUSED_TO_PLAYING:{
1361 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1362 mqueue->buffering = FALSE;
1363 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1364 gst_multi_queue_post_buffering (mqueue);
1368 case GST_STATE_CHANGE_PAUSED_TO_READY:{
1371 /* Un-wait all waiting pads */
1372 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1373 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1374 sq = (GstSingleQueue *) tmp->data;
1375 sq->flushing = TRUE;
1376 g_cond_signal (&sq->turn);
/* also release any thread blocked waiting for a serialized query answer */
1378 sq->last_query = FALSE;
1379 g_cond_signal (&sq->query_handled);
1381 mqueue->interleave_incomplete = FALSE;
1382 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1389 result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
1391 switch (transition) {
/* Starts the streaming task on the single queue's src pad; the task runs
 * gst_multi_queue_loop. Returns whether the task was started.
 * NOTE(review): the NULL-srcpad guard and return are elided in this extract. */
1400 gst_single_queue_start (GstMultiQueue * mq, GstSingleQueue * sq)
1402 gboolean res = FALSE;
1403 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1405 GST_LOG_ID (sq->debug_id, "starting task");
1408 res = gst_pad_start_task (srcpad,
1409 (GstTaskFunction) gst_multi_queue_loop, srcpad, NULL);
1410 gst_object_unref (srcpad);
/* Pauses the src pad streaming task and taints both time trackers so the
 * queue's time level is recomputed when streaming resumes.
 * NOTE(review): the NULL-srcpad guard and return are elided in this extract. */
1417 gst_single_queue_pause (GstMultiQueue * mq, GstSingleQueue * sq)
1419 gboolean result = FALSE;
1420 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1422 GST_LOG_ID (sq->debug_id, "pausing task");
1424 result = gst_pad_pause_task (srcpad);
1425 gst_object_unref (srcpad);
1428 sq->sink_tainted = sq->src_tainted = TRUE;
/* Stops the src pad streaming task (blocks until it has finished) and taints
 * both time trackers, mirroring gst_single_queue_pause.
 * NOTE(review): the NULL-srcpad guard and return are elided in this extract. */
1434 gst_single_queue_stop (GstMultiQueue * mq, GstSingleQueue * sq)
1436 gboolean result = FALSE;
1437 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1439 GST_LOG_ID (sq->debug_id, "stopping task");
1441 result = gst_pad_stop_task (srcpad);
1442 gst_object_unref (srcpad);
1444 sq->sink_tainted = sq->src_tainted = TRUE;
/* Handles flush-start (@flush TRUE) and flush-stop (@flush FALSE) for one
 * single queue. Flush-start marks the queue flushing and wakes any blocked
 * threads; flush-stop (the visible else-branch below) empties the queue,
 * reinitialises segments and timing state, and re-arms the data queue. */
1450 gst_single_queue_flush (GstMultiQueue * mq, GstSingleQueue * sq, gboolean flush,
1453 GST_DEBUG_ID (sq->debug_id, "flush %s", (flush ? "start" : "stop"));
1456 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1457 sq->srcresult = GST_FLOW_FLUSHING;
1458 gst_data_queue_set_flushing (sq->queue, TRUE);
1460 sq->flushing = TRUE;
1462 /* wake up non-linked task */
1463 GST_LOG_ID (sq->debug_id, "Waking up eventually waiting task");
1464 g_cond_signal (&sq->turn);
1465 sq->last_query = FALSE;
1466 g_cond_signal (&sq->query_handled);
1467 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* drop all queued items (done without the mutex held) */
1469 gst_single_queue_flush_queue (sq, full);
1471 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1472 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
1473 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
1474 sq->has_src_segment = FALSE;
1475 /* All pads start off OK for a smooth kick-off */
1476 sq->srcresult = GST_FLOW_OK;
/* restore the configured visible limit (it may have been grown) */
1479 sq->max_size.visible = mq->max_size.visible;
1481 sq->is_segment_done = FALSE;
1484 sq->last_oldid = G_MAXUINT32;
1485 sq->next_time = GST_CLOCK_STIME_NONE;
1486 sq->last_time = GST_CLOCK_STIME_NONE;
1487 sq->cached_sinktime = GST_CLOCK_STIME_NONE;
1488 sq->group_high_time = GST_CLOCK_STIME_NONE;
1489 gst_data_queue_set_flushing (sq->queue, FALSE);
1491 /* We will become active again on the next buffer/gap */
1494 /* Reset high time to be recomputed next */
1495 mq->high_time = GST_CLOCK_STIME_NONE;
1497 sq->flushing = FALSE;
1498 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1502 /* WITH LOCK TAKEN */
/* Computes the fill level of one single queue on a 0..MAX_BUFFERING_LEVEL
 * scale: the max of the byte-based and time-based fill ratios. EOS,
 * segment-done and not-linked queues report full so they never hold
 * buffering back. */
1504 get_buffering_level (GstMultiQueue * mq, GstSingleQueue * sq)
1506 GstDataQueueSize size;
1507 gint buffering_level, tmp;
1509 gst_data_queue_get_level (sq->queue, &size);
1511 GST_DEBUG_ID (sq->debug_id,
1512 "visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
1513 G_GUINT64_FORMAT, size.visible, sq->max_size.visible,
1514 size.bytes, sq->max_size.bytes, sq->cur_time, sq->max_size.time);
1516 /* get bytes and time buffer levels and take the max */
1517 if (sq->is_eos || sq->is_segment_done || sq->srcresult == GST_FLOW_NOT_LINKED
1519 buffering_level = MAX_BUFFERING_LEVEL;
1521 buffering_level = 0;
1522 if (sq->max_size.time > 0) {
/* scale cur_time against the time limit (uint64 math avoids overflow) */
1524 gst_util_uint64_scale (sq->cur_time,
1525 MAX_BUFFERING_LEVEL, sq->max_size.time);
1526 buffering_level = MAX (buffering_level, tmp);
1528 if (sq->max_size.bytes > 0) {
1530 gst_util_uint64_scale_int (size.bytes,
1531 MAX_BUFFERING_LEVEL, sq->max_size.bytes);
1532 buffering_level = MAX (buffering_level, tmp);
1536 return buffering_level;
1539 /* WITH LOCK TAKEN */
/* Re-evaluates the buffering state after @sq's level changed. Leaves
 * buffering once this queue reaches the high watermark; (re)enters buffering
 * only if *no* queue is at the high watermark and this one dropped below the
 * low watermark. Updates mq->buffering_percent via SET_PERCENT; the actual
 * message is posted later by gst_multi_queue_post_buffering(). */
1541 update_buffering (GstMultiQueue * mq, GstSingleQueue * sq)
1543 gint buffering_level, percent;
1545 /* nothing to do when we are not in buffering mode */
1546 if (!mq->use_buffering)
1549 #ifdef TIZEN_FEATURE_MQ_SKIP_BUFFERING
/* Tizen: only audio/video streams participate in buffering decisions */
1550 GstPad *sinkpad = g_weak_ref_get (&sq->sinkpad);
1552 GstCaps *caps = gst_pad_get_current_caps (sinkpad);
1556 /* skip buffering except audio and video */
1557 if ((s = gst_caps_get_structure (caps, 0)) &&
1558 (!g_strrstr (gst_structure_get_name (s), "video")) &&
1559 (!g_strrstr (gst_structure_get_name (s), "audio"))) {
1560 gst_caps_unref (caps);
1561 gst_clear_object (&sinkpad);
1564 gst_caps_unref (caps);
1567 gst_clear_object (&sinkpad);
1570 buffering_level = get_buffering_level (mq, sq);
1572 /* scale so that if buffering_level equals the high watermark,
1573 * the percentage is 100% */
1574 percent = gst_util_uint64_scale (buffering_level, 100, mq->high_watermark);
1579 if (mq->buffering) {
1580 if (buffering_level >= mq->high_watermark) {
1581 mq->buffering = FALSE;
1583 /* make sure it increases */
1584 percent = MAX (mq->buffering_percent, percent);
1586 SET_PERCENT (mq, percent);
1589 gboolean is_buffering = TRUE;
/* only start buffering if no single queue is already above the high mark */
1591 for (iter = mq->queues; iter; iter = g_list_next (iter)) {
1592 GstSingleQueue *oq = (GstSingleQueue *) iter->data;
1594 if (get_buffering_level (mq, oq) >= mq->high_watermark) {
1595 is_buffering = FALSE;
1601 if (is_buffering && buffering_level < mq->low_watermark) {
1602 mq->buffering = TRUE;
1603 SET_PERCENT (mq, percent);
/* Posts a GST_MESSAGE_BUFFERING on the bus if the percentage changed since
 * the last post. buffering_post_lock serializes posters so messages are
 * emitted in order; the message itself is created under the queue mutex but
 * posted outside it to avoid calling out with the lock held. */
1609 gst_multi_queue_post_buffering (GstMultiQueue * mq)
1611 GstMessage *msg = NULL;
1613 g_mutex_lock (&mq->buffering_post_lock);
1614 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1615 if (mq->buffering_percent_changed) {
1616 gint percent = mq->buffering_percent;
1618 mq->buffering_percent_changed = FALSE;
1620 GST_DEBUG_OBJECT (mq, "Going to post buffering: %d%%", percent);
1621 msg = gst_message_new_buffering (GST_OBJECT_CAST (mq), percent);
1623 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1626 gst_element_post_message (GST_ELEMENT_CAST (mq), msg);
1628 g_mutex_unlock (&mq->buffering_post_lock);
/* Re-evaluates buffering after a property change. If buffering was just
 * disabled while active, force 100% so downstream resumes. If enabled,
 * resets the cached percentage and recomputes it from every single queue
 * (the new watermarks may sit on the other side of the current levels). */
1632 recheck_buffering_status (GstMultiQueue * mq)
1634 if (!mq->use_buffering && mq->buffering) {
1635 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1636 mq->buffering = FALSE;
1637 GST_DEBUG_OBJECT (mq,
1638 "Buffering property disabled, but queue was still buffering; "
1639 "setting buffering percentage to 100%%");
1640 SET_PERCENT (mq, 100);
1641 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1644 if (mq->use_buffering) {
1648 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1650 /* force buffering percentage to be recalculated */
1651 old_perc = mq->buffering_percent;
1652 mq->buffering_percent = 0;
1656 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
1657 update_buffering (mq, q);
/* limits may have effectively changed; wake any blocked producer */
1658 gst_data_queue_limits_changed (q->queue);
1659 tmp = g_list_next (tmp);
1662 GST_DEBUG_OBJECT (mq,
1663 "Recalculated buffering percentage: old: %d%% new: %d%%",
1664 old_perc, mq->buffering_percent);
1666 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1669 gst_multi_queue_post_buffering (mq);
/* Recomputes the global input interleave: the spread between the lowest and
 * highest cached sink running times across all (non-sparse, non-EOS) single
 * queues, padded by 50% plus the configured minimum. The result feeds
 * max-size-time so the queues can absorb muxed-stream interleave. Streams
 * that have not produced data yet (or whose data precedes the segment start)
 * make the interleave grow progressively instead. Called with the multiqueue
 * mutex held; @sq may be NULL (e.g. from set_property). */
1673 calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq)
1675 GstClockTimeDiff low, high;
1676 GstClockTime interleave, other_interleave = 0;
1677 gboolean some_inactive = FALSE;
1680 low = high = GST_CLOCK_STIME_NONE;
1681 interleave = mq->interleave;
1682 /* Go over all single queues and calculate lowest/highest value */
1683 for (tmp = mq->queues; tmp; tmp = tmp->next) {
1684 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
1685 /* Ignore sparse streams for interleave calculation */
1689 /* If some streams aren't active yet (haven't received any buffers), we will
1690 * grow interleave accordingly */
1692 some_inactive = TRUE;
1696 /* Calculate within each streaming thread */
1697 if (sq && sq->thread != oq->thread) {
1698 if (oq->interleave > other_interleave)
1699 other_interleave = oq->interleave;
1703 /* If the stream isn't EOS, update the low/high input value */
1704 if (GST_CLOCK_STIME_IS_VALID (oq->cached_sinktime) && !oq->is_eos) {
1705 if (low == GST_CLOCK_STIME_NONE || oq->cached_sinktime < low)
1706 low = oq->cached_sinktime;
1707 if (high == GST_CLOCK_STIME_NONE || oq->cached_sinktime > high)
1708 high = oq->cached_sinktime;
1710 /* If the input is before the segment start, consider as inactive to allow
1711 * the interleave to grow until *all* streams have data within the segment.
1713 * The reason for this is that there is no requirements for data before
1714 * the segment start to be "aligned" and therefore interleave calculation
1715 * can't reliably be done. For example a demuxer could provide video data
1716 * from the previous keyframe but audio only from just before the segment
1718 if (oq->cached_sinktime < 0)
1719 some_inactive = TRUE;
1721 GST_LOG_ID (oq->debug_id,
1722 "sinktime:%" GST_STIME_FORMAT " low:%" GST_STIME_FORMAT
1723 " high:%" GST_STIME_FORMAT,
1724 GST_STIME_ARGS (oq->cached_sinktime), GST_STIME_ARGS (low),
1725 GST_STIME_ARGS (high));
1728 if (GST_CLOCK_STIME_IS_VALID (low) && GST_CLOCK_STIME_IS_VALID (high)) {
/* high == low means only one data point so far: force the update below */
1729 gboolean do_update = high == low;
1730 interleave = high - low;
1731 /* Padding of interleave and minimum value */
1732 interleave = (150 * interleave / 100) + mq->min_interleave_time;
1734 sq->interleave = interleave;
/* take the biggest interleave seen on the other streaming threads too */
1736 interleave = MAX (interleave, other_interleave);
1738 /* Progressively grow up the interleave up to 5s if some streams were inactive */
1739 if (some_inactive && interleave <= mq->interleave) {
1740 interleave = MIN (5 * GST_SECOND, mq->interleave + 500 * GST_MSECOND);
1744 /* We force the interleave update if:
1745 * * the interleave was previously set while some streams were not active
1746 * yet but they now all are
1747 * * OR the interleave was previously based on all streams being active
1748 * whereas some now aren't
1750 if (mq->interleave_incomplete != some_inactive)
1753 mq->interleave_incomplete = some_inactive;
1755 /* Update the stored interleave if:
1756 * * No data has arrived yet (high == low)
1757 * * Or it went higher
1758 * * Or it went lower and we've gone past the previous interleave needed */
1759 if (do_update || interleave > mq->interleave ||
1760 ((mq->last_interleave_update + (2 * MIN (GST_SECOND,
1761 mq->interleave)) < low)
1762 && interleave < (mq->interleave * 3 / 4))) {
1763 /* Update the interleave */
1764 mq->interleave = interleave;
1765 mq->last_interleave_update = high;
1766 /* Update max-size time */
1767 mq->max_size.time = mq->interleave;
1768 SET_CHILD_PROPERTY (mq, time);
1772 GST_DEBUG_OBJECT (mq,
1773 "low:%" GST_STIME_FORMAT " high:%" GST_STIME_FORMAT " interleave:%"
1774 GST_TIME_FORMAT " mq->interleave:%" GST_TIME_FORMAT
1775 " last_interleave_update:%" GST_STIME_FORMAT, GST_STIME_ARGS (low),
1776 GST_STIME_ARGS (high), GST_TIME_ARGS (interleave),
1777 GST_TIME_ARGS (mq->interleave),
1778 GST_STIME_ARGS (mq->last_interleave_update));
1782 /* calculate the diff between running time on the sink and src of the queue.
1783 * This is the total amount of time in the queue.
1784 * WITH LOCK TAKEN */
1786 update_time_level (GstMultiQueue * mq, GstSingleQueue * sq)
1788 GstClockTimeDiff sink_time, src_time;
/* "tainted" means the cached running time is stale and must be recomputed
 * from the corresponding segment position */
1790 if (sq->sink_tainted) {
1791 sink_time = sq->sinktime = my_segment_to_running_time (&sq->sink_segment,
1792 sq->sink_segment.position);
1794 GST_DEBUG_ID (sq->debug_id,
1795 "sink_segment.position:%" GST_TIME_FORMAT ", sink_time:%"
1796 GST_STIME_FORMAT, GST_TIME_ARGS (sq->sink_segment.position),
1797 GST_STIME_ARGS (sink_time));
1799 if (G_UNLIKELY (sq->last_time == GST_CLOCK_STIME_NONE)) {
1800 /* If the single queue still doesn't have a last_time set, this means
1801 * that nothing has been pushed out yet.
1802 * In order for the high_time computation to be as efficient as possible,
1803 * we set the last_time */
1804 sq->last_time = sink_time;
1806 if (G_UNLIKELY (sink_time != GST_CLOCK_STIME_NONE)) {
1807 /* if we have a time, we become untainted and use the time */
1808 sq->sink_tainted = FALSE;
1809 if (mq->use_interleave) {
1810 sq->cached_sinktime = sink_time;
1811 calculate_interleave (mq, sq);
1815 sink_time = sq->sinktime;
1817 if (sq->src_tainted) {
1818 GstSegment *segment;
1821 if (sq->has_src_segment) {
1822 segment = &sq->src_segment;
1823 position = sq->src_segment.position;
1826 * If the src pad had no segment yet, use the sink segment
1827 * to avoid signalling overrun if the received sink segment has a
1828 * a position > max-size-time while the src pad time would be the default=0
1830 * This can happen when switching pads on chained/adaptive streams and the
1831 * new chain has a segment with a much larger position
1833 segment = &sq->sink_segment;
1834 position = sq->sink_segment.position;
1837 src_time = sq->srctime = my_segment_to_running_time (segment, position);
1838 /* if we have a time, we become untainted and use the time */
1839 if (G_UNLIKELY (src_time != GST_CLOCK_STIME_NONE)) {
1840 sq->src_tainted = FALSE;
1843 src_time = sq->srctime;
1845 GST_DEBUG_ID (sq->debug_id,
1846 "sink %" GST_STIME_FORMAT ", src %" GST_STIME_FORMAT,
1847 GST_STIME_ARGS (sink_time), GST_STIME_ARGS (src_time));
1849 /* This allows for streams with out of order timestamping - sometimes the
1850 * emerging timestamp is later than the arriving one(s) */
1851 if (G_LIKELY (GST_CLOCK_STIME_IS_VALID (sink_time) &&
1852 GST_CLOCK_STIME_IS_VALID (src_time) && sink_time > src_time))
1853 sq->cur_time = sink_time - src_time;
1857 /* updating the time level can change the buffering state */
1858 update_buffering (mq, sq);
1863 /* take a SEGMENT event and apply the values to segment, updating the time
1864 * level of queue. */
/* @segment is either sq->sink_segment or sq->src_segment; which one decides
 * which taint flag is raised. Non-TIME segments are converted to an open
 * 0-based TIME segment. If the stream group changed, the previous segment's
 * running-time position is carried over as the new base to keep running time
 * continuous. */
1866 apply_segment (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1867 GstSegment * segment)
1869 GstClockTimeDiff ppos = 0;
1871 /* If we switched groups, grab the previous position */
1872 if (segment->rate > 0.0) {
1873 if (segment == &sq->sink_segment && sq->sink_stream_gid_changed) {
1875 gst_segment_to_running_time (segment, GST_FORMAT_TIME,
1877 sq->sink_stream_gid_changed = FALSE;
1878 } else if (segment == &sq->src_segment && sq->src_stream_gid_changed) {
1880 gst_segment_to_running_time (segment, GST_FORMAT_TIME,
1882 sq->src_stream_gid_changed = FALSE;
1886 gst_event_copy_segment (event, segment);
1888 /* now configure the values, we use these to track timestamps on the
1890 if (segment->format != GST_FORMAT_TIME) {
1891 /* non-time format, pretent the current time segment is closed with a
1892 * 0 start and unknown stop time. */
1893 segment->format = GST_FORMAT_TIME;
1898 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1901 GST_DEBUG_ID (sq->debug_id, "Applying base of %" GST_TIME_FORMAT,
1902 GST_TIME_ARGS (ppos));
1903 segment->base = ppos;
1906 /* Make sure we have a valid initial segment position (and not garbage
/* forward playback tracks from start, reverse playback from stop */
1908 if (segment->rate > 0.0)
1909 segment->position = segment->start;
1911 segment->position = segment->stop;
1913 if (segment == &sq->sink_segment)
1914 sq->sink_tainted = TRUE;
1916 sq->has_src_segment = TRUE;
1917 sq->src_tainted = TRUE;
1920 GST_DEBUG_ID (sq->debug_id,
1921 "configured SEGMENT %" GST_SEGMENT_FORMAT, segment);
1923 /* segment can update the time level of the queue */
1924 update_time_level (mq, sq);
1926 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1927 gst_multi_queue_post_buffering (mq);
1930 /* take a buffer and update segment, updating the time level of the queue. */
/* Advances @segment->position to the buffer's end timestamp (ts + duration
 * when both are valid) and taints the matching side so update_time_level()
 * recomputes the queued-time amount. */
1932 apply_buffer (GstMultiQueue * mq, GstSingleQueue * sq, GstClockTime timestamp,
1933 GstClockTime duration, GstSegment * segment)
1935 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1937 /* if no timestamp is set, assume it's continuous with the previous
1939 if (timestamp == GST_CLOCK_TIME_NONE)
1940 timestamp = segment->position;
1943 if (duration != GST_CLOCK_TIME_NONE)
1944 timestamp += duration;
1946 GST_DEBUG_ID (sq->debug_id, "%s position updated to %" GST_TIME_FORMAT,
1947 segment == &sq->sink_segment ? "sink" : "src", GST_TIME_ARGS (timestamp));
1949 segment->position = timestamp;
1951 if (segment == &sq->sink_segment)
1952 sq->sink_tainted = TRUE;
1954 sq->src_tainted = TRUE;
1956 /* calc diff with other end */
1957 update_time_level (mq, sq);
1958 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1959 gst_multi_queue_post_buffering (mq);
/* Applies a GAP event to @segment exactly like apply_buffer() applies a
 * buffer: the segment position is moved to the gap's end time (timestamp +
 * duration when valid), the matching side is tainted, and the queue's time
 * level is recomputed. */
1963 apply_gap (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1964 GstSegment * segment)
1966 GstClockTime timestamp;
1967 GstClockTime duration;
1969 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1971 gst_event_parse_gap (event, &timestamp, &duration);
/* a GAP without a valid timestamp carries no position information */
1973 if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
1975 if (GST_CLOCK_TIME_IS_VALID (duration)) {
1976 timestamp += duration;
1979 GST_DEBUG_ID (sq->debug_id,
1980 "%s position updated to %" GST_TIME_FORMAT,
1981 segment == &sq->sink_segment ? "sink" : "src",
1982 GST_TIME_ARGS (timestamp));
1984 segment->position = timestamp;
1986 if (segment == &sq->sink_segment)
1987 sq->sink_tainted = TRUE;
1989 sq->src_tainted = TRUE;
1991 /* calc diff with other end */
1992 update_time_level (mq, sq);
1995 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1996 gst_multi_queue_post_buffering (mq);
/* Returns the running time of @object within @segment, or
 * GST_CLOCK_STIME_NONE when it carries no usable timestamp.
 *  - Buffer: DTS-or-PTS (plus duration when @end is TRUE).
 *  - Buffer list: iterates all buffers; the last timestamped one wins.
 *  - SEGMENT event: running time of the new segment's start (TIME only).
 *  - GAP event: running time of the gap timestamp (duration handling is
 *    partially elided in this extract). */
1999 static GstClockTimeDiff
2000 get_running_time (GstSegment * segment, GstMiniObject * object, gboolean end)
2002 GstClockTimeDiff time = GST_CLOCK_STIME_NONE;
2004 if (GST_IS_BUFFER (object)) {
2005 GstBuffer *buf = GST_BUFFER_CAST (object);
2006 GstClockTime btime = GST_BUFFER_DTS_OR_PTS (buf);
2008 if (GST_CLOCK_TIME_IS_VALID (btime)) {
2009 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
2010 btime += GST_BUFFER_DURATION (buf);
2011 time = my_segment_to_running_time (segment, btime);
2013 } else if (GST_IS_BUFFER_LIST (object)) {
2014 GstBufferList *list = GST_BUFFER_LIST_CAST (object);
2018 n = gst_buffer_list_length (list);
2019 for (i = 0; i < n; i++) {
2021 buf = gst_buffer_list_get (list, i);
2022 btime = GST_BUFFER_DTS_OR_PTS (buf);
2023 if (GST_CLOCK_TIME_IS_VALID (btime)) {
2024 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
2025 btime += GST_BUFFER_DURATION (buf);
2026 time = my_segment_to_running_time (segment, btime);
2033 } else if (GST_IS_EVENT (object)) {
2034 GstEvent *event = GST_EVENT_CAST (object);
2036 /* For newsegment events return the running time of the start position */
2037 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
2038 const GstSegment *new_segment;
2040 gst_event_parse_segment (event, &new_segment);
2041 if (new_segment->format == GST_FORMAT_TIME) {
2043 my_segment_to_running_time ((GstSegment *) new_segment,
2044 new_segment->start);
2046 } else if (GST_EVENT_TYPE (event) == GST_EVENT_GAP) {
2047 GstClockTime ts, dur;
2048 gst_event_parse_gap (event, &ts, &dur);
2049 if (GST_CLOCK_TIME_IS_VALID (ts)) {
2050 if (GST_CLOCK_TIME_IS_VALID (dur))
2052 time = my_segment_to_running_time (segment, ts);
/* Pushes one dequeued mini-object (buffer, event or serialized query)
 * downstream on @sq's src pad, updating the src-side segment/time level
 * first. @allow_drop implements post-EOS dropping: while TRUE, buffers and
 * most events are discarded instead of pushed; STREAM_START, SEGMENT and
 * SEGMENT_DONE re-enable pushing. Returns the flow result of the push
 * (GST_FLOW_FLUSHING if the src pad is already gone). */
2061 static GstFlowReturn
2062 gst_single_queue_push_one (GstMultiQueue * mq, GstSingleQueue * sq,
2063 GstMiniObject * object, gboolean * allow_drop)
2065 GstFlowReturn result = sq->srcresult;
2066 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
2069 GST_INFO_OBJECT (mq,
2070 "Pushing while corresponding sourcepad has been cleared");
2071 return GST_FLOW_FLUSHING;
2074 if (GST_IS_BUFFER (object)) {
2076 GstClockTime timestamp, duration;
2078 buffer = GST_BUFFER_CAST (object);
2079 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
2080 duration = GST_BUFFER_DURATION (buffer);
/* advance src segment position before pushing */
2082 apply_buffer (mq, sq, timestamp, duration, &sq->src_segment);
2084 /* Applying the buffer may have made the queue non-full again, unblock it if needed */
2085 gst_data_queue_limits_changed (sq->queue);
2087 if (G_UNLIKELY (*allow_drop)) {
2088 GST_DEBUG_ID (sq->debug_id,
2089 "Dropping EOS buffer %p with ts %" GST_TIME_FORMAT,
2090 buffer, GST_TIME_ARGS (timestamp));
2091 gst_buffer_unref (buffer);
2093 GST_DEBUG_ID (sq->debug_id,
2094 "Pushing buffer %p with ts %" GST_TIME_FORMAT,
2095 buffer, GST_TIME_ARGS (timestamp));
2096 result = gst_pad_push (srcpad, buffer);
2098 } else if (GST_IS_EVENT (object)) {
2101 event = GST_EVENT_CAST (object);
2103 switch (GST_EVENT_TYPE (event)) {
2104 case GST_EVENT_SEGMENT_DONE:
2105 *allow_drop = FALSE;
/* EOS case (label elided): report EOS flow so the loop can go to pause */
2108 result = GST_FLOW_EOS;
2109 if (G_UNLIKELY (*allow_drop))
2110 *allow_drop = FALSE;
2112 case GST_EVENT_STREAM_START:
/* Track the group-id so segment-base carry-over can detect group switches */
2115 if (gst_event_parse_group_id (event, &group_id)) {
2116 if (sq->src_stream_gid == GST_GROUP_ID_INVALID) {
2117 sq->src_stream_gid = group_id;
2118 } else if (group_id != sq->src_stream_gid) {
2119 sq->src_stream_gid = group_id;
2120 sq->src_stream_gid_changed = TRUE;
2123 result = GST_FLOW_OK;
2124 if (G_UNLIKELY (*allow_drop))
2125 *allow_drop = FALSE;
2128 case GST_EVENT_SEGMENT:
2129 apply_segment (mq, sq, event, &sq->src_segment);
2130 /* Applying the segment may have made the queue non-full again, unblock it if needed */
2131 gst_data_queue_limits_changed (sq->queue);
2132 if (G_UNLIKELY (*allow_drop)) {
2133 result = GST_FLOW_OK;
2134 *allow_drop = FALSE;
2138 apply_gap (mq, sq, event, &sq->src_segment);
2139 /* Applying the gap may have made the queue non-full again, unblock it if needed */
2140 gst_data_queue_limits_changed (sq->queue);
2146 if (G_UNLIKELY (*allow_drop)) {
2147 GST_DEBUG_ID (sq->debug_id,
2148 "Dropping EOS event %p of type %s",
2149 event, GST_EVENT_TYPE_NAME (event));
2150 gst_event_unref (event);
2152 GST_DEBUG_ID (sq->debug_id,
2153 "Pushing event %p of type %s", event, GST_EVENT_TYPE_NAME (event));
2155 gst_pad_push_event (srcpad, event);
2157 } else if (GST_IS_QUERY (object)) {
2161 query = GST_QUERY_CAST (object);
2163 if (G_UNLIKELY (*allow_drop)) {
2164 GST_DEBUG_ID (sq->debug_id, "Dropping EOS query %p", query);
2165 gst_query_unref (query);
2168 res = gst_pad_peer_query (srcpad, query);
/* hand the answer back to the sink-side thread blocked in the query
 * handler and wake it up */
2171 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2172 sq->last_query = res;
2173 sq->last_handled_query = query;
2174 g_cond_signal (&sq->query_handled);
2175 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2177 g_warning ("Unexpected object in singlequeue %u (refcounting problem?)",
2181 gst_object_unref (srcpad);
/* Takes ownership of the item's mini-object, clearing the field so the
 * item's destroy function will not unref it (return statement elided). */
2187 static GstMiniObject *
2188 gst_multi_queue_item_steal_object (GstMultiQueueItem * item)
2193 item->object = NULL;
/* GDestroyNotify for queue items: unrefs the held mini-object (queries are
 * owned by the blocked sink thread, so they are never unreffed here) and
 * frees the item itself. */
2199 gst_multi_queue_item_destroy (GstMultiQueueItem * item)
2201 if (!item->is_query && item->object)
2202 gst_mini_object_unref (item->object);
2203 g_slice_free (GstMultiQueueItem, item);
2206 /* takes ownership of passed mini object! */
/* Wraps a buffer in a GstMultiQueueItem that counts towards the queue's
 * visible/bytes/time levels; @curid is the monotonic push id used for
 * not-linked ordering. */
2207 static GstMultiQueueItem *
2208 gst_multi_queue_buffer_item_new (GstMiniObject * object, guint32 curid)
2210 GstMultiQueueItem *item;
2212 item = g_slice_new (GstMultiQueueItem);
2213 item->object = object;
2214 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
2215 item->posid = curid;
2216 item->is_query = GST_IS_QUERY (object);
2218 item->size = gst_buffer_get_size (GST_BUFFER_CAST (object));
2219 item->duration = GST_BUFFER_DURATION (object);
/* invalid durations count as 0 so they don't poison the time level */
2220 if (item->duration == GST_CLOCK_TIME_NONE)
2222 item->visible = TRUE;
/* Wraps a non-buffer mini-object (event or serialized query) in a queue
 * item. Unlike buffer items these are not "visible" and (presumably, lines
 * elided) contribute no size/duration to the queue levels. Takes ownership
 * of @object. */
2226 static GstMultiQueueItem *
2227 gst_multi_queue_mo_item_new (GstMiniObject * object, guint32 curid)
2229 GstMultiQueueItem *item;
2231 item = g_slice_new (GstMultiQueueItem);
2232 item->object = object;
2233 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
2234 item->posid = curid;
2235 item->is_query = GST_IS_QUERY (object);
2239 item->visible = FALSE;
/* Each main loop attempts to push buffers until the return value
 * is not-linked. not-linked pads are not allowed to push data beyond
 * any linked pads, so they don't 'rush ahead of the pack'.
 *
 * Streaming-thread task for one singlequeue's srcpad: pops one item from the
 * data queue, optionally sleeps while the stream is NOT_LINKED (so it cannot
 * outrun linked streams, tracked via ids or running time), pushes the item
 * downstream and updates the shared high id/time bookkeeping.
 *
 * NOTE(review): this excerpt is missing interior lines (gaps in the embedded
 * original numbering — braces, else-branches, gotos and labels). Code below
 * is kept verbatim; only comments were added. Do not assume it compiles. */
gst_multi_queue_loop (GstPad * pad)
  GstMultiQueueItem *item;
  GstDataQueueItem *sitem;
  GstMiniObject *object = NULL;
  GstFlowReturn result;
  GstClockTimeDiff next_time;
  gboolean is_query = FALSE;
  gboolean do_update_buffering = FALSE;
  gboolean dropping = FALSE;
  GstPad *srcpad = NULL;

  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  /* weak refs: mq/srcpad may be released concurrently with this task */
  mq = g_weak_ref_get (&sq->mqueue);
  srcpad = g_weak_ref_get (&sq->srcpad);

  GST_DEBUG_ID (sq->debug_id, "trying to pop an object");

  /* Get something from the queue, blocking until that happens, or we get
  if (!(gst_data_queue_pop (sq->queue, &sitem)))

  item = (GstMultiQueueItem *) sitem;
  newid = item->posid;

  is_query = item->is_query;

  /* steal the object and destroy the item */
  object = gst_multi_queue_item_steal_object (item);
  gst_multi_queue_item_destroy (item);

  is_buffer = GST_IS_BUFFER (object);

  /* Get running time of the item. Events will have GST_CLOCK_STIME_NONE */
  next_time = get_running_time (&sq->src_segment, object, FALSE);

  GST_LOG_ID (sq->debug_id, "newid:%d , oldid:%d", newid, sq->last_oldid);

  /* If we're not-linked, we do some extra work because we might need to
   * wait before pushing. If we're linked but there's a gap in the IDs,
   * or it's the first loop, or we just passed the previous highid,
   * we might need to wake some sleeping pad up, so there's extra work
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (sq->srcresult == GST_FLOW_NOT_LINKED
      || (sq->last_oldid == G_MAXUINT32) || (newid != (sq->last_oldid + 1))
      || sq->last_oldid > mq->highid) {
    GST_LOG_ID (sq->debug_id, "CHECKING srcresult: %s",
        gst_flow_get_name (sq->srcresult));

    /* Check again if we're flushing after the lock is taken,
     * the flush flag might have been changed in the meantime */
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

    /* Update the nextid so other threads know when to wake us up */
    /* Take into account the extra cache time since we're unlinked */
    if (GST_CLOCK_STIME_IS_VALID (next_time))
      next_time += mq->unlinked_cache_time;
    sq->next_time = next_time;

    /* Update the oldid (the last ID we output) for highid tracking */
    if (sq->last_oldid != G_MAXUINT32)
      sq->oldid = sq->last_oldid;

    if (sq->srcresult == GST_FLOW_NOT_LINKED) {
      gboolean should_wait;
      /* Go to sleep until it's time to push this buffer */

      /* Recompute the highid */
      compute_high_id (mq);
      /* Recompute the high time */
      compute_high_time (mq, sq->groupid);

      GST_DEBUG_ID (sq->debug_id,
          "groupid %d high_time %" GST_STIME_FORMAT " next_time %"
          GST_STIME_FORMAT, sq->groupid, GST_STIME_ARGS (sq->group_high_time),
          GST_STIME_ARGS (next_time));

      /* Decide whether this not-linked stream must wait: by running time
       * when sync-by-running-time is on, else by buffer id */
      if (mq->sync_by_running_time) {
        if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
          should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
              (mq->high_time == GST_CLOCK_STIME_NONE
              || next_time > mq->high_time);
          should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
              next_time > sq->group_high_time;
        should_wait = newid > mq->highid;

      while (should_wait && sq->srcresult == GST_FLOW_NOT_LINKED) {

        GST_DEBUG_ID (sq->debug_id,
            "Sleeping for not-linked wakeup with "
            "newid %u, highid %u, next_time %" GST_STIME_FORMAT
            ", high_time %" GST_STIME_FORMAT, newid, mq->highid,
            GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time));

        /* Wake up all non-linked pads before we sleep */
        wake_up_next_non_linked (mq);

        /* sleeps on the per-queue cond under the multiqueue lock */
        g_cond_wait (&sq->turn, &mq->qlock);
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

        /* Recompute the high time and ID */
        compute_high_time (mq, sq->groupid);
        compute_high_id (mq);

        GST_DEBUG_ID (sq->debug_id, "Woken from sleeping for not-linked "
            "wakeup with newid %u, highid %u, next_time %" GST_STIME_FORMAT
            ", high_time %" GST_STIME_FORMAT " mq high_time %" GST_STIME_FORMAT,
            GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time),
            GST_STIME_ARGS (mq->high_time));

        /* Re-evaluate the wait condition after waking up */
        if (mq->sync_by_running_time) {
          if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
            should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
                (mq->high_time == GST_CLOCK_STIME_NONE
                || next_time > mq->high_time);
            should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
                next_time > sq->group_high_time;
          should_wait = newid > mq->highid;

      /* Re-compute the high_id in case someone else pushed */
      compute_high_id (mq);
      compute_high_time (mq, sq->groupid);
      compute_high_id (mq);
      compute_high_time (mq, sq->groupid);
      /* Wake up all non-linked pads */
      wake_up_next_non_linked (mq);

    /* We're done waiting, we can clear the nextid and nexttime */
    sq->next_time = GST_CLOCK_STIME_NONE;

    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  GST_LOG_ID (sq->debug_id, "BEFORE PUSHING sq->srcresult: %s",
      gst_flow_get_name (sq->srcresult));

  /* Update time stats */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  next_time = get_running_time (&sq->src_segment, object, TRUE);
  if (GST_CLOCK_STIME_IS_VALID (next_time)) {
    if (sq->last_time == GST_CLOCK_STIME_NONE || sq->last_time < next_time)
      sq->last_time = next_time;
    if (mq->high_time == GST_CLOCK_STIME_NONE || mq->high_time <= next_time) {
      /* Wake up all non-linked pads now that we advanced the high time */
      mq->high_time = next_time;
      wake_up_next_non_linked (mq);
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* Try to push out the new object */
  result = gst_single_queue_push_one (mq, sq, object, &dropping);

  /* Check if we pushed something already and if this is
   * now a switch from an active to a non-active stream.
   *
   * If it is, we reset all the waiting streams, let them
   * push another buffer to see if they're now active again.
   * This allows faster switching between streams and prevents
   * deadlocks if downstream does any waiting too.
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (sq->pushed && sq->srcresult == GST_FLOW_OK
      && result == GST_FLOW_NOT_LINKED) {

    GST_LOG_ID (sq->debug_id, "Changed from active to non-active");

    compute_high_id (mq);
    compute_high_time (mq, sq->groupid);
    do_update_buffering = TRUE;

    /* maybe no-one is waiting */
    if (mq->numwaiting > 0) {
      /* Else figure out which singlequeue(s) need waking up */
      for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
        GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;

        if (sq2->srcresult == GST_FLOW_NOT_LINKED) {
          GST_LOG_ID (sq2->debug_id, "Waking up singlequeue");
          sq2->pushed = FALSE;
          sq2->srcresult = GST_FLOW_OK;
          g_cond_signal (&sq2->turn);

  /* now hold on a bit;
   * can not simply throw this result to upstream, because
   * that might already be onto another segment, so we have to make
   * sure we are relaying the correct info wrt proper segment */
  if (result == GST_FLOW_EOS && !dropping &&
      sq->srcresult != GST_FLOW_NOT_LINKED) {
    GST_DEBUG_ID (sq->debug_id, "starting EOS drop");

    /* pretend we have not seen EOS yet for upstream's sake */
    result = sq->srcresult;
  } else if (dropping && gst_data_queue_is_empty (sq->queue)) {
    /* queue empty, so stop dropping
     * we can commit the result we have now,
     * which is either OK after a segment, or EOS */
    GST_DEBUG_ID (sq->debug_id, "committed EOS drop");

    result = GST_FLOW_EOS;

  sq->srcresult = result;
  sq->last_oldid = newid;

  if (do_update_buffering)
    update_buffering (mq, sq);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  gst_multi_queue_post_buffering (mq);

  GST_LOG_ID (sq->debug_id,
      "AFTER PUSHING sq->srcresult: %s (is_eos:%d)",
      gst_flow_get_name (sq->srcresult), GST_PAD_IS_EOS (srcpad));

  /* Need to make sure wake up any sleeping pads when we exit */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (mq->numwaiting > 0 && (GST_PAD_IS_EOS (srcpad)
      || sq->srcresult == GST_FLOW_EOS)) {
    compute_high_time (mq, sq->groupid);
    compute_high_id (mq);
    wake_up_next_non_linked (mq);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  if (result != GST_FLOW_OK && result != GST_FLOW_NOT_LINKED
      && result != GST_FLOW_EOS)

  gst_clear_object (&mq);
  gst_clear_object (&srcpad);

  /* error/exit path: drop the popped object and unblock a pending
   * serialized query waiting on query_handled */
  if (object && !is_query)
    gst_mini_object_unref (object);

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  sq->last_query = FALSE;
  g_cond_signal (&sq->query_handled);

  /* Post an error message if we got EOS while downstream
   * has returned an error flow return. After EOS there
   * will be no further buffer which could propagate the
  if ((sq->is_eos || sq->is_segment_done) && sq->srcresult < GST_FLOW_EOS) {
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
    GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* upstream needs to see fatal result ASAP to shut things down,
   * but might be stuck in one of our other full queues;
   * so empty this one and trigger dynamic queue growth. At
   * this point the srcresult is not OK, NOT_LINKED
   * or EOS, i.e. a real failure */
  gst_single_queue_flush_queue (sq, FALSE);
  single_queue_underrun_cb (sq->queue, sq);
  gst_data_queue_set_flushing (sq->queue, TRUE);
  gst_pad_pause_task (srcpad);
  GST_LOG_ID (sq->debug_id,
      "task paused, reason:%s", gst_flow_get_name (sq->srcresult));
/*
 * gst_multi_queue_chain:
 *
 * This is similar to GstQueue's chain function, except:
 * _ we don't have leak behaviours,
 * _ we push with a unique id (curid)
 *
 * NOTE(review): this excerpt is missing interior lines (gaps in the embedded
 * original numbering — flushing/EOS checks, braces and error labels). Code
 * below is kept verbatim; only comments were added. */
static GstFlowReturn
gst_multi_queue_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
  GstMultiQueueItem *item = NULL;
  GstClockTime timestamp, duration;

  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  mq = g_weak_ref_get (&sq->mqueue);

  /* if eos, we are always full, so avoid hanging incoming indefinitely */

  /* Get a unique incrementing id */
  curid = g_atomic_int_add ((gint *) & mq->counter, 1);

  timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
  duration = GST_BUFFER_DURATION (buffer);

  GST_LOG_ID (sq->debug_id,
      "About to enqueue buffer %p with id %d (pts:%"
      GST_TIME_FORMAT " dts:%" GST_TIME_FORMAT " dur:%" GST_TIME_FORMAT ")",
      buffer, curid, GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
      GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), GST_TIME_ARGS (duration));

  /* item takes ownership of the buffer */
  item = gst_multi_queue_buffer_item_new (GST_MINI_OBJECT_CAST (buffer), curid);

  /* Update interleave before pushing data into queue */
  if (mq->use_interleave) {
    GstClockTime val = timestamp;
    GstClockTimeDiff dval;

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    /* fall back to the sink segment position when the buffer carries no
     * usable timestamp */
    if (val == GST_CLOCK_TIME_NONE)
      val = sq->sink_segment.position;
    if (duration != GST_CLOCK_TIME_NONE)
    dval = my_segment_to_running_time (&sq->sink_segment, val);
    if (GST_CLOCK_STIME_IS_VALID (dval)) {
      sq->cached_sinktime = dval;
      GST_DEBUG_ID (sq->debug_id,
          "Cached sink time now %" G_GINT64_FORMAT " %"
          GST_STIME_FORMAT, sq->cached_sinktime,
          GST_STIME_ARGS (sq->cached_sinktime));
      calculate_interleave (mq, sq);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  if (!(gst_data_queue_push (sq->queue, (GstDataQueueItem *) item)))

  /* update time level, we must do this after pushing the data in the queue so
   * that we never end up filling the queue first. */
  apply_buffer (mq, sq, timestamp, duration, &sq->sink_segment);

  gst_clear_object (&mq);
  return sq->srcresult;

  /* error paths below (labels dropped by the extraction) */
  GST_LOG_ID (sq->debug_id, "exit because task paused, reason: %s",
      gst_flow_get_name (sq->srcresult));
  gst_multi_queue_item_destroy (item);

  GST_DEBUG_OBJECT (mq, "we are EOS, dropping buffer, return EOS");
  gst_buffer_unref (buffer);
  gst_object_unref (mq);
  return GST_FLOW_EOS;
/* Activate/deactivate a request sinkpad in push mode. Deactivation flushes
 * the data queue and waits for the streaming thread via the pad stream lock.
 *
 * NOTE(review): this excerpt is missing interior lines (braces, if/else and
 * switch scaffolding). Code below is kept verbatim; only comments added. */
gst_multi_queue_sink_activate_mode (GstPad * pad, GstObject * parent,
    GstPadMode mode, gboolean active)
  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  /* strong ref, released at the end of the function */
  mq = (GstMultiQueue *) gst_pad_get_parent (pad);

  /* mq is NULL if the pad is activated/deactivated before being
   * added to the multiqueue */

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    case GST_PAD_MODE_PUSH:
      /* All pads start off linked until they push one buffer */
      sq->srcresult = GST_FLOW_OK;

      gst_data_queue_set_flushing (sq->queue, FALSE);
      /* deactivation path: flush and unblock any waiting serialized query */
      sq->srcresult = GST_FLOW_FLUSHING;
      sq->last_query = FALSE;
      g_cond_signal (&sq->query_handled);
      gst_data_queue_set_flushing (sq->queue, TRUE);

      /* Wait until streaming thread has finished */
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      GST_PAD_STREAM_LOCK (pad);
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      gst_data_queue_flush (sq->queue);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      GST_PAD_STREAM_UNLOCK (pad);
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  gst_object_unref (mq);
/* Sink-pad event handler. Non-serialized events are pushed straight to the
 * srcpad; serialized events are wrapped in a queue item so they stay in order
 * with the data. A second switch (after enqueueing) updates per-queue state
 * for EOS / SEGMENT / SEGMENT_DONE / GAP.
 *
 * NOTE(review): this excerpt is missing interior lines (case breaks, braces,
 * goto labels). Code below is kept verbatim; only comments were added. */
static GstFlowReturn
gst_multi_queue_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
  GstMultiQueueItem *item;
  gboolean res = TRUE;
  GstFlowReturn flowret = GST_FLOW_OK;
  GstEvent *sref = NULL;

  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  mq = (GstMultiQueue *) parent;
  srcpad = g_weak_ref_get (&sq->srcpad);

  /* weak srcpad ref failed: the srcpad is already gone */
  GST_INFO_OBJECT (pad, "Pushing while corresponding sourcepad has been"
      " removed already");

  return GST_FLOW_FLUSHING;

  type = GST_EVENT_TYPE (event);
    case GST_EVENT_STREAM_START:
      /* track group-id changes so downstream re-negotiation can be detected */
      if (gst_event_parse_group_id (event, &group_id)) {
        if (sq->sink_stream_gid == GST_GROUP_ID_INVALID) {
          sq->sink_stream_gid = group_id;
        } else if (group_id != sq->sink_stream_gid) {
          sq->sink_stream_gid = group_id;
          sq->sink_stream_gid_changed = TRUE;

      if (mq->sync_by_running_time) {
        GstStreamFlags stream_flags;
        gst_event_parse_stream_flags (event, &stream_flags);
        if ((stream_flags & GST_STREAM_FLAG_SPARSE)) {
          GST_INFO_ID (sq->debug_id, "Stream is sparse");
          sq->is_sparse = TRUE;

      sq->thread = g_thread_self ();

      /* Remove EOS flag */

    case GST_EVENT_FLUSH_START:
      GST_DEBUG_ID (sq->debug_id, "Received flush start event");

      res = gst_pad_push_event (srcpad, event);

      gst_single_queue_flush (mq, sq, TRUE, FALSE);
      gst_single_queue_pause (mq, sq);

    case GST_EVENT_FLUSH_STOP:
      GST_DEBUG_ID (sq->debug_id, "Received flush stop event");

      res = gst_pad_push_event (srcpad, event);

      gst_single_queue_flush (mq, sq, FALSE, FALSE);
      gst_single_queue_start (mq, sq);
#ifdef TIZEN_FEATURE_MQ_MODIFICATION
      /* need to reset the buffering data after seeking */
      GstSingleQueue *q = (GstSingleQueue *) tmp->data;
      tmp = g_list_next (tmp);
      recheck_buffering_status (mq);

    case GST_EVENT_SEGMENT:
      sq->is_segment_done = FALSE;
      sref = gst_event_ref (event);

      /* take ref because the queue will take ownership and we need the event
       * afterwards to update the segment */
      sref = gst_event_ref (event);
      if (mq->use_interleave) {
        GstClockTime val, dur;

        gst_event_parse_gap (event, &val, &dur);
        if (GST_CLOCK_TIME_IS_VALID (val)) {
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
          if (GST_CLOCK_TIME_IS_VALID (dur))
          stime = my_segment_to_running_time (&sq->sink_segment, val);
          if (GST_CLOCK_STIME_IS_VALID (stime)) {
            sq->cached_sinktime = stime;
            calculate_interleave (mq, sq);
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* non-serialized events bypass the data queue entirely */
  if (!(GST_EVENT_IS_SERIALIZED (event))) {
    res = gst_pad_push_event (srcpad, event);

  /* if eos, we are always full, so avoid hanging incoming indefinitely */

  /* Get an unique incrementing id. */
  curid = g_atomic_int_add ((gint *) & mq->counter, 1);

  item = gst_multi_queue_mo_item_new ((GstMiniObject *) event, curid);

  GST_DEBUG_ID (sq->debug_id,
      "Enqueuing event %p of type %s with id %d",
      event, GST_EVENT_TYPE_NAME (event), curid);

  if (!gst_data_queue_push (sq->queue, (GstDataQueueItem *) item))

  /* mark EOS when we received one, we must do that after putting the
   * buffer in the queue because EOS marks the buffer as filled. */
    case GST_EVENT_SEGMENT_DONE:
      sq->is_segment_done = TRUE;
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      update_buffering (mq, sq);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      single_queue_overrun_cb (sq->queue, sq);
      gst_multi_queue_post_buffering (mq);

      GST_MULTI_QUEUE_MUTEX_LOCK (mq);

      /* Post an error message if we got EOS while downstream
       * has returned an error flow return. After EOS there
       * will be no further buffer which could propagate the
      if (sq->srcresult < GST_FLOW_EOS) {
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      /* EOS affects the buffering state */
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      update_buffering (mq, sq);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      single_queue_overrun_cb (sq->queue, sq);
      gst_multi_queue_post_buffering (mq);

    case GST_EVENT_SEGMENT:
      apply_segment (mq, sq, sref, &sq->sink_segment);
      gst_event_unref (sref);
      /* a new segment allows us to accept more buffers if we got EOS
       * from downstream */
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      if (sq->srcresult == GST_FLOW_EOS)
        sq->srcresult = GST_FLOW_OK;
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      apply_gap (mq, sq, sref, &sq->sink_segment);
      gst_event_unref (sref);

  gst_object_unref (srcpad);
  /* non-serialized push failed */
  flowret = GST_FLOW_ERROR;
  GST_DEBUG_ID (sq->debug_id, "Returning %s", gst_flow_get_name (flowret));

  gst_object_unref (srcpad);
  GST_LOG_ID (sq->debug_id, "Exit because task paused, reason: %s",
      gst_flow_get_name (sq->srcresult));
  gst_event_unref (sref);
  gst_multi_queue_item_destroy (item);
  return sq->srcresult;

  gst_object_unref (srcpad);
  GST_DEBUG_OBJECT (mq, "we are EOS, dropping event, return GST_FLOW_EOS");
  gst_event_unref (event);
  return GST_FLOW_EOS;
/* Sink-pad query handler. Serialized queries travel through the data queue
 * (so they are answered in stream order by the loop thread); the caller then
 * blocks on query_handled until the loop signals the result. Non-serialized
 * queries fall back to default handling.
 *
 * NOTE(review): this excerpt is missing interior lines (goto labels, braces,
 * switch scaffolding). Code below is kept verbatim; only comments added. */
gst_multi_queue_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  mq = (GstMultiQueue *) parent;

  switch (GST_QUERY_TYPE (query)) {
      if (GST_QUERY_IS_SERIALIZED (query)) {
        GstMultiQueueItem *item;

        GST_MULTI_QUEUE_MUTEX_LOCK (mq);
        if (sq->srcresult != GST_FLOW_OK)

        /* serialized events go in the queue. We need to be certain that we
         * don't cause deadlocks waiting for the query return value. We check if
         * the queue is empty (nothing is blocking downstream and the query can
         * be pushed for sure) or we are not buffering. If we are buffering,
         * the pipeline waits to unblock downstream until our queue fills up
         * completely, which can not happen if we block on the query..
         * Therefore we only potentially block when we are not buffering. */
        if (!mq->use_buffering || gst_data_queue_is_empty (sq->queue)) {
          /* Get an unique incrementing id. */
          curid = g_atomic_int_add ((gint *) & mq->counter, 1);

          item = gst_multi_queue_mo_item_new ((GstMiniObject *) query, curid);

          GST_DEBUG_ID (sq->debug_id,
              "Enqueuing query %p of type %s with id %d",
              query, GST_QUERY_TYPE_NAME (query), curid);
          /* must release the lock around the (possibly blocking) push */
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
          res = gst_data_queue_push (sq->queue, (GstDataQueueItem *) item);
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
#ifdef TIZEN_FEATURE_MQ_MODIFICATION
          if (!res || sq->flushing) {
            gst_multi_queue_item_destroy (item);
          if (!res || sq->flushing)

          /* it might be that the query has been taken out of the queue
           * while we were unlocked. So, we need to check if the last
           * handled query is the same one than the one we just
           * pushed. If it is, we don't need to wait for the condition
           * variable, otherwise we wait for the condition variable to
          while (!sq->flushing && sq->srcresult == GST_FLOW_OK
              && sq->last_handled_query != query)
            g_cond_wait (&sq->query_handled, &mq->qlock);
          res = sq->last_query;
          sq->last_handled_query = NULL;

          GST_DEBUG_OBJECT (mq, "refusing query, we are buffering and the "
              "queue is not empty");
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      /* default handling */
      res = gst_pad_query_default (pad, parent, query);

  GST_DEBUG_OBJECT (mq, "Flushing");
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3009 gst_multi_queue_src_activate_mode (GstPad * pad, GstObject * parent,
3010 GstPadMode mode, gboolean active)
3016 sq = GST_MULTIQUEUE_PAD (pad)->sq;
3017 mq = g_weak_ref_get (&sq->mqueue);
3020 GST_ERROR_OBJECT (pad, "No multiqueue set anymore, can't activate pad");
3025 GST_DEBUG_ID (sq->debug_id, "active: %d", active);
3028 case GST_PAD_MODE_PUSH:
3030 gst_single_queue_flush (mq, sq, FALSE, TRUE);
3031 result = parent ? gst_single_queue_start (mq, sq) : TRUE;
3033 gst_single_queue_flush (mq, sq, TRUE, TRUE);
3034 result = gst_single_queue_stop (mq, sq);
3041 gst_object_unref (mq);
/* Src-pad event handler. LATENCY events may raise the minimum interleave
 * time, RECONFIGURE re-activates a NOT_LINKED singlequeue, everything else
 * is forwarded upstream via the sinkpad.
 *
 * NOTE(review): this excerpt is missing interior lines (return type, braces,
 * case breaks, `ret` declaration). Code kept verbatim; only comments added. */
gst_multi_queue_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
  GstSingleQueue *sq = GST_MULTIQUEUE_PAD (pad)->sq;
  GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);

  GstPad *sinkpad = g_weak_ref_get (&sq->sinkpad);

  if (!mq || !sinkpad) {
    gst_clear_object (&sinkpad);
    gst_clear_object (&mq);
    GST_INFO_OBJECT (pad, "No multique/sinkpad set anymore, flushing");

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_LATENCY:
      GstClockTime latency = GST_CLOCK_TIME_NONE;
      gst_event_parse_latency (event, &latency);
      if (GST_CLOCK_TIME_IS_VALID (latency)) {
        GST_MULTI_QUEUE_MUTEX_LOCK (mq);
        if (latency > mq->min_interleave_time) {
          /* Due to the dynamic nature of multiqueue, whe `use-interleave` is
           * used we can't report a maximum tolerated latency (when queried)
           * since it is calculated dynamically.
           *
           * When in such live pipelines, we need to make sure multiqueue can
           * handle the lowest global latency (provided by this event). Failure
           * to do that would result in not providing enough buffering for a
           * realtime pipeline.
          GST_DEBUG_OBJECT (mq,
              "Raising minimum interleave time to %" GST_TIME_FORMAT,
              GST_TIME_ARGS (latency));
          mq->min_interleave_time = latency;
          if (mq->use_interleave)
            calculate_interleave (mq, NULL);
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      ret = gst_pad_push_event (sinkpad, event);

    case GST_EVENT_RECONFIGURE:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      /* downstream got re-linked: resume a sleeping NOT_LINKED queue */
      if (sq->srcresult == GST_FLOW_NOT_LINKED) {
        sq->srcresult = GST_FLOW_OK;
        g_cond_signal (&sq->turn);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      ret = gst_pad_push_event (sinkpad, event);

      ret = gst_pad_push_event (sinkpad, event);

  gst_object_unref (sinkpad);
  gst_object_unref (mq);
3112 gst_multi_queue_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
3116 /* FIXME, Handle position offset depending on queue size */
3117 switch (GST_QUERY_TYPE (query)) {
3119 /* default handling */
3120 res = gst_pad_query_default (pad, parent, query);
3127 * Next-non-linked functions
3130 /* WITH LOCK TAKEN */
3132 wake_up_next_non_linked (GstMultiQueue * mq)
3136 /* maybe no-one is waiting */
3137 if (mq->numwaiting < 1)
3140 if (mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (mq->high_time)) {
3141 /* Else figure out which singlequeue(s) need waking up */
3142 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3143 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3144 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3145 GstClockTimeDiff high_time;
3147 if (GST_CLOCK_STIME_IS_VALID (sq->group_high_time))
3148 high_time = sq->group_high_time;
3150 high_time = mq->high_time;
3152 if (GST_CLOCK_STIME_IS_VALID (sq->next_time) &&
3153 GST_CLOCK_STIME_IS_VALID (high_time)
3154 && sq->next_time <= high_time) {
3155 GST_LOG_ID (sq->debug_id, "Waking up singlequeue");
3156 g_cond_signal (&sq->turn);
3161 /* Else figure out which singlequeue(s) need waking up */
3162 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3163 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3164 if (sq->srcresult == GST_FLOW_NOT_LINKED &&
3165 sq->nextid != 0 && sq->nextid <= mq->highid) {
3166 GST_LOG_ID (sq->debug_id, "Waking up singlequeue");
3167 g_cond_signal (&sq->turn);
3173 /* WITH LOCK TAKEN */
3175 compute_high_id (GstMultiQueue * mq)
3177 /* The high-id is either the highest id among the linked pads, or if all
3178 * pads are not-linked, it's the lowest not-linked pad */
3180 guint32 lowest = G_MAXUINT32;
3181 guint32 highid = G_MAXUINT32;
3183 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3184 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3185 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3188 GST_INFO_OBJECT (mq,
3189 "srcpad has been removed already... ignoring single queue");
3194 GST_LOG_ID (sq->debug_id, "nextid:%d, oldid:%d, srcresult:%s",
3195 sq->nextid, sq->oldid, gst_flow_get_name (sq->srcresult));
3197 /* No need to consider queues which are not waiting */
3198 if (sq->nextid == 0) {
3199 GST_LOG_ID (sq->debug_id, "not waiting - ignoring");
3200 gst_object_unref (srcpad);
3204 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3205 if (sq->nextid < lowest)
3206 lowest = sq->nextid;
3207 } else if (!GST_PAD_IS_EOS (srcpad) && sq->srcresult != GST_FLOW_EOS) {
3208 /* If we don't have a global highid, or the global highid is lower than
3209 * this single queue's last outputted id, store the queue's one,
3210 * unless the singlequeue output is at EOS */
3211 if ((highid == G_MAXUINT32) || (sq->oldid > highid))
3214 gst_object_unref (srcpad);
3217 if (highid == G_MAXUINT32 || lowest < highid)
3218 mq->highid = lowest;
3220 mq->highid = highid;
3222 GST_LOG_OBJECT (mq, "Highid is now : %u, lowest non-linked %u", mq->highid,
/* WITH LOCK TAKEN */
/* Recomputes mq->high_time and the per-group high time for @groupid from the
 * last/next running times of all singlequeues, then stores the group result
 * into every member queue's group_high_time.
 *
 * NOTE(review): this excerpt is missing interior lines (braces, several
 * assignments, `res` selection, log-call heads). Code kept verbatim; only
 * comments were added. */
compute_high_time (GstMultiQueue * mq, guint groupid)
  /* The high-time is either the highest last time among the linked
   * pads, or if all pads are not-linked, it's the lowest nex time of
  GstClockTimeDiff highest = GST_CLOCK_STIME_NONE;
  GstClockTimeDiff lowest = GST_CLOCK_STIME_NONE;
  GstClockTimeDiff group_high = GST_CLOCK_STIME_NONE;
  GstClockTimeDiff group_low = GST_CLOCK_STIME_NONE;
  GstClockTimeDiff res;
  /* Number of streams which belong to groupid */
  guint group_count = 0;

  if (!mq->sync_by_running_time)
    /* return GST_CLOCK_STIME_NONE; */

  for (tmp = mq->queues; tmp; tmp = tmp->next) {
    GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
    GstPad *srcpad = g_weak_ref_get (&sq->srcpad);

    GST_INFO_OBJECT (mq,
        "srcpad has been removed already... ignoring single queue");

    GST_LOG_ID (sq->debug_id,
        "inspecting (group:%d) , next_time:%" GST_STIME_FORMAT
        ", last_time:%" GST_STIME_FORMAT ", srcresult:%s", sq->groupid,
        GST_STIME_ARGS (sq->next_time), GST_STIME_ARGS (sq->last_time),
        gst_flow_get_name (sq->srcresult));

    if (sq->groupid == groupid)

    if (sq->srcresult == GST_FLOW_NOT_LINKED) {
      /* No need to consider queues which are not waiting */
      if (!GST_CLOCK_STIME_IS_VALID (sq->next_time)) {
        GST_LOG_ID (sq->debug_id, "Not waiting - ignoring");
        gst_object_unref (srcpad);

      if (lowest == GST_CLOCK_STIME_NONE || sq->next_time < lowest)
        lowest = sq->next_time;
      if (sq->groupid == groupid && (group_low == GST_CLOCK_STIME_NONE
          || sq->next_time < group_low))
        group_low = sq->next_time;
    } else if (!GST_PAD_IS_EOS (srcpad) && sq->srcresult != GST_FLOW_EOS) {
      /* If we don't have a global high time, or the global high time
       * is lower than this single queue's last outputted time, store
       * the queue's one, unless the singlequeue output is at EOS. */
      if (highest == GST_CLOCK_STIME_NONE
          || (sq->last_time != GST_CLOCK_STIME_NONE && sq->last_time > highest))
        highest = sq->last_time;
      if (sq->groupid == groupid && (group_high == GST_CLOCK_STIME_NONE
          || (sq->last_time != GST_CLOCK_STIME_NONE
          && sq->last_time > group_high)))
        group_high = sq->last_time;

        "highest now %" GST_STIME_FORMAT " lowest %" GST_STIME_FORMAT,
        GST_STIME_ARGS (highest), GST_STIME_ARGS (lowest));
    if (sq->groupid == groupid)
        "grouphigh %" GST_STIME_FORMAT " grouplow %" GST_STIME_FORMAT,
        GST_STIME_ARGS (group_high), GST_STIME_ARGS (group_low));

    gst_object_unref (srcpad);

  /* no linked, non-EOS queue: fall back to the lowest waiting time */
  if (highest == GST_CLOCK_STIME_NONE)
    mq->high_time = lowest;
    mq->high_time = highest;

  /* If there's only one stream of a given type, use the global high */
  if (group_count < 2)
    res = GST_CLOCK_STIME_NONE;
  else if (group_high == GST_CLOCK_STIME_NONE)

  GST_LOG_OBJECT (mq, "group count %d for groupid %u", group_count, groupid);
      "MQ High time is now : %" GST_STIME_FORMAT ", group %d high time %"
      GST_STIME_FORMAT ", lowest non-linked %" GST_STIME_FORMAT,
      GST_STIME_ARGS (mq->high_time), groupid, GST_STIME_ARGS (mq->high_time),
      GST_STIME_ARGS (lowest));

  /* propagate the computed group high time to all members of the group */
  for (tmp = mq->queues; tmp; tmp = tmp->next) {
    GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
    if (groupid == sq->groupid)
      sq->group_high_time = res;
/* A dimension (visible/bytes/time) counts as filled only when its limit is
 * non-zero (0 means "unlimited") and the current value has reached it. */
#define IS_FILLED(q, format, value) (((q)->max_size.format) != 0 && \
    ((q)->max_size.format) <= (value))

#ifdef TIZEN_FEATURE_MQ_MODIFICATION_EXTRA_SIZE_TIME
/* Tizen variant: allows an additional extra_size margin on top of max_size
 * before the dimension counts as filled; both limits must be non-zero. */
#define IS_FILLED_EXTRA(q, format, value) ((((q)->extra_size.format) != 0) && (((q)->max_size.format) != 0) && \
    (((q)->extra_size.format)+((q)->max_size.format)) <= (value))
3337 * GstSingleQueue functions
/* Data-queue "full" callback for one singlequeue. If no hard byte/time limit
 * is hit and some other (non-sparse) queue is empty, bumps this queue's max
 * visible items by one so the empty queue cannot starve; otherwise emits the
 * "overrun" signal.
 *
 * NOTE(review): this excerpt is missing interior lines (return type, braces,
 * flag assignments, goto/label lines). Code kept verbatim; comments added. */
single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
  GstDataQueueSize size;
  gboolean filled = TRUE;
  gboolean empty_found = FALSE;
  GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);

  GST_ERROR ("No multique set anymore, not doing anything");

  gst_data_queue_get_level (sq->queue, &size);

  GST_LOG_ID (sq->debug_id,
      "EOS %d, visible %u/%u, bytes %u/%u, time %"
      G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, sq->is_eos, size.visible,
      sq->max_size.visible, size.bytes, sq->max_size.bytes, sq->cur_time,

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);

  /* check if we reached the hard time/bytes limits;
     time limit is only taken into account for non-sparse streams */
  if (sq->is_eos || IS_FILLED (sq, bytes, size.bytes) ||
      (!sq->is_sparse && IS_FILLED (sq, time, sq->cur_time))) {

  /* Search for empty queues */
  for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *oq = (GstSingleQueue *) tmp->data;

    if (oq->srcresult == GST_FLOW_NOT_LINKED) {
      GST_LOG_ID (sq->debug_id, "Queue is not-linked");

    GST_LOG_ID (oq->debug_id, "Checking queue");
    if (gst_data_queue_is_empty (oq->queue) && !oq->is_sparse) {
      GST_LOG_ID (oq->debug_id, "Queue is empty");

  /* if hard limits are not reached then we allow one more buffer in the full
   * queue, but only if any of the other singelqueues are empty */
  if (IS_FILLED (sq, visible, size.visible)) {
    sq->max_size.visible = size.visible + 1;
    GST_DEBUG_ID (sq->debug_id,
        "Bumping max visible to %d", sq->max_size.visible);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  gst_object_unref (mq);

  /* Overrun is always forwarded, since this is blocking the upstream element */
  GST_DEBUG_ID (sq->debug_id, "Queue is filled, signalling overrun");
  g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_OVERRUN], 0);
/* Data-queue "empty" callback for one singlequeue. Bumps the max visible
 * items of any other queue that is full (dynamic queue growth so this one
 * can refill), and emits the "underrun" signal when every queue is empty.
 *
 * NOTE(review): this excerpt is missing interior lines (return type, braces,
 * the `empty = FALSE;` assignment, goto/label lines). Code kept verbatim;
 * only comments were added. */
single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
  gboolean empty = TRUE;
  GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);

  GST_ERROR ("No multique set anymore, not doing anything");

  if (sq->srcresult == GST_FLOW_NOT_LINKED) {
    GST_LOG_ID (sq->debug_id, "Single Queue is empty but not-linked");
    gst_object_unref (mq);

  GST_LOG_ID (sq->debug_id,
      "Single Queue is empty, Checking other single queues");

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *oq = (GstSingleQueue *) tmp->data;

    if (gst_data_queue_is_full (oq->queue)) {
      GstDataQueueSize size;

      gst_data_queue_get_level (oq->queue, &size);
      if (IS_FILLED (oq, visible, size.visible)) {
        /* grow the full queue by one visible item so data keeps flowing */
        oq->max_size.visible = size.visible + 1;
        GST_DEBUG_ID (oq->debug_id,
            "queue is filled, bumping its max visible to %d",
            oq->max_size.visible);
        gst_data_queue_limits_changed (oq->queue);

    if (!gst_data_queue_is_empty (oq->queue) || oq->is_sparse)

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  gst_object_unref (mq);

  GST_DEBUG_OBJECT (mq, "All queues are empty, signalling it");
  g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_UNDERRUN], 0);
/* single_queue_check_full:
 * GstDataQueueCheckFullFunction deciding whether @sq must be considered
 * full for the given levels (@visible items, @bytes, and the queue's
 * accumulated @sq->cur_time).
 *
 * Rules visible in this excerpt:
 *  - if the owning multiqueue is gone (weak ref returned NULL) we report
 *    "full" as the safe default;
 *  - EOS / segment-done queues are always full (nothing more can come);
 *  - the visible-items limit is enforced unless buffering mode is active;
 *  - bytes are always checked; time is checked for non-sparse streams (or
 *    when not syncing by running time), with the unlinked-cache-time slack
 *    subtracted for not-linked pads;
 *  - the TIZEN build variant additionally folds an "extra size" time check
 *    into the result.
 */
3464 single_queue_check_full (GstDataQueue * dataq, guint visible, guint bytes,
3465 guint64 time, GstSingleQueue * sq)
/* Weak ref: owning element may already be disposed. */
3468 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
/* No owner → claim full so producers stop pushing into a dead queue. */
3471 GST_ERROR ("No multique set anymore, let's say we are full");
3476 GST_DEBUG_ID (sq->debug_id,
3477 "visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
3478 G_GUINT64_FORMAT, visible, sq->max_size.visible, bytes,
3479 sq->max_size.bytes, sq->cur_time, sq->max_size.time);
3481 /* we are always filled on EOS */
3482 if (sq->is_eos || sq->is_segment_done) {
3487 /* we never go past the max visible items unless we are in buffering mode */
3488 if (!mq->use_buffering && IS_FILLED (sq, visible, visible)) {
3493 /* check time or bytes */
3494 #ifdef TIZEN_FEATURE_MQ_MODIFICATION_EXTRA_SIZE_TIME
3495 res = IS_FILLED_EXTRA (sq, time, sq->cur_time) || IS_FILLED (sq, bytes, bytes);
3497 res = IS_FILLED (sq, bytes, bytes);
3499 /* We only care about limits in time if we're not a sparse stream or
3500 * we're not syncing by running time */
3501 if (!sq->is_sparse || !mq->sync_by_running_time) {
3502 /* If unlinked, take into account the extra unlinked cache time */
3503 if (mq->sync_by_running_time && sq->srcresult == GST_FLOW_NOT_LINKED) {
/* Only the time beyond the allowed unlinked cache counts against the limit. */
3504 if (sq->cur_time > mq->unlinked_cache_time)
3505 res |= IS_FILLED (sq, time, sq->cur_time - mq->unlinked_cache_time);
3509 res |= IS_FILLED (sq, time, sq->cur_time);
3512 gst_object_unref (mq);
/* gst_single_queue_flush_queue:
 * Drains every item from @sq's data queue.
 *
 * @full: when FALSE this is a partial flush — sticky events (other than
 *        SEGMENT and EOS) popped from the queue are re-stored on the source
 *        pad so downstream keeps a consistent sticky-event set; when TRUE
 *        everything is discarded outright.
 *
 * If a pop fails while the queue is non-empty the queue was flushing; the
 * flushing flag is temporarily cleared so the remaining items (and their
 * sticky events) can still be rescued, and is restored after the drain.
 * Finally the buffering state is recomputed/posted under the multiqueue
 * lock.  Both the srcpad and the multiqueue are obtained via weak refs and
 * released before returning.
 */
3518 gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full)
3520 GstDataQueueItem *sitem;
3521 GstMultiQueueItem *mitem;
3522 gboolean was_flushing = FALSE;
/* Weak refs: pad/element may already be disposed during shutdown. */
3523 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3524 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3526 while (!gst_data_queue_is_empty (sq->queue)) {
3527 GstMiniObject *data;
3529 /* FIXME: If this fails here although the queue is not empty,
3530 * we're flushing... but we want to rescue all sticky
3531 * events nonetheless.
/* Pop failed → queue is flushing; un-flush so the drain can continue. */
3533 if (!gst_data_queue_pop (sq->queue, &sitem)) {
3534 was_flushing = TRUE;
3535 gst_data_queue_set_flushing (sq->queue, FALSE);
3539 mitem = (GstMultiQueueItem *) sitem;
3541 data = sitem->object;
/* Partial flush: keep sticky events alive on the srcpad, except SEGMENT
 * and EOS which must not survive a flush. */
3543 if (!full && !mitem->is_query && GST_IS_EVENT (data)
3544 && srcpad && GST_EVENT_IS_STICKY (data)
3545 && GST_EVENT_TYPE (data) != GST_EVENT_SEGMENT
3546 && GST_EVENT_TYPE (data) != GST_EVENT_EOS) {
3547 gst_pad_store_sticky_event (srcpad, GST_EVENT_CAST (data));
/* Item destroy releases the contained object. */
3550 sitem->destroy (sitem);
3552 gst_clear_object (&srcpad);
3554 gst_data_queue_flush (sq->queue);
/* Restore the flushing state we cleared above.  NOTE(review): sampled
 * excerpt — presumably guarded by `was_flushing`; confirm in full source. */
3556 gst_data_queue_set_flushing (sq->queue, TRUE);
/* Recompute buffering level now that the queue is empty, and post the
 * buffering message outside the lock. */
3559 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3560 update_buffering (mq, sq);
3561 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3562 gst_multi_queue_post_buffering (mq);
3563 gst_object_unref (mq);
/* gst_single_queue_unref:
 * Drops one reference on @sq; when the count reaches zero, tears the
 * queue down: flushes and releases the underlying GstDataQueue, clears
 * both condition variables, clears the weak refs to the pads and the
 * owning multiqueue, and (in debug builds) frees the debug id string.
 * NOTE(review): sampled excerpt — the final g_free of the struct itself
 * is outside the visible lines; confirm in full source.
 */
3568 gst_single_queue_unref (GstSingleQueue * sq)
3570 if (g_atomic_int_dec_and_test (&sq->refcount)) {
/* Last ref gone: release all resources owned by the single queue. */
3572 gst_data_queue_flush (sq->queue);
3573 g_object_unref (sq->queue);
3574 g_cond_clear (&sq->turn);
3575 g_cond_clear (&sq->query_handled);
3576 g_weak_ref_clear (&sq->sinkpad);
3577 g_weak_ref_clear (&sq->srcpad);
3578 g_weak_ref_clear (&sq->mqueue);
3579 #ifndef GST_DISABLE_GST_DEBUG
3580 g_free (sq->debug_id);
/* gst_single_queue_ref:
 * Atomically adds one reference to @squeue and (per the return type)
 * hands it back to the caller for convenient chaining.
 * NOTE(review): sampled excerpt — the `return squeue;` line falls outside
 * the visible lines; confirm in full source. */
3587 static GstSingleQueue *
3588 gst_single_queue_ref (GstSingleQueue * squeue)
3590 g_atomic_int_inc (&squeue->refcount);
3595 static GstSingleQueue *
3596 gst_single_queue_new (GstMultiQueue * mqueue, guint id)
3598 GstPad *srcpad, *sinkpad;
3600 GstPadTemplate *templ;
3603 guint temp_id = (id == -1) ? 0 : id;
3605 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
3607 /* Find an unused queue ID, if possible the passed one */
3608 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
3609 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
3610 /* This works because the IDs are sorted in ascending order */
3611 if (sq2->id == temp_id) {
3612 /* If this ID was requested by the caller return NULL,
3613 * otherwise just get us the next one */
3615 temp_id = sq2->id + 1;
3617 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
3620 } else if (sq2->id > temp_id) {
3625 sq = g_new0 (GstSingleQueue, 1);
3626 g_atomic_int_set (&sq->refcount, 1);
3630 #ifndef GST_DISABLE_GST_DEBUG
3632 g_strdup_printf ("%s:queue_%d", GST_OBJECT_NAME (mqueue), temp_id);
3634 sq->groupid = DEFAULT_PAD_GROUP_ID;
3635 sq->group_high_time = GST_CLOCK_STIME_NONE;
3637 mqueue->queues = g_list_insert_before (mqueue->queues, tmp, sq);
3638 mqueue->queues_cookie++;
3640 /* copy over max_size and extra_size so we don't need to take the lock
3641 * any longer when checking if the queue is full. */
3642 sq->max_size.visible = mqueue->max_size.visible;
3643 sq->max_size.bytes = mqueue->max_size.bytes;
3644 sq->max_size.time = mqueue->max_size.time;
3646 sq->extra_size.visible = mqueue->extra_size.visible;
3647 sq->extra_size.bytes = mqueue->extra_size.bytes;
3648 sq->extra_size.time = mqueue->extra_size.time;
3650 GST_DEBUG_OBJECT (mqueue, "Creating GstSingleQueue id:%d", sq->id);
3652 g_weak_ref_init (&sq->mqueue, mqueue);
3653 sq->srcresult = GST_FLOW_FLUSHING;
3655 sq->queue = gst_data_queue_new ((GstDataQueueCheckFullFunction)
3656 single_queue_check_full,
3657 (GstDataQueueFullCallback) single_queue_overrun_cb,
3658 (GstDataQueueEmptyCallback) single_queue_underrun_cb, sq);
3660 sq->is_sparse = FALSE;
3661 sq->flushing = FALSE;
3663 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
3664 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
3668 sq->next_time = GST_CLOCK_STIME_NONE;
3669 sq->last_time = GST_CLOCK_STIME_NONE;
3670 g_cond_init (&sq->turn);
3671 g_cond_init (&sq->query_handled);
3673 sq->sinktime = GST_CLOCK_STIME_NONE;
3674 sq->srctime = GST_CLOCK_STIME_NONE;
3675 sq->sink_tainted = TRUE;
3676 sq->src_tainted = TRUE;
3678 sq->sink_stream_gid = sq->src_stream_gid = GST_GROUP_ID_INVALID;
3679 sq->sink_stream_gid_changed = FALSE;
3680 sq->src_stream_gid_changed = FALSE;
3682 name = g_strdup_printf ("sink_%u", sq->id);
3683 templ = gst_static_pad_template_get (&sinktemplate);
3684 sinkpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
3685 "direction", templ->direction, "template", templ, NULL);
3686 g_weak_ref_init (&sq->sinkpad, sinkpad);
3687 gst_object_unref (templ);
3690 GST_MULTIQUEUE_PAD (sinkpad)->sq = sq;
3692 gst_pad_set_chain_function (sinkpad,
3693 GST_DEBUG_FUNCPTR (gst_multi_queue_chain));
3694 gst_pad_set_activatemode_function (sinkpad,
3695 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_activate_mode));
3696 gst_pad_set_event_full_function (sinkpad,
3697 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_event));
3698 gst_pad_set_query_function (sinkpad,
3699 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_query));
3700 gst_pad_set_iterate_internal_links_function (sinkpad,
3701 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
3702 GST_OBJECT_FLAG_SET (sinkpad, GST_PAD_FLAG_PROXY_CAPS);
3704 name = g_strdup_printf ("src_%u", sq->id);
3705 templ = gst_static_pad_template_get (&srctemplate);
3706 srcpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
3707 "direction", templ->direction, "template", templ, NULL);
3708 g_weak_ref_init (&sq->srcpad, srcpad);
3709 gst_object_unref (templ);
3712 GST_MULTIQUEUE_PAD (srcpad)->sq = gst_single_queue_ref (sq);
3714 gst_pad_set_activatemode_function (srcpad,
3715 GST_DEBUG_FUNCPTR (gst_multi_queue_src_activate_mode));
3716 gst_pad_set_event_function (srcpad,
3717 GST_DEBUG_FUNCPTR (gst_multi_queue_src_event));
3718 gst_pad_set_query_function (srcpad,
3719 GST_DEBUG_FUNCPTR (gst_multi_queue_src_query));
3720 gst_pad_set_iterate_internal_links_function (srcpad,
3721 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
3722 GST_OBJECT_FLAG_SET (srcpad, GST_PAD_FLAG_PROXY_CAPS);
3724 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
3726 /* only activate the pads when we are not in the NULL state
3727 * and add the pad under the state_lock to prevent state changes
3728 * between activating and adding */
3729 g_rec_mutex_lock (GST_STATE_GET_LOCK (mqueue));
3730 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
3731 gst_pad_set_active (srcpad, TRUE);
3732 gst_pad_set_active (sinkpad, TRUE);
3734 gst_element_add_pad (GST_ELEMENT (mqueue), srcpad);
3735 gst_element_add_pad (GST_ELEMENT (mqueue), sinkpad);
3736 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
3737 gst_single_queue_start (mqueue, sq);
3739 g_rec_mutex_unlock (GST_STATE_GET_LOCK (mqueue));
3741 GST_DEBUG_ID (sq->debug_id, "GstSingleQueue created and pads added");