2 * Copyright (C) 2006 Edward Hervey <edward@fluendo.com>
3 * Copyright (C) 2007 Jan Schmidt <jan@fluendo.com>
4 * Copyright (C) 2007 Wim Taymans <wim@fluendo.com>
5 * Copyright (C) 2011 Sebastian Dröge <sebastian.droege@collabora.co.uk>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:element-multiqueue
28 * @see_also: #GstQueue
30 * Multiqueue is similar to a normal #GstQueue with the following additional
33 * 1) Multiple streamhandling
35 * * The element handles queueing data on more than one stream at once. To
36 * achieve such a feature it has request sink pads (sink%u) and
37 * 'sometimes' src pads (src%u). When requesting a given sinkpad with gst_element_request_pad(),
38 * the associated srcpad for that stream will be created.
39 * Example: requesting sink1 will generate src1.
 * 2) Non-starvation on multiple streams
43 * * If more than one stream is used with the element, the streams' queues
44 * will be dynamically grown (up to a limit), in order to ensure that no
45 * stream is risking data starvation. This guarantees that at any given
46 * time there are at least N bytes queued and available for each individual
47 * stream. If an EOS event comes through a srcpad, the associated queue will be
48 * considered as 'not-empty' in the queue-size-growing algorithm.
50 * 3) Non-linked srcpads graceful handling
52 * * In order to better support dynamic switching between streams, the multiqueue
53 * (unlike the current GStreamer queue) continues to push buffers on non-linked
54 * pads rather than shutting down. In addition, to prevent a non-linked stream from very quickly consuming all
55 * available buffers and thus 'racing ahead' of the other streams, the element
56 * must ensure that buffers and inlined events for a non-linked stream are pushed
57 * in the same order as they were received, relative to the other streams
58 * controlled by the element. This means that a buffer cannot be pushed to a
59 * non-linked pad any sooner than buffers in any other stream which were received
62 * Data is queued until one of the limits specified by the
63 * #GstMultiQueue:max-size-buffers, #GstMultiQueue:max-size-bytes and/or
64 * #GstMultiQueue:max-size-time properties has been reached. Any attempt to push
65 * more buffers into the queue will block the pushing thread until more space
66 * becomes available. #GstMultiQueue:extra-size-buffers,
69 * #GstMultiQueue:extra-size-bytes and #GstMultiQueue:extra-size-time are
72 * The default queue size limits are 5 buffers, 10MB of data, or
73 * two second worth of data, whichever is reached first. Note that the number
74 * of buffers will dynamically grow depending on the fill level of
77 * The #GstMultiQueue::underrun signal is emitted when all of the queues
78 * are empty. The #GstMultiQueue::overrun signal is emitted when one of the
80 * Both signals are emitted from the context of the streaming thread.
82 * When using #GstMultiQueue:sync-by-running-time the unlinked streams will
83 * be throttled by the highest running-time of linked streams. This allows
84 * further relinking of those unlinked streams without them being in the
85 * future (i.e. to achieve gapless playback).
86 * When dealing with streams which have got different consumption requirements
 * downstream (ex: video decoders which will consume more buffers (in time) than
88 * audio decoders), it is recommended to group streams of the same type
89 * by using the pad "group-id" property. This will further throttle streams
90 * in time within that group.
99 #include "gstmultiqueue.h"
100 #include <gst/glib-compat-private.h>
104 * @sinkpad: associated sink #GstPad
105 * @srcpad: associated source #GstPad
107 * Structure containing all information and properties about
110 typedef struct _GstSingleQueue GstSingleQueue;
112 struct _GstSingleQueue
114 /* unique identifier of the queue */
116 /* group of streams to which this queue belongs to */
118 GstClockTimeDiff group_high_time;
120 GstMultiQueue *mqueue;
125 /* flowreturn of previous srcpad push */
126 GstFlowReturn srcresult;
127 /* If something was actually pushed on
128 * this pad after flushing/pad activation
129 * and the srcresult corresponds to something
135 GstSegment sink_segment;
136 GstSegment src_segment;
137 gboolean has_src_segment; /* preferred over initializing the src_segment to
                                 * UNDEFINED as this doesn't require adding ifs
139 * in every segment usage */
141 /* position of src/sink */
142 GstClockTimeDiff sinktime, srctime;
143 /* cached input value, used for interleave */
144 GstClockTimeDiff cached_sinktime;
145 /* TRUE if either position needs to be recalculated */
146 gboolean sink_tainted, src_tainted;
150 GstDataQueueSize max_size, extra_size;
151 GstClockTime cur_time;
153 gboolean is_segment_done;
158 /* Protected by global lock */
159 guint32 nextid; /* ID of the next object waiting to be pushed */
160 guint32 oldid; /* ID of the last object pushed (last in a series) */
161 guint32 last_oldid; /* Previously observed old_id, reset to MAXUINT32 on flush */
162 GstClockTimeDiff next_time; /* End running time of next buffer to be pushed */
163 GstClockTimeDiff last_time; /* Start running time of last pushed buffer */
164 GCond turn; /* SingleQueue turn waiting conditional */
166 /* for serialized queries */
169 GstQuery *last_handled_query;
171 /* For interleave calculation */
172 GThread *thread; /* Streaming thread of SingleQueue */
173 GstClockTime interleave; /* Calculated interleve within the thread */
177 /* Extension of GstDataQueueItem structure for our usage */
178 typedef struct _GstMultiQueueItem GstMultiQueueItem;
180 struct _GstMultiQueueItem
182 GstMiniObject *object;
187 GDestroyNotify destroy;
193 static GstSingleQueue *gst_single_queue_new (GstMultiQueue * mqueue, guint id);
194 static void gst_single_queue_free (GstSingleQueue * squeue);
196 static void wake_up_next_non_linked (GstMultiQueue * mq);
197 static void compute_high_id (GstMultiQueue * mq);
198 static void compute_high_time (GstMultiQueue * mq, guint groupid);
199 static void single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
200 static void single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
202 static void update_buffering (GstMultiQueue * mq, GstSingleQueue * sq);
203 static void gst_multi_queue_post_buffering (GstMultiQueue * mq);
204 static void recheck_buffering_status (GstMultiQueue * mq);
206 static void gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full);
208 static void calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq);
210 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink_%u",
213 GST_STATIC_CAPS_ANY);
215 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src_%u",
218 GST_STATIC_CAPS_ANY);
220 GST_DEBUG_CATEGORY_STATIC (multi_queue_debug);
221 #define GST_CAT_DEFAULT (multi_queue_debug)
223 /* Signals and args */
231 /* default limits, we try to keep up to 2 seconds of data and if there is not
232 * time, up to 10 MB. The number of buffers is dynamically scaled to make sure
233 * there is data in the queues. Normally, the byte and time limits are not hit
 * in these conditions. */
235 #define DEFAULT_MAX_SIZE_BYTES 10 * 1024 * 1024 /* 10 MB */
236 #define DEFAULT_MAX_SIZE_BUFFERS 5
237 #define DEFAULT_MAX_SIZE_TIME 2 * GST_SECOND
239 /* second limits. When we hit one of the above limits we are probably dealing
240 * with a badly muxed file and we scale the limits to these emergency values.
241 * This is currently not yet implemented.
242 * Since we dynamically scale the queue buffer size up to the limits but avoid
243 * going above the max-size-buffers when we can, we don't really need this
244 * additional extra size. */
245 #define DEFAULT_EXTRA_SIZE_BYTES 10 * 1024 * 1024 /* 10 MB */
246 #define DEFAULT_EXTRA_SIZE_BUFFERS 5
247 #ifdef TIZEN_FEATURE_MQ_MODIFICATION_EXTRA_SIZE_TIME
248 #define DEFAULT_EXTRA_SIZE_TIME 10 * GST_SECOND
250 #define DEFAULT_EXTRA_SIZE_TIME 3 * GST_SECOND
253 #define DEFAULT_USE_BUFFERING FALSE
254 #define DEFAULT_LOW_WATERMARK 0.01
255 #define DEFAULT_HIGH_WATERMARK 0.99
256 #define DEFAULT_SYNC_BY_RUNNING_TIME FALSE
257 #define DEFAULT_USE_INTERLEAVE FALSE
258 #define DEFAULT_UNLINKED_CACHE_TIME 250 * GST_MSECOND
260 #define DEFAULT_MINIMUM_INTERLEAVE (250 * GST_MSECOND)
265 PROP_EXTRA_SIZE_BYTES,
266 PROP_EXTRA_SIZE_BUFFERS,
267 PROP_EXTRA_SIZE_TIME,
269 PROP_MAX_SIZE_BUFFERS,
271 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
272 PROP_CURR_SIZE_BYTES,
279 PROP_SYNC_BY_RUNNING_TIME,
281 PROP_UNLINKED_CACHE_TIME,
282 PROP_MINIMUM_INTERLEAVE,
286 /* Explanation for buffer levels and percentages:
288 * The buffering_level functions here return a value in a normalized range
289 * that specifies the current fill level of a queue. The range goes from 0 to
290 * MAX_BUFFERING_LEVEL. The low/high watermarks also use this same range.
292 * This is not to be confused with the buffering_percent value, which is
293 * a *relative* quantity - relative to the low/high watermarks.
294 * buffering_percent = 0% means overall buffering_level is at the low watermark.
295 * buffering_percent = 100% means overall buffering_level is at the high watermark.
296 * buffering_percent is used for determining if the fill level has reached
297 * the high watermark, and for producing BUFFERING messages. This value
298 * always uses a 0..100 range (since it is a percentage).
300 * To avoid future confusions, whenever "buffering level" is mentioned, it
301 * refers to the absolute level which is in the 0..MAX_BUFFERING_LEVEL
302 * range. Whenever "buffering_percent" is mentioned, it refers to the
303 * percentage value that is relative to the low/high watermark. */
305 /* Using a buffering level range of 0..1000000 to allow for a
306 * resolution in ppm (1 ppm = 0.0001%) */
307 #define MAX_BUFFERING_LEVEL 1000000
309 /* How much 1% makes up in the buffer level range */
310 #define BUF_LEVEL_PERCENT_FACTOR ((MAX_BUFFERING_LEVEL) / 100)
312 /* GstMultiQueuePad */
314 #define DEFAULT_PAD_GROUP_ID 0
322 #define GST_TYPE_MULTIQUEUE_PAD (gst_multiqueue_pad_get_type())
323 #define GST_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePad))
324 #define GST_IS_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_MULTIQUEUE_PAD))
325 #define GST_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
326 #define GST_IS_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_MULTIQUEUE_PAD))
327 #define GST_MULTIQUEUE_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
329 struct _GstMultiQueuePad
336 struct _GstMultiQueuePadClass
338 GstPadClass parent_class;
341 GType gst_multiqueue_pad_get_type (void);
343 G_DEFINE_TYPE (GstMultiQueuePad, gst_multiqueue_pad, GST_TYPE_PAD);
345 gst_multiqueue_pad_get_property (GObject * object, guint prop_id,
346 GValue * value, GParamSpec * pspec)
348 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
351 case PROP_PAD_GROUP_ID:
353 g_value_set_uint (value, pad->sq->groupid);
356 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
362 gst_multiqueue_pad_set_property (GObject * object, guint prop_id,
363 const GValue * value, GParamSpec * pspec)
365 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
368 case PROP_PAD_GROUP_ID:
369 GST_OBJECT_LOCK (pad);
371 pad->sq->groupid = g_value_get_uint (value);
372 GST_OBJECT_UNLOCK (pad);
375 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
381 gst_multiqueue_pad_class_init (GstMultiQueuePadClass * klass)
383 GObjectClass *gobject_class = (GObjectClass *) klass;
385 gobject_class->set_property = gst_multiqueue_pad_set_property;
386 gobject_class->get_property = gst_multiqueue_pad_get_property;
389 * GstMultiQueuePad:group-id:
391 * Group to which this pad belongs.
395 g_object_class_install_property (gobject_class, PROP_PAD_GROUP_ID,
396 g_param_spec_uint ("group-id", "Group ID",
397 "Group to which this pad belongs", 0, G_MAXUINT32,
398 DEFAULT_PAD_GROUP_ID, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
402 gst_multiqueue_pad_init (GstMultiQueuePad * pad)
408 #define GST_MULTI_QUEUE_MUTEX_LOCK(q) G_STMT_START { \
409 g_mutex_lock (&q->qlock); \
412 #define GST_MULTI_QUEUE_MUTEX_UNLOCK(q) G_STMT_START { \
413 g_mutex_unlock (&q->qlock); \
416 #define SET_PERCENT(mq, perc) G_STMT_START { \
417 if (perc != mq->buffering_percent) { \
418 mq->buffering_percent = perc; \
419 mq->buffering_percent_changed = TRUE; \
420 GST_DEBUG_OBJECT (mq, "buffering %d percent", perc); \
424 /* Convenience function */
425 static inline GstClockTimeDiff
426 my_segment_to_running_time (GstSegment * segment, GstClockTime val)
428 GstClockTimeDiff res = GST_CLOCK_STIME_NONE;
430 if (GST_CLOCK_TIME_IS_VALID (val)) {
432 gst_segment_to_running_time_full (segment, GST_FORMAT_TIME, val, &val);
441 static void gst_multi_queue_finalize (GObject * object);
442 static void gst_multi_queue_set_property (GObject * object,
443 guint prop_id, const GValue * value, GParamSpec * pspec);
444 static void gst_multi_queue_get_property (GObject * object,
445 guint prop_id, GValue * value, GParamSpec * pspec);
447 static GstPad *gst_multi_queue_request_new_pad (GstElement * element,
448 GstPadTemplate * temp, const gchar * name, const GstCaps * caps);
449 static void gst_multi_queue_release_pad (GstElement * element, GstPad * pad);
450 static GstStateChangeReturn gst_multi_queue_change_state (GstElement *
451 element, GstStateChange transition);
453 static void gst_multi_queue_loop (GstPad * pad);
456 GST_DEBUG_CATEGORY_INIT (multi_queue_debug, "multiqueue", 0, "multiqueue element");
457 #define gst_multi_queue_parent_class parent_class
458 G_DEFINE_TYPE_WITH_CODE (GstMultiQueue, gst_multi_queue, GST_TYPE_ELEMENT,
461 static guint gst_multi_queue_signals[LAST_SIGNAL] = { 0 };
464 gst_multi_queue_class_init (GstMultiQueueClass * klass)
466 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
467 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
469 gobject_class->set_property = gst_multi_queue_set_property;
470 gobject_class->get_property = gst_multi_queue_get_property;
475 * GstMultiQueue::underrun:
476 * @multiqueue: the multiqueue instance
478 * This signal is emitted from the streaming thread when there is
479 * no data in any of the queues inside the multiqueue instance (underrun).
481 * This indicates either starvation or EOS from the upstream data sources.
483 gst_multi_queue_signals[SIGNAL_UNDERRUN] =
484 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
485 G_STRUCT_OFFSET (GstMultiQueueClass, underrun), NULL, NULL,
486 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
489 * GstMultiQueue::overrun:
490 * @multiqueue: the multiqueue instance
492 * Reports that one of the queues in the multiqueue is full (overrun).
493 * A queue is full if the total amount of data inside it (num-buffers, time,
494 * size) is higher than the boundary values which can be set through the
495 * GObject properties.
497 * This can be used as an indicator of pre-roll.
499 gst_multi_queue_signals[SIGNAL_OVERRUN] =
500 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
501 G_STRUCT_OFFSET (GstMultiQueueClass, overrun), NULL, NULL,
502 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
506 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BYTES,
507 g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
508 "Max. amount of data in the queue (bytes, 0=disable)",
509 0, G_MAXUINT, DEFAULT_MAX_SIZE_BYTES,
510 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
511 G_PARAM_STATIC_STRINGS));
512 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BUFFERS,
513 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
514 "Max. number of buffers in the queue (0=disable)", 0, G_MAXUINT,
515 DEFAULT_MAX_SIZE_BUFFERS,
516 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
517 G_PARAM_STATIC_STRINGS));
518 g_object_class_install_property (gobject_class, PROP_MAX_SIZE_TIME,
519 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
520 "Max. amount of data in the queue (in ns, 0=disable)", 0, G_MAXUINT64,
521 DEFAULT_MAX_SIZE_TIME, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
522 G_PARAM_STATIC_STRINGS));
523 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
524 g_object_class_install_property (gobject_class, PROP_CURR_SIZE_BYTES,
525 g_param_spec_uint ("curr-size-bytes", "Current buffered size (kB)",
526 "buffered amount of data in the queue (bytes)", 0, G_MAXUINT,
527 0, G_PARAM_READABLE | GST_PARAM_MUTABLE_PLAYING |
528 G_PARAM_STATIC_STRINGS));
530 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BYTES,
531 g_param_spec_uint ("extra-size-bytes", "Extra Size (kB)",
532 "Amount of data the queues can grow if one of them is empty (bytes, 0=disable)"
533 " (NOT IMPLEMENTED)",
534 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BYTES,
535 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
536 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BUFFERS,
537 g_param_spec_uint ("extra-size-buffers", "Extra Size (buffers)",
538 "Amount of buffers the queues can grow if one of them is empty (0=disable)"
539 " (NOT IMPLEMENTED)",
540 0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BUFFERS,
541 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
542 g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_TIME,
543 g_param_spec_uint64 ("extra-size-time", "Extra Size (ns)",
544 "Amount of time the queues can grow if one of them is empty (in ns, 0=disable)"
545 " (NOT IMPLEMENTED)",
546 0, G_MAXUINT64, DEFAULT_EXTRA_SIZE_TIME,
547 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
550 * GstMultiQueue:use-buffering:
552 * Enable the buffering option in multiqueue so that BUFFERING messages are
553 * emitted based on low-/high-percent thresholds.
555 g_object_class_install_property (gobject_class, PROP_USE_BUFFERING,
556 g_param_spec_boolean ("use-buffering", "Use buffering",
557 "Emit GST_MESSAGE_BUFFERING based on low-/high-percent thresholds",
558 DEFAULT_USE_BUFFERING, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
559 G_PARAM_STATIC_STRINGS));
561 * GstMultiQueue:low-percent:
563 * Low threshold percent for buffering to start.
565 g_object_class_install_property (gobject_class, PROP_LOW_PERCENT,
566 g_param_spec_int ("low-percent", "Low percent",
567 "Low threshold for buffering to start. Only used if use-buffering is True "
568 "(Deprecated: use low-watermark instead)",
569 0, 100, DEFAULT_LOW_WATERMARK * 100,
570 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
572 * GstMultiQueue:high-percent:
574 * High threshold percent for buffering to finish.
576 g_object_class_install_property (gobject_class, PROP_HIGH_PERCENT,
577 g_param_spec_int ("high-percent", "High percent",
578 "High threshold for buffering to finish. Only used if use-buffering is True "
579 "(Deprecated: use high-watermark instead)",
580 0, 100, DEFAULT_HIGH_WATERMARK * 100,
581 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
583 * GstMultiQueue:low-watermark:
585 * Low threshold watermark for buffering to start.
589 g_object_class_install_property (gobject_class, PROP_LOW_WATERMARK,
590 g_param_spec_double ("low-watermark", "Low watermark",
591 "Low threshold for buffering to start. Only used if use-buffering is True",
592 0.0, 1.0, DEFAULT_LOW_WATERMARK,
593 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
595 * GstMultiQueue:high-watermark:
597 * High threshold watermark for buffering to finish.
601 g_object_class_install_property (gobject_class, PROP_HIGH_WATERMARK,
602 g_param_spec_double ("high-watermark", "High watermark",
603 "High threshold for buffering to finish. Only used if use-buffering is True",
604 0.0, 1.0, DEFAULT_HIGH_WATERMARK,
605 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
608 * GstMultiQueue:sync-by-running-time:
610 * If enabled multiqueue will synchronize deactivated or not-linked streams
611 * to the activated and linked streams by taking the running time.
612 * Otherwise multiqueue will synchronize the deactivated or not-linked
613 * streams by keeping the order in which buffers and events arrived compared
614 * to active and linked streams.
616 g_object_class_install_property (gobject_class, PROP_SYNC_BY_RUNNING_TIME,
617 g_param_spec_boolean ("sync-by-running-time", "Sync By Running Time",
618 "Synchronize deactivated or not-linked streams by running time",
619 DEFAULT_SYNC_BY_RUNNING_TIME,
620 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
622 g_object_class_install_property (gobject_class, PROP_USE_INTERLEAVE,
623 g_param_spec_boolean ("use-interleave", "Use interleave",
624 "Adjust time limits based on input interleave",
625 DEFAULT_USE_INTERLEAVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
627 g_object_class_install_property (gobject_class, PROP_UNLINKED_CACHE_TIME,
628 g_param_spec_uint64 ("unlinked-cache-time", "Unlinked cache time (ns)",
629 "Extra buffering in time for unlinked streams (if 'sync-by-running-time')",
630 0, G_MAXUINT64, DEFAULT_UNLINKED_CACHE_TIME,
631 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
632 G_PARAM_STATIC_STRINGS));
634 g_object_class_install_property (gobject_class, PROP_MINIMUM_INTERLEAVE,
635 g_param_spec_uint64 ("min-interleave-time", "Minimum interleave time",
636 "Minimum extra buffering for deinterleaving (size of the queues) when use-interleave=true",
637 0, G_MAXUINT64, DEFAULT_MINIMUM_INTERLEAVE,
638 G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
639 G_PARAM_STATIC_STRINGS));
641 gobject_class->finalize = gst_multi_queue_finalize;
643 gst_element_class_set_static_metadata (gstelement_class,
645 "Generic", "Multiple data queue", "Edward Hervey <edward@fluendo.com>");
646 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
647 &sinktemplate, GST_TYPE_MULTIQUEUE_PAD);
648 gst_element_class_add_static_pad_template (gstelement_class, &srctemplate);
650 gstelement_class->request_new_pad =
651 GST_DEBUG_FUNCPTR (gst_multi_queue_request_new_pad);
652 gstelement_class->release_pad =
653 GST_DEBUG_FUNCPTR (gst_multi_queue_release_pad);
654 gstelement_class->change_state =
655 GST_DEBUG_FUNCPTR (gst_multi_queue_change_state);
659 gst_multi_queue_init (GstMultiQueue * mqueue)
661 mqueue->nbqueues = 0;
662 mqueue->queues = NULL;
664 mqueue->max_size.bytes = DEFAULT_MAX_SIZE_BYTES;
665 mqueue->max_size.visible = DEFAULT_MAX_SIZE_BUFFERS;
666 mqueue->max_size.time = DEFAULT_MAX_SIZE_TIME;
668 mqueue->extra_size.bytes = DEFAULT_EXTRA_SIZE_BYTES;
669 mqueue->extra_size.visible = DEFAULT_EXTRA_SIZE_BUFFERS;
670 mqueue->extra_size.time = DEFAULT_EXTRA_SIZE_TIME;
672 mqueue->use_buffering = DEFAULT_USE_BUFFERING;
673 mqueue->low_watermark = DEFAULT_LOW_WATERMARK * MAX_BUFFERING_LEVEL;
674 mqueue->high_watermark = DEFAULT_HIGH_WATERMARK * MAX_BUFFERING_LEVEL;
676 mqueue->sync_by_running_time = DEFAULT_SYNC_BY_RUNNING_TIME;
677 mqueue->use_interleave = DEFAULT_USE_INTERLEAVE;
678 mqueue->min_interleave_time = DEFAULT_MINIMUM_INTERLEAVE;
679 mqueue->unlinked_cache_time = DEFAULT_UNLINKED_CACHE_TIME;
683 mqueue->high_time = GST_CLOCK_STIME_NONE;
685 g_mutex_init (&mqueue->qlock);
686 g_mutex_init (&mqueue->buffering_post_lock);
690 gst_multi_queue_finalize (GObject * object)
692 GstMultiQueue *mqueue = GST_MULTI_QUEUE (object);
694 g_list_foreach (mqueue->queues, (GFunc) gst_single_queue_free, NULL);
695 g_list_free (mqueue->queues);
696 mqueue->queues = NULL;
697 mqueue->queues_cookie++;
699 /* free/unref instance data */
700 g_mutex_clear (&mqueue->qlock);
701 g_mutex_clear (&mqueue->buffering_post_lock);
703 G_OBJECT_CLASS (parent_class)->finalize (object);
706 #define SET_CHILD_PROPERTY(mq,format) G_STMT_START { \
707 GList * tmp = mq->queues; \
709 GstSingleQueue *q = (GstSingleQueue*)tmp->data; \
710 q->max_size.format = mq->max_size.format; \
711 update_buffering (mq, q); \
712 gst_data_queue_limits_changed (q->queue); \
713 tmp = g_list_next(tmp); \
718 gst_multi_queue_set_property (GObject * object, guint prop_id,
719 const GValue * value, GParamSpec * pspec)
721 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
724 case PROP_MAX_SIZE_BYTES:
725 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
726 mq->max_size.bytes = g_value_get_uint (value);
727 SET_CHILD_PROPERTY (mq, bytes);
728 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
729 gst_multi_queue_post_buffering (mq);
731 case PROP_MAX_SIZE_BUFFERS:
734 gint new_size = g_value_get_uint (value);
736 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
738 mq->max_size.visible = new_size;
742 GstDataQueueSize size;
743 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
744 gst_data_queue_get_level (q->queue, &size);
746 GST_DEBUG_OBJECT (mq, "Queue %d: Requested buffers size: %d,"
747 " current: %d, current max %d", q->id, new_size, size.visible,
748 q->max_size.visible);
750 /* do not reduce max size below current level if the single queue
751 * has grown because of empty queue */
753 q->max_size.visible = new_size;
754 } else if (q->max_size.visible == 0) {
755 q->max_size.visible = MAX (new_size, size.visible);
756 } else if (new_size > size.visible) {
757 q->max_size.visible = new_size;
759 update_buffering (mq, q);
760 gst_data_queue_limits_changed (q->queue);
761 tmp = g_list_next (tmp);
764 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
765 gst_multi_queue_post_buffering (mq);
769 case PROP_MAX_SIZE_TIME:
770 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
771 mq->max_size.time = g_value_get_uint64 (value);
772 SET_CHILD_PROPERTY (mq, time);
773 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
774 gst_multi_queue_post_buffering (mq);
776 case PROP_EXTRA_SIZE_BYTES:
777 mq->extra_size.bytes = g_value_get_uint (value);
779 case PROP_EXTRA_SIZE_BUFFERS:
780 mq->extra_size.visible = g_value_get_uint (value);
782 case PROP_EXTRA_SIZE_TIME:
783 mq->extra_size.time = g_value_get_uint64 (value);
785 case PROP_USE_BUFFERING:
786 mq->use_buffering = g_value_get_boolean (value);
787 recheck_buffering_status (mq);
789 case PROP_LOW_PERCENT:
790 mq->low_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
791 /* Recheck buffering status - the new low_watermark value might
792 * be above the current fill level. If the old low_watermark one
793 * was below the current level, this means that mq->buffering is
794 * disabled and needs to be re-enabled. */
795 recheck_buffering_status (mq);
797 case PROP_HIGH_PERCENT:
798 mq->high_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
799 recheck_buffering_status (mq);
801 case PROP_LOW_WATERMARK:
802 mq->low_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
803 recheck_buffering_status (mq);
805 case PROP_HIGH_WATERMARK:
806 mq->high_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
807 recheck_buffering_status (mq);
809 case PROP_SYNC_BY_RUNNING_TIME:
810 mq->sync_by_running_time = g_value_get_boolean (value);
812 case PROP_USE_INTERLEAVE:
813 mq->use_interleave = g_value_get_boolean (value);
815 case PROP_UNLINKED_CACHE_TIME:
816 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
817 mq->unlinked_cache_time = g_value_get_uint64 (value);
818 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
819 gst_multi_queue_post_buffering (mq);
821 case PROP_MINIMUM_INTERLEAVE:
822 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
823 mq->min_interleave_time = g_value_get_uint64 (value);
824 if (mq->use_interleave)
825 calculate_interleave (mq, NULL);
826 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
829 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
#ifdef TIZEN_FEATURE_MQ_MODIFICATION
/* Sum the current byte fill level across all single queues.
 *
 * Tizen extension backing the read-only "curr-size-bytes" property; called
 * from get_property while the multiqueue lock is held, so the queue list is
 * stable. Fragment reconstructed (missing return type, braces, loop
 * variable declaration and the closing #endif).
 */
static guint
get_current_size_bytes (GstMultiQueue * mq)
{
  GList *tmp;
  guint current_size_bytes = 0;

  for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
    GstDataQueueSize size;

    gst_data_queue_get_level (sq->queue, &size);

    current_size_bytes += size.bytes;

    GST_DEBUG_OBJECT (mq,
        "queue %d: bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
        G_GUINT64_FORMAT, sq->id, size.bytes, sq->max_size.bytes,
        sq->cur_time, sq->max_size.time);
  }

  GST_INFO_OBJECT (mq, "current_size_bytes : %u", current_size_bytes);

  return current_size_bytes;
}
#endif
862 gst_multi_queue_get_property (GObject * object, guint prop_id,
863 GValue * value, GParamSpec * pspec)
865 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
867 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
870 case PROP_EXTRA_SIZE_BYTES:
871 g_value_set_uint (value, mq->extra_size.bytes);
873 case PROP_EXTRA_SIZE_BUFFERS:
874 g_value_set_uint (value, mq->extra_size.visible);
876 case PROP_EXTRA_SIZE_TIME:
877 g_value_set_uint64 (value, mq->extra_size.time);
879 case PROP_MAX_SIZE_BYTES:
880 g_value_set_uint (value, mq->max_size.bytes);
882 case PROP_MAX_SIZE_BUFFERS:
883 g_value_set_uint (value, mq->max_size.visible);
885 case PROP_MAX_SIZE_TIME:
886 g_value_set_uint64 (value, mq->max_size.time);
888 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
889 case PROP_CURR_SIZE_BYTES:
890 g_value_set_uint (value, get_current_size_bytes(mq));
893 case PROP_USE_BUFFERING:
894 g_value_set_boolean (value, mq->use_buffering);
896 case PROP_LOW_PERCENT:
897 g_value_set_int (value, mq->low_watermark / BUF_LEVEL_PERCENT_FACTOR);
899 case PROP_HIGH_PERCENT:
900 g_value_set_int (value, mq->high_watermark / BUF_LEVEL_PERCENT_FACTOR);
902 case PROP_LOW_WATERMARK:
903 g_value_set_double (value, mq->low_watermark /
904 (gdouble) MAX_BUFFERING_LEVEL);
906 case PROP_HIGH_WATERMARK:
907 g_value_set_double (value, mq->high_watermark /
908 (gdouble) MAX_BUFFERING_LEVEL);
910 case PROP_SYNC_BY_RUNNING_TIME:
911 g_value_set_boolean (value, mq->sync_by_running_time);
913 case PROP_USE_INTERLEAVE:
914 g_value_set_boolean (value, mq->use_interleave);
916 case PROP_UNLINKED_CACHE_TIME:
917 g_value_set_uint64 (value, mq->unlinked_cache_time);
919 case PROP_MINIMUM_INTERLEAVE:
920 g_value_set_uint64 (value, mq->min_interleave_time);
923 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
927 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
931 gst_multi_queue_iterate_internal_links (GstPad * pad, GstObject * parent)
933 GstIterator *it = NULL;
935 GstSingleQueue *squeue;
936 GstMultiQueue *mq = GST_MULTI_QUEUE (parent);
939 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
940 squeue = gst_pad_get_element_private (pad);
944 if (squeue->sinkpad == pad)
945 opad = gst_object_ref (squeue->srcpad);
946 else if (squeue->srcpad == pad)
947 opad = gst_object_ref (squeue->sinkpad);
951 g_value_init (&val, GST_TYPE_PAD);
952 g_value_set_object (&val, opad);
953 it = gst_iterator_new_single (GST_TYPE_PAD, &val);
954 g_value_unset (&val);
956 gst_object_unref (opad);
959 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
970 gst_multi_queue_request_new_pad (GstElement * element, GstPadTemplate * temp,
971 const gchar * name, const GstCaps * caps)
973 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
974 GstSingleQueue *squeue;
979 sscanf (name + 4, "_%u", &temp_id);
980 GST_LOG_OBJECT (element, "name : %s (id %d)", GST_STR_NULL (name), temp_id);
983 /* Create a new single queue, add the sink and source pad and return the sink pad */
984 squeue = gst_single_queue_new (mqueue, temp_id);
986 new_pad = squeue ? squeue->sinkpad : NULL;
988 GST_DEBUG_OBJECT (mqueue, "Returning pad %" GST_PTR_FORMAT, new_pad);
/* gst_multi_queue_release_pad:
 * GstElement release-pad implementation. Finds the single queue the given
 * sinkpad belongs to, unlinks it from the element's list (under lock),
 * then flushes, deactivates and frees the single queue and removes both
 * of its pads from the element. */
994 gst_multi_queue_release_pad (GstElement * element, GstPad * pad)
996 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
997 GstSingleQueue *sq = NULL;
1000 GST_LOG_OBJECT (element, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
1002 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1003 /* Find which single queue it belongs to, knowing that it should be a sinkpad */
1004 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1005 sq = (GstSingleQueue *) tmp->data;
1007 if (sq->sinkpad == pad)
/* Not found: warn and bail out without touching anything */
1012 GST_WARNING_OBJECT (mqueue, "That pad doesn't belong to this element ???");
1013 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1017 /* FIXME: The removal of the singlequeue should probably not happen until it
1018 * finishes draining */
1020 /* remove it from the list */
1021 mqueue->queues = g_list_delete_link (mqueue->queues, tmp);
/* bump the cookie so concurrent iterators notice the list changed */
1022 mqueue->queues_cookie++;
1024 /* FIXME : recompute next-non-linked */
1025 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1027 /* delete SingleQueue */
/* set flushing first so the streaming task stops blocking on the queue */
1028 gst_data_queue_set_flushing (sq->queue, TRUE);
1030 gst_pad_set_active (sq->srcpad, FALSE);
1031 gst_pad_set_active (sq->sinkpad, FALSE);
/* clear the back-pointers before removing the pads from the element */
1032 gst_pad_set_element_private (sq->srcpad, NULL);
1033 gst_pad_set_element_private (sq->sinkpad, NULL);
1034 gst_element_remove_pad (element, sq->srcpad);
1035 gst_element_remove_pad (element, sq->sinkpad);
1036 gst_single_queue_free (sq);
/* gst_multi_queue_change_state:
 * GstElement state-change implementation. On READY->PAUSED all single
 * queues are marked non-flushing; on PAUSED->READY they are marked
 * flushing and any task waiting on a condition variable is woken up so
 * the element can shut down cleanly. */
1039 static GstStateChangeReturn
1040 gst_multi_queue_change_state (GstElement * element, GstStateChange transition)
1042 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1043 GstSingleQueue *sq = NULL;
1044 GstStateChangeReturn result;
1046 switch (transition) {
1047 case GST_STATE_CHANGE_READY_TO_PAUSED:{
1050 /* Set all pads to non-flushing */
1051 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1052 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1053 sq = (GstSingleQueue *) tmp->data;
1054 sq->flushing = FALSE;
1057 /* the visible limit might not have been set on single queues that have grown because other queues were empty */
1058 SET_CHILD_PROPERTY (mqueue, visible);
1060 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1061 gst_multi_queue_post_buffering (mqueue);
1065 #ifdef TIZEN_FEATURE_MQ_MODIFICATION
1066 /* to stop buffering during playing state */
1067 case GST_STATE_CHANGE_PAUSED_TO_PLAYING:{
1068 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1069 mqueue->buffering = FALSE;
1070 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1071 gst_multi_queue_post_buffering (mqueue);
1075 case GST_STATE_CHANGE_PAUSED_TO_READY:{
1078 /* Un-wait all waiting pads */
1079 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1080 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1081 sq = (GstSingleQueue *) tmp->data;
1082 sq->flushing = TRUE;
/* wake the streaming task blocked on its turn... */
1083 g_cond_signal (&sq->turn);
/* ...and any sinkpad thread blocked waiting for a serialized query */
1085 sq->last_query = FALSE;
1086 g_cond_signal (&sq->query_handled);
1088 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
/* chain up to the parent class for the actual state change */
1095 result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
1097 switch (transition) {
/* gst_single_queue_start:
 * Starts the streaming task on the single queue's srcpad; the task body
 * is gst_multi_queue_loop. Returns the result of gst_pad_start_task. */
1106 gst_single_queue_start (GstMultiQueue * mq, GstSingleQueue * sq)
1108 GST_LOG_OBJECT (mq, "SingleQueue %d : starting task", sq->id);
1109 return gst_pad_start_task (sq->srcpad,
1110 (GstTaskFunction) gst_multi_queue_loop, sq->srcpad, NULL);
/* gst_single_queue_pause:
 * Pauses the srcpad streaming task and marks both time levels tainted so
 * they get recomputed when the task resumes. */
1114 gst_single_queue_pause (GstMultiQueue * mq, GstSingleQueue * sq)
1118 GST_LOG_OBJECT (mq, "SingleQueue %d : pausing task", sq->id);
1119 result = gst_pad_pause_task (sq->srcpad);
1120 sq->sink_tainted = sq->src_tainted = TRUE;
/* gst_single_queue_stop:
 * Stops (joins) the srcpad streaming task and marks both time levels
 * tainted, mirroring gst_single_queue_pause. */
1125 gst_single_queue_stop (GstMultiQueue * mq, GstSingleQueue * sq)
1129 GST_LOG_OBJECT (mq, "SingleQueue %d : stopping task", sq->id);
1130 result = gst_pad_stop_task (sq->srcpad);
1131 sq->sink_tainted = sq->src_tainted = TRUE;
/* gst_single_queue_flush:
 * Handles flush-start (flush==TRUE) and flush-stop (flush==FALSE) on a
 * single queue. Flush-start sets FLUSHING, marks the data queue flushing
 * and wakes up any waiting task/query; flush-stop drains the queue,
 * resets segments, ids and cached times, and re-arms the data queue. */
1136 gst_single_queue_flush (GstMultiQueue * mq, GstSingleQueue * sq, gboolean flush,
1139 GST_DEBUG_OBJECT (mq, "flush %s queue %d", (flush ? "start" : "stop"),
/* --- flush start path --- */
1143 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1144 sq->srcresult = GST_FLOW_FLUSHING;
1145 gst_data_queue_set_flushing (sq->queue, TRUE);
1147 sq->flushing = TRUE;
1149 /* wake up non-linked task */
1150 GST_LOG_OBJECT (mq, "SingleQueue %d : waking up eventually waiting task",
1152 g_cond_signal (&sq->turn);
/* also release a sinkpad thread blocked on a serialized query */
1153 sq->last_query = FALSE;
1154 g_cond_signal (&sq->query_handled);
1155 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* --- flush stop path: drain and reset all per-queue state --- */
1157 gst_single_queue_flush_queue (sq, full);
1159 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1160 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
1161 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
1162 sq->has_src_segment = FALSE;
1163 /* All pads start off not-linked for a smooth kick-off */
1164 sq->srcresult = GST_FLOW_OK;
/* restore the configured visible-buffers limit */
1167 sq->max_size.visible = mq->max_size.visible;
1169 sq->is_segment_done = FALSE;
/* reset the id/time tracking used by the non-starvation algorithm */
1172 sq->last_oldid = G_MAXUINT32;
1173 sq->next_time = GST_CLOCK_STIME_NONE;
1174 sq->last_time = GST_CLOCK_STIME_NONE;
1175 sq->cached_sinktime = GST_CLOCK_STIME_NONE;
1176 sq->group_high_time = GST_CLOCK_STIME_NONE;
1177 gst_data_queue_set_flushing (sq->queue, FALSE);
1179 /* We will become active again on the next buffer/gap */
1182 /* Reset high time to be recomputed next */
1183 mq->high_time = GST_CLOCK_STIME_NONE;
1185 sq->flushing = FALSE;
1186 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1190 /* WITH LOCK TAKEN */
/* get_buffering_level:
 * Computes the fill level of one single queue on a 0..MAX_BUFFERING_LEVEL
 * scale: the maximum of the time-based and byte-based fill ratios, or
 * full when the queue is EOS/segment-done/not-linked. */
1192 get_buffering_level (GstSingleQueue * sq)
1194 GstDataQueueSize size;
1195 gint buffering_level, tmp;
1197 gst_data_queue_get_level (sq->queue, &size);
1199 GST_DEBUG_OBJECT (sq->mqueue,
1200 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
1201 G_GUINT64_FORMAT, sq->id, size.visible, sq->max_size.visible,
1202 size.bytes, sq->max_size.bytes, sq->cur_time, sq->max_size.time);
1204 /* get bytes and time buffer levels and take the max */
1205 if (sq->is_eos || sq->is_segment_done || sq->srcresult == GST_FLOW_NOT_LINKED
/* a finished or unlinked stream counts as completely full */
1207 buffering_level = MAX_BUFFERING_LEVEL;
1209 buffering_level = 0;
1210 if (sq->max_size.time > 0) {
1212 gst_util_uint64_scale (sq->cur_time,
1213 MAX_BUFFERING_LEVEL, sq->max_size.time);
1214 buffering_level = MAX (buffering_level, tmp);
1216 if (sq->max_size.bytes > 0) {
1218 gst_util_uint64_scale_int (size.bytes,
1219 MAX_BUFFERING_LEVEL, sq->max_size.bytes);
1220 buffering_level = MAX (buffering_level, tmp);
1224 return buffering_level;
1227 /* WITH LOCK TAKEN */
/* update_buffering:
 * Re-evaluates the buffering state after sq's level changed. While
 * buffering, the reported percentage only ever increases and buffering
 * stops once the high watermark is reached. Buffering (re)starts only
 * when this queue drops below the low watermark AND no other queue is
 * already at/above the high watermark. */
1229 update_buffering (GstMultiQueue * mq, GstSingleQueue * sq)
1231 gint buffering_level, percent;
1233 /* nothing to do when we are not in buffering mode */
1234 if (!mq->use_buffering)
1237 buffering_level = get_buffering_level (sq);
1239 /* scale so that if buffering_level equals the high watermark,
1240 * the percentage is 100% */
1241 percent = gst_util_uint64_scale (buffering_level, 100, mq->high_watermark);
1246 if (mq->buffering) {
1247 if (buffering_level >= mq->high_watermark) {
1248 mq->buffering = FALSE;
1250 /* make sure it increases */
1251 percent = MAX (mq->buffering_percent, percent);
1253 SET_PERCENT (mq, percent);
1256 gboolean is_buffering = TRUE;
/* only enter buffering if no other queue is already full enough */
1258 for (iter = mq->queues; iter; iter = g_list_next (iter)) {
1259 GstSingleQueue *oq = (GstSingleQueue *) iter->data;
1261 if (get_buffering_level (oq) >= mq->high_watermark) {
1262 is_buffering = FALSE;
1268 if (is_buffering && buffering_level < mq->low_watermark) {
1269 mq->buffering = TRUE;
1270 SET_PERCENT (mq, percent);
/* gst_multi_queue_post_buffering:
 * Posts a BUFFERING message on the bus if the percentage changed since
 * the last post. Must be called WITHOUT the multiqueue lock held; the
 * dedicated buffering_post_lock serializes posters so messages go out in
 * order, and the element message is posted outside the multiqueue lock
 * to avoid deadlocks with bus handlers. */
1276 gst_multi_queue_post_buffering (GstMultiQueue * mq)
1278 GstMessage *msg = NULL;
1280 g_mutex_lock (&mq->buffering_post_lock);
1281 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1282 if (mq->buffering_percent_changed) {
1283 gint percent = mq->buffering_percent;
1285 mq->buffering_percent_changed = FALSE;
1287 GST_DEBUG_OBJECT (mq, "Going to post buffering: %d%%", percent);
1288 msg = gst_message_new_buffering (GST_OBJECT_CAST (mq), percent);
1290 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* post outside the multiqueue lock, still under the post lock */
1293 gst_element_post_message (GST_ELEMENT_CAST (mq), msg);
1295 g_mutex_unlock (&mq->buffering_post_lock);
/* recheck_buffering_status:
 * Re-evaluates the global buffering state after the use-buffering
 * property (or watermarks) changed. If buffering was disabled while
 * still buffering, force 100% so applications un-pause; if enabled,
 * recompute the percentage from scratch across all single queues. */
1299 recheck_buffering_status (GstMultiQueue * mq)
1301 if (!mq->use_buffering && mq->buffering) {
1302 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1303 mq->buffering = FALSE;
1304 GST_DEBUG_OBJECT (mq,
1305 "Buffering property disabled, but queue was still buffering; "
1306 "setting buffering percentage to 100%%");
1307 SET_PERCENT (mq, 100);
1308 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1311 if (mq->use_buffering) {
1315 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1317 /* force buffering percentage to be recalculated */
1318 old_perc = mq->buffering_percent;
1319 mq->buffering_percent = 0;
/* walk every single queue, refresh its contribution and poke its limits */
1323 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
1324 update_buffering (mq, q);
1325 gst_data_queue_limits_changed (q->queue);
1326 tmp = g_list_next (tmp);
1329 GST_DEBUG_OBJECT (mq,
1330 "Recalculated buffering percentage: old: %d%% new: %d%%",
1331 old_perc, mq->buffering_percent);
1333 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* any resulting change is posted outside the lock */
1336 gst_multi_queue_post_buffering (mq);
/* calculate_interleave:
 * Recomputes the global interleave (the running-time spread between the
 * most- and least-advanced sink positions of the non-sparse queues) and
 * propagates it to max-size-time. Called with the multiqueue lock held.
 * sq (may be NULL) is the queue that triggered the update. */
1340 calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq)
1342 GstClockTimeDiff low, high;
1343 GstClockTime interleave, other_interleave = 0;
1346 low = high = GST_CLOCK_STIME_NONE;
1347 interleave = mq->interleave;
1348 /* Go over all single queues and calculate lowest/highest value */
1349 for (tmp = mq->queues; tmp; tmp = tmp->next) {
1350 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
1351 /* Ignore sparse streams for interleave calculation */
1354 /* If a stream is not active yet (hasn't received any buffers), set
1355 * a maximum interleave to allow it to receive more data */
1358 "queue %d is not active yet, forcing interleave to 5s", oq->id);
1359 mq->interleave = 5 * GST_SECOND;
1360 /* Update max-size time */
1361 mq->max_size.time = mq->interleave;
1362 SET_CHILD_PROPERTY (mq, time);
1366 /* Calculate within each streaming thread */
/* queues on other threads only contribute their own interleave value */
1367 if (sq && sq->thread != oq->thread) {
1368 if (oq->interleave > other_interleave)
1369 other_interleave = oq->interleave;
/* track the min/max cached sink running-times over same-thread queues */
1373 if (GST_CLOCK_STIME_IS_VALID (oq->cached_sinktime)) {
1374 if (low == GST_CLOCK_STIME_NONE || oq->cached_sinktime < low)
1375 low = oq->cached_sinktime;
1376 if (high == GST_CLOCK_STIME_NONE || oq->cached_sinktime > high)
1377 high = oq->cached_sinktime;
1380 "queue %d , sinktime:%" GST_STIME_FORMAT " low:%" GST_STIME_FORMAT
1381 " high:%" GST_STIME_FORMAT, oq->id,
1382 GST_STIME_ARGS (oq->cached_sinktime), GST_STIME_ARGS (low),
1383 GST_STIME_ARGS (high));
1386 if (GST_CLOCK_STIME_IS_VALID (low) && GST_CLOCK_STIME_IS_VALID (high)) {
1387 interleave = high - low;
1388 /* Padding of interleave and minimum value */
/* add 50% headroom plus the configured minimum */
1389 interleave = (150 * interleave / 100) + mq->min_interleave_time;
1391 sq->interleave = interleave;
/* take the cross-thread contribution into account */
1393 interleave = MAX (interleave, other_interleave);
1395 /* Update the stored interleave if:
1396 * * No data has arrived yet (high == low)
1397 * * Or it went higher
1398 * * Or it went lower and we've gone past the previous interleave needed */
1399 if (high == low || interleave > mq->interleave ||
1400 ((mq->last_interleave_update + (2 * MIN (GST_SECOND,
1401 mq->interleave)) < low)
1402 && interleave < (mq->interleave * 3 / 4))) {
1403 /* Update the interleave */
1404 mq->interleave = interleave;
1405 mq->last_interleave_update = high;
1406 /* Update max-size time */
1407 mq->max_size.time = mq->interleave;
1408 SET_CHILD_PROPERTY (mq, time);
1413 GST_DEBUG_OBJECT (mq,
1414 "low:%" GST_STIME_FORMAT " high:%" GST_STIME_FORMAT " interleave:%"
1415 GST_TIME_FORMAT " mq->interleave:%" GST_TIME_FORMAT
1416 " last_interleave_update:%" GST_STIME_FORMAT, GST_STIME_ARGS (low),
1417 GST_STIME_ARGS (high), GST_TIME_ARGS (interleave),
1418 GST_TIME_ARGS (mq->interleave),
1419 GST_STIME_ARGS (mq->last_interleave_update));
1423 /* calculate the diff between running time on the sink and src of the queue.
1424 * This is the total amount of time in the queue.
1425 * WITH LOCK TAKEN */
1427 update_time_level (GstMultiQueue * mq, GstSingleQueue * sq)
1429 GstClockTimeDiff sink_time, src_time;
/* refresh the cached sink running-time only when marked tainted */
1431 if (sq->sink_tainted) {
1432 sink_time = sq->sinktime = my_segment_to_running_time (&sq->sink_segment,
1433 sq->sink_segment.position);
1435 GST_DEBUG_OBJECT (mq,
1436 "queue %d sink_segment.position:%" GST_TIME_FORMAT ", sink_time:%"
1437 GST_STIME_FORMAT, sq->id, GST_TIME_ARGS (sq->sink_segment.position),
1438 GST_STIME_ARGS (sink_time));
1440 if (G_UNLIKELY (sq->last_time == GST_CLOCK_STIME_NONE)) {
1441 /* If the single queue still doesn't have a last_time set, this means
1442 * that nothing has been pushed out yet.
1443 * In order for the high_time computation to be as efficient as possible,
1444 * we set the last_time */
1445 sq->last_time = sink_time;
1447 if (G_UNLIKELY (sink_time != GST_CLOCK_STIME_NONE)) {
1448 /* if we have a time, we become untainted and use the time */
1449 sq->sink_tainted = FALSE;
1450 if (mq->use_interleave) {
1451 sq->cached_sinktime = sink_time;
1452 calculate_interleave (mq, sq);
/* not tainted: reuse the cached value */
1456 sink_time = sq->sinktime;
1458 if (sq->src_tainted) {
1459 GstSegment *segment;
1462 if (sq->has_src_segment) {
1463 segment = &sq->src_segment;
1464 position = sq->src_segment.position;
1467 * If the src pad had no segment yet, use the sink segment
1468 * to avoid signalling overrun if the received sink segment has a
1469 * a position > max-size-time while the src pad time would be the default=0
1471 * This can happen when switching pads on chained/adaptive streams and the
1472 * new chain has a segment with a much larger position
1474 segment = &sq->sink_segment;
1475 position = sq->sink_segment.position;
1478 src_time = sq->srctime = my_segment_to_running_time (segment, position);
1479 /* if we have a time, we become untainted and use the time */
1480 if (G_UNLIKELY (src_time != GST_CLOCK_STIME_NONE)) {
1481 sq->src_tainted = FALSE;
1484 src_time = sq->srctime;
1486 GST_DEBUG_OBJECT (mq,
1487 "queue %d, sink %" GST_STIME_FORMAT ", src %" GST_STIME_FORMAT, sq->id,
1488 GST_STIME_ARGS (sink_time), GST_STIME_ARGS (src_time));
1490 /* This allows for streams with out of order timestamping - sometimes the
1491 * emerging timestamp is later than the arriving one(s) */
1492 if (G_LIKELY (GST_CLOCK_STIME_IS_VALID (sink_time) &&
1493 GST_CLOCK_STIME_IS_VALID (src_time) && sink_time > src_time))
1494 sq->cur_time = sink_time - src_time;
1498 /* updating the time level can change the buffering state */
1499 update_buffering (mq, sq);
1504 /* take a SEGMENT event and apply the values to segment, updating the time
1505 * level of queue. */
1507 apply_segment (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1508 GstSegment * segment)
1510 gst_event_copy_segment (event, segment);
1512 /* now configure the values, we use these to track timestamps on the
1514 if (segment->format != GST_FORMAT_TIME) {
1515 /* non-time format, pretend the current time segment is closed with a
1516 * 0 start and unknown stop time. */
1517 segment->format = GST_FORMAT_TIME;
1522 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1524 /* Make sure we have a valid initial segment position (and not garbage
/* forward playback starts at segment start, reverse at segment stop */
1526 if (segment->rate > 0.0)
1527 segment->position = segment->start;
1529 segment->position = segment->stop;
/* mark the side this segment belongs to as tainted so its running-time
 * gets recomputed by update_time_level() below */
1530 if (segment == &sq->sink_segment)
1531 sq->sink_tainted = TRUE;
1533 sq->has_src_segment = TRUE;
1534 sq->src_tainted = TRUE;
1537 GST_DEBUG_OBJECT (mq,
1538 "queue %d, configured SEGMENT %" GST_SEGMENT_FORMAT, sq->id, segment);
1540 /* segment can update the time level of the queue */
1541 update_time_level (mq, sq);
1543 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1544 gst_multi_queue_post_buffering (mq);
1547 /* take a buffer and update segment, updating the time level of the queue. */
1549 apply_buffer (GstMultiQueue * mq, GstSingleQueue * sq, GstClockTime timestamp,
1550 GstClockTime duration, GstSegment * segment)
1552 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1554 /* if no timestamp is set, assume it's continuous with the previous
1556 if (timestamp == GST_CLOCK_TIME_NONE)
1557 timestamp = segment->position;
/* advance to the end of the buffer when the duration is known */
1560 if (duration != GST_CLOCK_TIME_NONE)
1561 timestamp += duration;
1563 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1564 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1565 GST_TIME_ARGS (timestamp));
1567 segment->position = timestamp;
/* taint the matching side so update_time_level() recomputes it */
1569 if (segment == &sq->sink_segment)
1570 sq->sink_tainted = TRUE;
1572 sq->src_tainted = TRUE;
1574 /* calc diff with other end */
1575 update_time_level (mq, sq);
1576 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1577 gst_multi_queue_post_buffering (mq);
/* apply_gap:
 * Takes a GAP event and advances the given segment position to the end
 * of the gap (timestamp + duration when valid), then updates the
 * queue's time level. Mirrors apply_buffer for GAP events. */
1581 apply_gap (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1582 GstSegment * segment)
1584 GstClockTime timestamp;
1585 GstClockTime duration;
1587 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1589 gst_event_parse_gap (event, &timestamp, &duration);
1591 if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
1593 if (GST_CLOCK_TIME_IS_VALID (duration)) {
1594 timestamp += duration;
1597 segment->position = timestamp;
/* taint the matching side so update_time_level() recomputes it */
1599 if (segment == &sq->sink_segment)
1600 sq->sink_tainted = TRUE;
1602 sq->src_tainted = TRUE;
1604 /* calc diff with other end */
1605 update_time_level (mq, sq);
1608 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1609 gst_multi_queue_post_buffering (mq);
/* get_running_time:
 * Returns the running time of a queued mini object against the given
 * segment, or GST_CLOCK_STIME_NONE when none can be derived.
 * For buffers: DTS-or-PTS (plus duration when end==TRUE). For buffer
 * lists: the last timestamped buffer wins. For SEGMENT events: the
 * running time of the new segment's start (time format only). */
1612 static GstClockTimeDiff
1613 get_running_time (GstSegment * segment, GstMiniObject * object, gboolean end)
1615 GstClockTimeDiff time = GST_CLOCK_STIME_NONE;
1617 if (GST_IS_BUFFER (object)) {
1618 GstBuffer *buf = GST_BUFFER_CAST (object);
1619 GstClockTime btime = GST_BUFFER_DTS_OR_PTS (buf);
1621 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1622 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1623 btime += GST_BUFFER_DURATION (buf);
1624 time = my_segment_to_running_time (segment, btime);
1626 } else if (GST_IS_BUFFER_LIST (object)) {
1627 GstBufferList *list = GST_BUFFER_LIST_CAST (object);
1631 n = gst_buffer_list_length (list);
1632 for (i = 0; i < n; i++) {
1634 buf = gst_buffer_list_get (list, i);
1635 btime = GST_BUFFER_DTS_OR_PTS (buf);
1636 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1637 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1638 btime += GST_BUFFER_DURATION (buf);
/* keep overwriting: the last valid timestamp in the list is used */
1639 time = my_segment_to_running_time (segment, btime);
1646 } else if (GST_IS_EVENT (object)) {
1647 GstEvent *event = GST_EVENT_CAST (object);
1649 /* For newsegment events return the running time of the start position */
1650 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
1651 const GstSegment *new_segment;
1653 gst_event_parse_segment (event, &new_segment);
1654 if (new_segment->format == GST_FORMAT_TIME) {
1656 my_segment_to_running_time ((GstSegment *) new_segment,
1657 new_segment->start);
/* gst_single_queue_push_one:
 * Pushes one dequeued mini object (buffer, event or query) downstream on
 * sq's srcpad and returns the resulting flow return. *allow_drop is the
 * "dropping until end of EOS-terminated data" flag: while TRUE, buffers,
 * events and queries are dropped instead of pushed; certain events
 * (SEGMENT_DONE, EOS, STREAM_START, SEGMENT) clear it again. */
1666 static GstFlowReturn
1667 gst_single_queue_push_one (GstMultiQueue * mq, GstSingleQueue * sq,
1668 GstMiniObject * object, gboolean * allow_drop)
1670 GstFlowReturn result = sq->srcresult;
1672 if (GST_IS_BUFFER (object)) {
1674 GstClockTime timestamp, duration;
1676 buffer = GST_BUFFER_CAST (object);
1677 timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
1678 duration = GST_BUFFER_DURATION (buffer);
/* track the src-side position before pushing */
1680 apply_buffer (mq, sq, timestamp, duration, &sq->src_segment);
1682 /* Applying the buffer may have made the queue non-full again, unblock it if needed */
1683 gst_data_queue_limits_changed (sq->queue);
1685 if (G_UNLIKELY (*allow_drop)) {
1686 GST_DEBUG_OBJECT (mq,
1687 "SingleQueue %d : Dropping EOS buffer %p with ts %" GST_TIME_FORMAT,
1688 sq->id, buffer, GST_TIME_ARGS (timestamp));
1689 gst_buffer_unref (buffer);
1691 GST_DEBUG_OBJECT (mq,
1692 "SingleQueue %d : Pushing buffer %p with ts %" GST_TIME_FORMAT,
1693 sq->id, buffer, GST_TIME_ARGS (timestamp));
1694 result = gst_pad_push (sq->srcpad, buffer);
1696 } else if (GST_IS_EVENT (object)) {
1699 event = GST_EVENT_CAST (object);
1701 switch (GST_EVENT_TYPE (event)) {
1702 case GST_EVENT_SEGMENT_DONE:
1703 *allow_drop = FALSE;
/* EOS: report EOS flow upstream and stop any drop mode */
1706 result = GST_FLOW_EOS;
1707 if (G_UNLIKELY (*allow_drop))
1708 *allow_drop = FALSE;
1710 case GST_EVENT_STREAM_START:
1711 result = GST_FLOW_OK;
1712 if (G_UNLIKELY (*allow_drop))
1713 *allow_drop = FALSE;
1715 case GST_EVENT_SEGMENT:
1716 apply_segment (mq, sq, event, &sq->src_segment);
1717 /* Applying the segment may have made the queue non-full again, unblock it if needed */
1718 gst_data_queue_limits_changed (sq->queue);
1719 if (G_UNLIKELY (*allow_drop)) {
1720 result = GST_FLOW_OK;
1721 *allow_drop = FALSE;
/* GAP: advance src position just like a buffer would */
1725 apply_gap (mq, sq, event, &sq->src_segment);
1726 /* Applying the gap may have made the queue non-full again, unblock it if needed */
1727 gst_data_queue_limits_changed (sq->queue);
1733 if (G_UNLIKELY (*allow_drop)) {
1734 GST_DEBUG_OBJECT (mq,
1735 "SingleQueue %d : Dropping EOS event %p of type %s",
1736 sq->id, event, GST_EVENT_TYPE_NAME (event));
1737 gst_event_unref (event);
1739 GST_DEBUG_OBJECT (mq,
1740 "SingleQueue %d : Pushing event %p of type %s",
1741 sq->id, event, GST_EVENT_TYPE_NAME (event));
1743 gst_pad_push_event (sq->srcpad, event);
1745 } else if (GST_IS_QUERY (object)) {
1749 query = GST_QUERY_CAST (object);
1751 if (G_UNLIKELY (*allow_drop)) {
1752 GST_DEBUG_OBJECT (mq,
1753 "SingleQueue %d : Dropping EOS query %p", sq->id, query);
1754 gst_query_unref (query);
1757 res = gst_pad_peer_query (sq->srcpad, query);
/* hand the result back to the sinkpad thread waiting on query_handled */
1760 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1761 sq->last_query = res;
1762 sq->last_handled_query = query;
1763 g_cond_signal (&sq->query_handled);
1764 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1766 g_warning ("Unexpected object in singlequeue %u (refcounting problem?)",
/* gst_multi_queue_item_steal_object:
 * Transfers ownership of the item's mini object to the caller, clearing
 * the item's pointer so a later destroy won't unref it. */
1774 static GstMiniObject *
1775 gst_multi_queue_item_steal_object (GstMultiQueueItem * item)
1780 item->object = NULL;
/* gst_multi_queue_item_destroy:
 * Frees a queue item; unrefs the carried object unless it is a query
 * (queries are owned/answered by the sinkpad thread) or was stolen. */
1786 gst_multi_queue_item_destroy (GstMultiQueueItem * item)
1788 if (!item->is_query && item->object)
1789 gst_mini_object_unref (item->object);
1790 g_slice_free (GstMultiQueueItem, item);
1793 /* takes ownership of passed mini object! */
/* gst_multi_queue_buffer_item_new:
 * Wraps a buffer in a visible (counted against max-size-buffers) queue
 * item carrying its size, duration and the unique position id. */
1794 static GstMultiQueueItem *
1795 gst_multi_queue_buffer_item_new (GstMiniObject * object, guint32 curid)
1797 GstMultiQueueItem *item;
1799 item = g_slice_new (GstMultiQueueItem);
1800 item->object = object;
1801 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
1802 item->posid = curid;
1803 item->is_query = GST_IS_QUERY (object);
1805 item->size = gst_buffer_get_size (GST_BUFFER_CAST (object));
1806 item->duration = GST_BUFFER_DURATION (object);
1807 if (item->duration == GST_CLOCK_TIME_NONE)
/* visible items count towards the max-size-buffers limit */
1809 item->visible = TRUE;
/* gst_multi_queue_mo_item_new:
 * Wraps a non-buffer mini object (event or query) in a queue item; such
 * items are invisible, i.e. not counted against max-size-buffers.
 * Takes ownership of the passed mini object. */
1813 static GstMultiQueueItem *
1814 gst_multi_queue_mo_item_new (GstMiniObject * object, guint32 curid)
1816 GstMultiQueueItem *item;
1818 item = g_slice_new (GstMultiQueueItem);
1819 item->object = object;
1820 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
1821 item->posid = curid;
1822 item->is_query = GST_IS_QUERY (object);
1826 item->visible = FALSE;
1830 /* Each main loop attempts to push buffers until the return value
1831 * is not-linked. not-linked pads are not allowed to push data beyond
1832 * any linked pads, so they don't 'rush ahead of the pack'.
/* gst_multi_queue_loop:
 * Streaming-task body running on each single queue's srcpad. Pops one
 * item from the data queue, waits if the pad is not-linked and would get
 * ahead of the linked pads (by id, or by running time when
 * sync-by-running-time is set), pushes the item downstream, then updates
 * srcresult / buffering and wakes other waiting queues as needed. */
1835 gst_multi_queue_loop (GstPad * pad)
1838 GstMultiQueueItem *item;
1839 GstDataQueueItem *sitem;
1841 GstMiniObject *object = NULL;
1843 GstFlowReturn result;
1844 GstClockTimeDiff next_time;
1846 gboolean do_update_buffering = FALSE;
1847 gboolean dropping = FALSE;
1849 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
1853 GST_DEBUG_OBJECT (mq, "SingleQueue %d : trying to pop an object", sq->id);
1858 /* Get something from the queue, blocking until that happens, or we get
1860 if (!(gst_data_queue_pop (sq->queue, &sitem)))
1863 item = (GstMultiQueueItem *) sitem;
1864 newid = item->posid;
1866 /* steal the object and destroy the item */
1867 object = gst_multi_queue_item_steal_object (item);
1868 gst_multi_queue_item_destroy (item);
1870 is_buffer = GST_IS_BUFFER (object);
1872 /* Get running time of the item. Events will have GST_CLOCK_STIME_NONE */
1873 next_time = get_running_time (&sq->src_segment, object, FALSE);
1875 GST_LOG_OBJECT (mq, "SingleQueue %d : newid:%d , oldid:%d",
1876 sq->id, newid, sq->last_oldid);
1878 /* If we're not-linked, we do some extra work because we might need to
1879 * wait before pushing. If we're linked but there's a gap in the IDs,
1880 * or it's the first loop, or we just passed the previous highid,
1881 * we might need to wake some sleeping pad up, so there's extra work
1883 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1884 if (sq->srcresult == GST_FLOW_NOT_LINKED
1885 || (sq->last_oldid == G_MAXUINT32) || (newid != (sq->last_oldid + 1))
1886 || sq->last_oldid > mq->highid) {
1887 GST_LOG_OBJECT (mq, "CHECKING sq->srcresult: %s",
1888 gst_flow_get_name (sq->srcresult));
1890 /* Check again if we're flushing after the lock is taken,
1891 * the flush flag might have been changed in the meantime */
1893 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1897 /* Update the nextid so other threads know when to wake us up */
1899 /* Take into account the extra cache time since we're unlinked */
1900 if (GST_CLOCK_STIME_IS_VALID (next_time))
1901 next_time += mq->unlinked_cache_time;
1902 sq->next_time = next_time;
1904 /* Update the oldid (the last ID we output) for highid tracking */
1905 if (sq->last_oldid != G_MAXUINT32)
1906 sq->oldid = sq->last_oldid;
1908 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
1909 gboolean should_wait;
1910 /* Go to sleep until it's time to push this buffer */
1912 /* Recompute the highid */
1913 compute_high_id (mq);
1914 /* Recompute the high time */
1915 compute_high_time (mq, sq->groupid);
1917 GST_DEBUG_OBJECT (mq,
1918 "groupid %d high_time %" GST_STIME_FORMAT " next_time %"
1919 GST_STIME_FORMAT, sq->groupid, GST_STIME_ARGS (sq->group_high_time),
1920 GST_STIME_ARGS (next_time));
/* decide whether we are ahead of the linked pads: by running time
 * when sync-by-running-time is enabled, otherwise by position id */
1922 if (mq->sync_by_running_time) {
1923 if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
1924 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
1925 (mq->high_time == GST_CLOCK_STIME_NONE
1926 || next_time > mq->high_time);
1928 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
1929 next_time > sq->group_high_time;
1932 should_wait = newid > mq->highid;
1934 while (should_wait && sq->srcresult == GST_FLOW_NOT_LINKED) {
1936 GST_DEBUG_OBJECT (mq,
1937 "queue %d sleeping for not-linked wakeup with "
1938 "newid %u, highid %u, next_time %" GST_STIME_FORMAT
1939 ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
1940 GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time));
1942 /* Wake up all non-linked pads before we sleep */
1943 wake_up_next_non_linked (mq);
/* sleep until another queue advances the high id/time */
1946 g_cond_wait (&sq->turn, &mq->qlock);
/* woken up because we're flushing: drop out of the loop */
1950 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1954 /* Recompute the high time and ID */
1955 compute_high_time (mq, sq->groupid);
1956 compute_high_id (mq);
1958 GST_DEBUG_OBJECT (mq, "queue %d woken from sleeping for not-linked "
1959 "wakeup with newid %u, highid %u, next_time %" GST_STIME_FORMAT
1960 ", high_time %" GST_STIME_FORMAT " mq high_time %" GST_STIME_FORMAT,
1961 sq->id, newid, mq->highid,
1962 GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time),
1963 GST_STIME_ARGS (mq->high_time));
/* re-evaluate the wait condition after waking up */
1965 if (mq->sync_by_running_time) {
1966 if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
1967 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
1968 (mq->high_time == GST_CLOCK_STIME_NONE
1969 || next_time > mq->high_time);
1971 should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
1972 next_time > sq->group_high_time;
1975 should_wait = newid > mq->highid;
1978 /* Re-compute the high_id in case someone else pushed */
1979 compute_high_id (mq);
1980 compute_high_time (mq, sq->groupid);
1982 compute_high_id (mq);
1983 compute_high_time (mq, sq->groupid);
1984 /* Wake up all non-linked pads */
1985 wake_up_next_non_linked (mq);
1987 /* We're done waiting, we can clear the nextid and nexttime */
1989 sq->next_time = GST_CLOCK_STIME_NONE;
1991 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1996 GST_LOG_OBJECT (mq, "sq:%d BEFORE PUSHING sq->srcresult: %s", sq->id,
1997 gst_flow_get_name (sq->srcresult));
1999 /* Update time stats */
2000 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2001 next_time = get_running_time (&sq->src_segment, object, TRUE);
2002 if (GST_CLOCK_STIME_IS_VALID (next_time)) {
2003 if (sq->last_time == GST_CLOCK_STIME_NONE || sq->last_time < next_time)
2004 sq->last_time = next_time;
2005 if (mq->high_time == GST_CLOCK_STIME_NONE || mq->high_time <= next_time) {
2006 /* Wake up all non-linked pads now that we advanced the high time */
2007 mq->high_time = next_time;
2008 wake_up_next_non_linked (mq);
2011 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2013 /* Try to push out the new object */
2014 result = gst_single_queue_push_one (mq, sq, object, &dropping);
2017 /* Check if we pushed something already and if this is
2018 * now a switch from an active to a non-active stream.
2020 * If it is, we reset all the waiting streams, let them
2021 * push another buffer to see if they're now active again.
2022 * This allows faster switching between streams and prevents
2023 * deadlocks if downstream does any waiting too.
2025 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2026 if (sq->pushed && sq->srcresult == GST_FLOW_OK
2027 && result == GST_FLOW_NOT_LINKED) {
2030 GST_LOG_OBJECT (mq, "SingleQueue %d : Changed from active to non-active",
2033 compute_high_id (mq);
2034 compute_high_time (mq, sq->groupid);
2035 do_update_buffering = TRUE;
2037 /* maybe no-one is waiting */
2038 if (mq->numwaiting > 0) {
2039 /* Else figure out which singlequeue(s) need waking up */
2040 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2041 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
2043 if (sq2->srcresult == GST_FLOW_NOT_LINKED) {
2044 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq2->id);
/* give the queue another chance to push and relink */
2045 sq2->pushed = FALSE;
2046 sq2->srcresult = GST_FLOW_OK;
2047 g_cond_signal (&sq2->turn);
2056 /* now hold on a bit;
2057 * can not simply throw this result to upstream, because
2058 * that might already be onto another segment, so we have to make
2059 * sure we are relaying the correct info wrt proper segment */
2060 if (result == GST_FLOW_EOS && !dropping &&
2061 sq->srcresult != GST_FLOW_NOT_LINKED) {
2062 GST_DEBUG_OBJECT (mq, "starting EOS drop on sq %d", sq->id);
2064 /* pretend we have not seen EOS yet for upstream's sake */
2065 result = sq->srcresult;
2066 } else if (dropping && gst_data_queue_is_empty (sq->queue)) {
2067 /* queue empty, so stop dropping
2068 * we can commit the result we have now,
2069 * which is either OK after a segment, or EOS */
2070 GST_DEBUG_OBJECT (mq, "committed EOS drop on sq %d", sq->id);
2072 result = GST_FLOW_EOS;
2074 sq->srcresult = result;
2075 sq->last_oldid = newid;
2077 if (do_update_buffering)
2078 update_buffering (mq, sq);
2080 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2081 gst_multi_queue_post_buffering (mq);
2083 GST_LOG_OBJECT (mq, "sq:%d AFTER PUSHING sq->srcresult: %s (is_eos:%d)",
2084 sq->id, gst_flow_get_name (sq->srcresult), GST_PAD_IS_EOS (sq->srcpad));
2086 /* Need to make sure wake up any sleeping pads when we exit */
2087 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2088 if (mq->numwaiting > 0 && (GST_PAD_IS_EOS (sq->srcpad)
2089 || sq->srcresult == GST_FLOW_EOS)) {
2090 compute_high_time (mq, sq->groupid);
2091 compute_high_id (mq);
2092 wake_up_next_non_linked (mq);
2094 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* fatal (non OK / NOT_LINKED / EOS) results fall through to error
 * handling below */
2099 if (result != GST_FLOW_OK && result != GST_FLOW_NOT_LINKED
2100 && result != GST_FLOW_EOS)
/* error path: release the popped object (queries are answered, not owned) */
2107 if (object && !GST_IS_QUERY (object))
2108 gst_mini_object_unref (object);
2110 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
/* unblock a sinkpad thread still waiting on a serialized query */
2111 sq->last_query = FALSE;
2112 g_cond_signal (&sq->query_handled);
2114 /* Post an error message if we got EOS while downstream
2115 * has returned an error flow return. After EOS there
2116 * will be no further buffer which could propagate the
2118 if ((sq->is_eos || sq->is_segment_done) && sq->srcresult < GST_FLOW_EOS) {
2119 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2120 GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
2122 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2125 /* upstream needs to see fatal result ASAP to shut things down,
2126 * but might be stuck in one of our other full queues;
2127 * so empty this one and trigger dynamic queue growth. At
2128 * this point the srcresult is not OK, NOT_LINKED
2129 * or EOS, i.e. a real failure */
2130 gst_single_queue_flush_queue (sq, FALSE);
2131 single_queue_underrun_cb (sq->queue, sq);
2132 gst_data_queue_set_flushing (sq->queue, TRUE);
2133 gst_pad_pause_task (sq->srcpad);
2134 GST_CAT_LOG_OBJECT (multi_queue_debug, mq,
2135 "SingleQueue[%d] task paused, reason:%s",
2136 sq->id, gst_flow_get_name (sq->srcresult));
/*
 * gst_multi_queue_chain:
 *
 * Sink-pad chain function: wraps @buffer in a GstMultiQueueItem carrying a
 * globally-unique incrementing id and pushes it onto this pad's single queue.
 *
 * This is similar to GstQueue's chain function, except:
 * _ we don't have leak behaviours,
 * _ we push with a unique id (curid)
 *
 * NOTE(review): this excerpt is truncated — declarations of sq/mq/curid, the
 * EOS guard, the `goto` statements and their `flushing:`/`was_eos:` labels
 * are not visible. Verify against the full file before modifying.
 */
static GstFlowReturn
gst_multi_queue_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
  GstMultiQueueItem *item;
  GstClockTime timestamp, duration;

  /* The GstSingleQueue was stored as pad private data at pad creation */
  sq = gst_pad_get_element_private (pad);

  /* if eos, we are always full, so avoid hanging incoming indefinitely */

  /* Get a unique incrementing id */
  curid = g_atomic_int_add ((gint *) & mq->counter, 1);

  /* DTS preferred for queue-fill accounting; falls back to PTS */
  timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
  duration = GST_BUFFER_DURATION (buffer);

      "SingleQueue %d : about to enqueue buffer %p with id %d (pts:%"
      GST_TIME_FORMAT " dts:%" GST_TIME_FORMAT " dur:%" GST_TIME_FORMAT ")",
      sq->id, buffer, curid, GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
      GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), GST_TIME_ARGS (duration));

  /* The item takes ownership of the buffer reference */
  item = gst_multi_queue_buffer_item_new (GST_MINI_OBJECT_CAST (buffer), curid);

  /* Update interleave before pushing data into queue */
  if (mq->use_interleave) {
    GstClockTime val = timestamp;
    GstClockTimeDiff dval;

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    /* No timestamp on the buffer: use the current sink segment position */
    if (val == GST_CLOCK_TIME_NONE)
      val = sq->sink_segment.position;
    if (duration != GST_CLOCK_TIME_NONE)

    dval = my_segment_to_running_time (&sq->sink_segment, val);
    if (GST_CLOCK_STIME_IS_VALID (dval)) {
      sq->cached_sinktime = dval;
      GST_DEBUG_OBJECT (mq,
          "Queue %d cached sink time now %" G_GINT64_FORMAT " %"
          GST_STIME_FORMAT, sq->id, sq->cached_sinktime,
          GST_STIME_ARGS (sq->cached_sinktime));
      calculate_interleave (mq, sq);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* Blocks while the queue is full; fails when the queue is flushing */
  if (!(gst_data_queue_push (sq->queue, (GstDataQueueItem *) item)))

  /* update time level, we must do this after pushing the data in the queue so
   * that we never end up filling the queue first. */
  apply_buffer (mq, sq, timestamp, duration, &sq->sink_segment);

  return sq->srcresult;

  /* Error paths (goto targets — labels truncated in this excerpt) */
  GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
      sq->id, gst_flow_get_name (sq->srcresult));
  gst_multi_queue_item_destroy (item);

  GST_DEBUG_OBJECT (mq, "we are EOS, dropping buffer, return EOS");
  gst_buffer_unref (buffer);
  return GST_FLOW_EOS;
/* Activate or deactivate a sink pad in push mode.
 * On activation the queue accepts data again (srcresult OK, flushing off);
 * on deactivation it refuses data, wakes any blocked serialized query,
 * and flushes the data queue once the streaming thread has stopped.
 * NOTE(review): the return-type line and switch scaffolding are truncated
 * in this excerpt — verify against the full file. */
gst_multi_queue_sink_activate_mode (GstPad * pad, GstObject * parent,
    GstPadMode mode, gboolean active)
  sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
  mq = (GstMultiQueue *) gst_pad_get_parent (pad);

  /* mq is NULL if the pad is activated/deactivated before being
   * added to the multiqueue */

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    case GST_PAD_MODE_PUSH:
        /* All pads start off linked until they push one buffer */
        sq->srcresult = GST_FLOW_OK;
        gst_data_queue_set_flushing (sq->queue, FALSE);

        /* Deactivation: refuse further data, wake any query waiter */
        sq->srcresult = GST_FLOW_FLUSHING;
        sq->last_query = FALSE;
        g_cond_signal (&sq->query_handled);
        gst_data_queue_set_flushing (sq->queue, TRUE);

        /* Wait until streaming thread has finished */
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_PAD_STREAM_LOCK (pad);
        GST_MULTI_QUEUE_MUTEX_LOCK (mq);
        gst_data_queue_flush (sq->queue);
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_PAD_STREAM_UNLOCK (pad);
        GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  gst_object_unref (mq);
/*
 * gst_multi_queue_sink_event:
 *
 * Handle events arriving on a sink pad. Non-serialized events are forwarded
 * straight to the matching src pad; serialized events are wrapped in a
 * GstMultiQueueItem (with a unique id) and travel through the data queue.
 * Some events require bookkeeping before (pre-queue switch) and after
 * (post-queue switch) the push.
 *
 * NOTE(review): switch scaffolding, `goto` statements and their labels, and
 * several closing braces are truncated in this excerpt — verify against the
 * full file before modifying.
 */
static GstFlowReturn
gst_multi_queue_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
  GstMultiQueueItem *item;
  gboolean res = TRUE;
  GstFlowReturn flowret = GST_FLOW_OK;
  GstEvent *sref = NULL;

  sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
  mq = (GstMultiQueue *) parent;

  type = GST_EVENT_TYPE (event);

    case GST_EVENT_STREAM_START:
      if (mq->sync_by_running_time) {
        GstStreamFlags stream_flags;
        gst_event_parse_stream_flags (event, &stream_flags);
        if ((stream_flags & GST_STREAM_FLAG_SPARSE)) {
          GST_INFO_OBJECT (mq, "SingleQueue %d is a sparse stream", sq->id);
          /* Sparse streams get special treatment in the fill checks */
          sq->is_sparse = TRUE;

      /* Remember the upstream streaming thread for this queue */
      sq->thread = g_thread_self ();

      /* Remove EOS flag */

    case GST_EVENT_FLUSH_START:
      GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush start event",
      /* Forward immediately, then flush and pause this single queue */
      res = gst_pad_push_event (sq->srcpad, event);

      gst_single_queue_flush (mq, sq, TRUE, FALSE);
      gst_single_queue_pause (mq, sq);
    case GST_EVENT_FLUSH_STOP:
      GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush stop event",
      res = gst_pad_push_event (sq->srcpad, event);

      gst_single_queue_flush (mq, sq, FALSE, FALSE);
      gst_single_queue_start (mq, sq);
#ifdef TIZEN_FEATURE_MQ_MODIFICATION
      /* need to reset the buffering data after seeking */
        GstSingleQueue *q = (GstSingleQueue *) tmp->data;
        tmp = g_list_next (tmp);
      recheck_buffering_status (mq);

    case GST_EVENT_SEGMENT:
      sq->is_segment_done = FALSE;
      sref = gst_event_ref (event);
      /* take ref because the queue will take ownership and we need the event
       * afterwards to update the segment */
      sref = gst_event_ref (event);
      if (mq->use_interleave) {
        GstClockTime val, dur;
        gst_event_parse_gap (event, &val, &dur);
        if (GST_CLOCK_TIME_IS_VALID (val)) {
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
          if (GST_CLOCK_TIME_IS_VALID (dur))
          stime = my_segment_to_running_time (&sq->sink_segment, val);
          if (GST_CLOCK_STIME_IS_VALID (stime)) {
            sq->cached_sinktime = stime;
            calculate_interleave (mq, sq);
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* Non-serialized events bypass the data queue entirely */
  if (!(GST_EVENT_IS_SERIALIZED (event))) {
    res = gst_pad_push_event (sq->srcpad, event);

  /* if eos, we are always full, so avoid hanging incoming indefinitely */

  /* Get an unique incrementing id. */
  curid = g_atomic_int_add ((gint *) & mq->counter, 1);

  item = gst_multi_queue_mo_item_new ((GstMiniObject *) event, curid);

  GST_DEBUG_OBJECT (mq,
      "SingleQueue %d : Enqueuing event %p of type %s with id %d",
      sq->id, event, GST_EVENT_TYPE_NAME (event), curid);

  /* Blocks while the queue is full; fails when flushing */
  if (!gst_data_queue_push (sq->queue, (GstDataQueueItem *) item))

  /* mark EOS when we received one, we must do that after putting the
   * buffer in the queue because EOS marks the buffer as filled. */
    case GST_EVENT_SEGMENT_DONE:
      sq->is_segment_done = TRUE;
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      update_buffering (mq, sq);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      single_queue_overrun_cb (sq->queue, sq);
      gst_multi_queue_post_buffering (mq);

      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      /* Post an error message if we got EOS while downstream
       * has returned an error flow return. After EOS there
       * will be no further buffer which could propagate the
      if (sq->srcresult < GST_FLOW_EOS) {
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      /* EOS affects the buffering state */
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      update_buffering (mq, sq);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      single_queue_overrun_cb (sq->queue, sq);
      gst_multi_queue_post_buffering (mq);
    case GST_EVENT_SEGMENT:
      /* Event is queued; now update the sink segment from our extra ref */
      apply_segment (mq, sq, sref, &sq->sink_segment);
      gst_event_unref (sref);
      /* a new segment allows us to accept more buffers if we got EOS
       * from downstream */
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      if (sq->srcresult == GST_FLOW_EOS)
        sq->srcresult = GST_FLOW_OK;
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      apply_gap (mq, sq, sref, &sq->sink_segment);
      gst_event_unref (sref);

  /* Refusal path (label truncated in this excerpt) */
  flowret = GST_FLOW_ERROR;
  GST_DEBUG_OBJECT (mq, "SingleQueue %d : returning %s", sq->id,
      gst_flow_get_name (flowret));

  GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
      sq->id, gst_flow_get_name (sq->srcresult));
  gst_event_unref (sref);
  gst_multi_queue_item_destroy (item);
  return sq->srcresult;

  GST_DEBUG_OBJECT (mq, "we are EOS, dropping event, return GST_FLOW_EOS");
  gst_event_unref (event);
  return GST_FLOW_EOS;
/* Handle queries on a sink pad. Serialized queries are pushed through the
 * data queue and the caller blocks on sq->query_handled until the streaming
 * thread has answered (or the queue starts flushing). To avoid deadlocks,
 * serialized queries are refused while buffering with a non-empty queue. */
gst_multi_queue_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
  sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
  mq = (GstMultiQueue *) parent;

  switch (GST_QUERY_TYPE (query)) {
      if (GST_QUERY_IS_SERIALIZED (query)) {
        GstMultiQueueItem *item;

        GST_MULTI_QUEUE_MUTEX_LOCK (mq);
        if (sq->srcresult != GST_FLOW_OK)

        /* serialized events go in the queue. We need to be certain that we
         * don't cause deadlocks waiting for the query return value. We check if
         * the queue is empty (nothing is blocking downstream and the query can
         * be pushed for sure) or we are not buffering. If we are buffering,
         * the pipeline waits to unblock downstream until our queue fills up
         * completely, which can not happen if we block on the query..
         * Therefore we only potentially block when we are not buffering. */
        if (!mq->use_buffering || gst_data_queue_is_empty (sq->queue)) {
          /* Get an unique incrementing id. */
          curid = g_atomic_int_add ((gint *) & mq->counter, 1);

          item = gst_multi_queue_mo_item_new ((GstMiniObject *) query, curid);

          GST_DEBUG_OBJECT (mq,
              "SingleQueue %d : Enqueuing query %p of type %s with id %d",
              sq->id, query, GST_QUERY_TYPE_NAME (query), curid);
          /* Drop the lock while (possibly) blocking on a full queue */
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
          res = gst_data_queue_push (sq->queue, (GstDataQueueItem *) item);
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
#ifdef TIZEN_FEATURE_MQ_MODIFICATION
          if (!res || sq->flushing) {
            gst_multi_queue_item_destroy (item);
          if (!res || sq->flushing)

          /* it might be that the query has been taken out of the queue
           * while we were unlocked. So, we need to check if the last
           * handled query is the same one than the one we just
           * pushed. If it is, we don't need to wait for the condition
           * variable, otherwise we wait for the condition variable to
          while (!sq->flushing && sq->srcresult == GST_FLOW_OK
              && sq->last_handled_query != query)
            g_cond_wait (&sq->query_handled, &mq->qlock);
          res = sq->last_query;
          sq->last_handled_query = NULL;

          GST_DEBUG_OBJECT (mq, "refusing query, we are buffering and the "
              "queue is not empty");
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      /* default handling */
      res = gst_pad_query_default (pad, parent, query);

  /* Flushing path (label truncated in this excerpt) */
  GST_DEBUG_OBJECT (mq, "Flushing");
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
/* Activate or deactivate a src pad in push mode: activation flushes the
 * single queue non-destructively and starts its streaming task; deactivation
 * flushes fully and stops the task.
 * NOTE(review): return-type line and switch scaffolding truncated here. */
gst_multi_queue_src_activate_mode (GstPad * pad, GstObject * parent,
    GstPadMode mode, gboolean active)
  sq = (GstSingleQueue *) gst_pad_get_element_private (pad);

  GST_DEBUG_OBJECT (mq, "SingleQueue %d", sq->id);

    case GST_PAD_MODE_PUSH:
        gst_single_queue_flush (mq, sq, FALSE, TRUE);
        /* parent is NULL when the pad is not yet added to the element */
        result = parent ? gst_single_queue_start (mq, sq) : TRUE;

        gst_single_queue_flush (mq, sq, TRUE, TRUE);
        result = gst_single_queue_stop (mq, sq);
/* Handle events on a src pad. RECONFIGURE revives a not-linked queue
 * (downstream was relinked) before forwarding; everything else is simply
 * forwarded upstream via the sink pad. */
gst_multi_queue_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
  GstSingleQueue *sq = gst_pad_get_element_private (pad);
  GstMultiQueue *mq = sq->mqueue;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_RECONFIGURE:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      if (sq->srcresult == GST_FLOW_NOT_LINKED) {
        /* Resume pushing and wake the streaming task waiting on turn */
        sq->srcresult = GST_FLOW_OK;
        g_cond_signal (&sq->turn);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

      ret = gst_pad_push_event (sq->sinkpad, event);
      ret = gst_pad_push_event (sq->sinkpad, event);
/* Handle queries on a src pad; everything currently falls through to the
 * default pad query handler. */
gst_multi_queue_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
  /* FIXME, Handle position offset depending on queue size */
  switch (GST_QUERY_TYPE (query)) {
      /* default handling */
      res = gst_pad_query_default (pad, parent, query);
 * Next-non-linked functions
 */

/* WITH LOCK TAKEN */
/* Wake every not-linked single queue whose pending item is now allowed out,
 * i.e. whose next_time/nextid has fallen at or below the global (or
 * per-group) high watermark. */
wake_up_next_non_linked (GstMultiQueue * mq)
  /* maybe no-one is waiting */
  if (mq->numwaiting < 1)

  if (mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (mq->high_time)) {
    /* Else figure out which singlequeue(s) need waking up */
    for (tmp = mq->queues; tmp; tmp = tmp->next) {
      GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
      if (sq->srcresult == GST_FLOW_NOT_LINKED) {
        GstClockTimeDiff high_time;

        /* Prefer the per-group high time when valid */
        if (GST_CLOCK_STIME_IS_VALID (sq->group_high_time))
          high_time = sq->group_high_time;
          high_time = mq->high_time;

        if (GST_CLOCK_STIME_IS_VALID (sq->next_time) &&
            GST_CLOCK_STIME_IS_VALID (high_time)
            && sq->next_time <= high_time) {
          GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
          g_cond_signal (&sq->turn);

    /* Else figure out which singlequeue(s) need waking up */
    for (tmp = mq->queues; tmp; tmp = tmp->next) {
      GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
      if (sq->srcresult == GST_FLOW_NOT_LINKED &&
          sq->nextid != 0 && sq->nextid <= mq->highid) {
        GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
        g_cond_signal (&sq->turn);
/* WITH LOCK TAKEN */
/* Recompute mq->highid, used to decide how far not-linked queues may
 * advance relative to the linked ones. */
compute_high_id (GstMultiQueue * mq)
  /* The high-id is either the highest id among the linked pads, or if all
   * pads are not-linked, it's the lowest not-linked pad */
  guint32 lowest = G_MAXUINT32;
  guint32 highid = G_MAXUINT32;

  for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *sq = (GstSingleQueue *) tmp->data;

    GST_LOG_OBJECT (mq, "inspecting sq:%d , nextid:%d, oldid:%d, srcresult:%s",
        sq->id, sq->nextid, sq->oldid, gst_flow_get_name (sq->srcresult));

    if (sq->srcresult == GST_FLOW_NOT_LINKED) {
      /* No need to consider queues which are not waiting */
      if (sq->nextid == 0) {
        GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);

      /* Track the smallest pending id among not-linked queues */
      if (sq->nextid < lowest)
        lowest = sq->nextid;
    } else if (!GST_PAD_IS_EOS (sq->srcpad) && sq->srcresult != GST_FLOW_EOS) {
      /* If we don't have a global highid, or the global highid is lower than
       * this single queue's last outputted id, store the queue's one,
       * unless the singlequeue output is at EOS */
      if ((highid == G_MAXUINT32) || (sq->oldid > highid))

  /* No linked queue produced an id, or a not-linked queue is behind all
   * linked ones: fall back to the lowest waiting id */
  if (highid == G_MAXUINT32 || lowest < highid)
    mq->highid = lowest;
    mq->highid = highid;

  GST_LOG_OBJECT (mq, "Highid is now : %u, lowest non-linked %u", mq->highid,
/* WITH LOCK TAKEN */
/* Recompute mq->high_time and the per-group high time for @groupid from the
 * last/next running times of all single queues, then publish the group
 * result to every queue in the group. Only meaningful when
 * sync_by_running_time is enabled. */
compute_high_time (GstMultiQueue * mq, guint groupid)
  /* The high-time is either the highest last time among the linked
   * pads, or if all pads are not-linked, it's the lowest nex time of
  GstClockTimeDiff highest = GST_CLOCK_STIME_NONE;
  GstClockTimeDiff lowest = GST_CLOCK_STIME_NONE;
  GstClockTimeDiff group_high = GST_CLOCK_STIME_NONE;
  GstClockTimeDiff group_low = GST_CLOCK_STIME_NONE;
  GstClockTimeDiff res;
  /* Number of streams which belong to groupid */
  guint group_count = 0;

  if (!mq->sync_by_running_time)
    /* return GST_CLOCK_STIME_NONE; */

  for (tmp = mq->queues; tmp; tmp = tmp->next) {
    GstSingleQueue *sq = (GstSingleQueue *) tmp->data;

        "inspecting sq:%d (group:%d) , next_time:%" GST_STIME_FORMAT
        ", last_time:%" GST_STIME_FORMAT ", srcresult:%s", sq->id, sq->groupid,
        GST_STIME_ARGS (sq->next_time), GST_STIME_ARGS (sq->last_time),
        gst_flow_get_name (sq->srcresult));

    if (sq->groupid == groupid)

    if (sq->srcresult == GST_FLOW_NOT_LINKED) {
      /* No need to consider queues which are not waiting */
      if (!GST_CLOCK_STIME_IS_VALID (sq->next_time)) {
        GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);

      /* Track the smallest pending time, globally and per group */
      if (lowest == GST_CLOCK_STIME_NONE || sq->next_time < lowest)
        lowest = sq->next_time;
      if (sq->groupid == groupid && (group_low == GST_CLOCK_STIME_NONE
              || sq->next_time < group_low))
        group_low = sq->next_time;
    } else if (!GST_PAD_IS_EOS (sq->srcpad) && sq->srcresult != GST_FLOW_EOS) {
      /* If we don't have a global high time, or the global high time
       * is lower than this single queue's last outputted time, store
       * the queue's one, unless the singlequeue output is at EOS. */
      if (highest == GST_CLOCK_STIME_NONE
          || (sq->last_time != GST_CLOCK_STIME_NONE && sq->last_time > highest))
        highest = sq->last_time;
      if (sq->groupid == groupid && (group_high == GST_CLOCK_STIME_NONE
              || (sq->last_time != GST_CLOCK_STIME_NONE
                  && sq->last_time > group_high)))
        group_high = sq->last_time;

        "highest now %" GST_STIME_FORMAT " lowest %" GST_STIME_FORMAT,
        GST_STIME_ARGS (highest), GST_STIME_ARGS (lowest));
    if (sq->groupid == groupid)
          "grouphigh %" GST_STIME_FORMAT " grouplow %" GST_STIME_FORMAT,
          GST_STIME_ARGS (group_high), GST_STIME_ARGS (group_low));

  /* No linked stream active: fall back to the lowest waiting time */
  if (highest == GST_CLOCK_STIME_NONE)
    mq->high_time = lowest;
    mq->high_time = highest;

  /* If there's only one stream of a given type, use the global high */
  if (group_count < 2)
    res = GST_CLOCK_STIME_NONE;
  else if (group_high == GST_CLOCK_STIME_NONE)

  GST_LOG_OBJECT (mq, "group count %d for groupid %u", group_count, groupid);
      "MQ High time is now : %" GST_STIME_FORMAT ", group %d high time %"
      GST_STIME_FORMAT ", lowest non-linked %" GST_STIME_FORMAT,
      GST_STIME_ARGS (mq->high_time), groupid, GST_STIME_ARGS (mq->high_time),
      GST_STIME_ARGS (lowest));

  /* Publish the new group high time to all members of the group */
  for (tmp = mq->queues; tmp; tmp = tmp->next) {
    GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
    if (groupid == sq->groupid)
      sq->group_high_time = res;
/* A queue dimension (visible/bytes/time) counts as filled only when a
 * non-zero limit is configured and the current value has reached it. */
#define IS_FILLED(q, format, value) (((q)->max_size.format) != 0 && \
    ((q)->max_size.format) <= (value))

#ifdef TIZEN_FEATURE_MQ_MODIFICATION_EXTRA_SIZE_TIME
/* Same, but the value must reach max_size + extra_size; both limits must be
 * non-zero for the check to apply. */
#define IS_FILLED_EXTRA(q, format, value) ((((q)->extra_size.format) != 0) && (((q)->max_size.format) != 0) && \
    (((q)->extra_size.format)+((q)->max_size.format)) <= (value))
 * GstSingleQueue functions
 */
/* Data-queue "full" callback for a single queue. If no hard byte/time limit
 * is hit, looks for empty sibling queues and, when found, bumps this queue's
 * visible-items limit by one so siblings don't starve. Always emits the
 * "overrun" signal, since a full queue blocks the upstream element. */
single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
  GstMultiQueue *mq = sq->mqueue;
  GstDataQueueSize size;
  gboolean filled = TRUE;
  gboolean empty_found = FALSE;

  gst_data_queue_get_level (sq->queue, &size);

      "Single Queue %d: EOS %d, visible %u/%u, bytes %u/%u, time %"
      G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, sq->id, sq->is_eos, size.visible,
      sq->max_size.visible, size.bytes, sq->max_size.bytes, sq->cur_time,

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);

  /* check if we reached the hard time/bytes limits;
     time limit is only taken into account for non-sparse streams */
  if (sq->is_eos || IS_FILLED (sq, bytes, size.bytes) ||
      (!sq->is_sparse && IS_FILLED (sq, time, sq->cur_time))) {

  /* Search for empty queues */
  for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *oq = (GstSingleQueue *) tmp->data;

    if (oq->srcresult == GST_FLOW_NOT_LINKED) {
      GST_LOG_OBJECT (mq, "Queue %d is not-linked", oq->id);

    GST_LOG_OBJECT (mq, "Checking Queue %d", oq->id);
    if (gst_data_queue_is_empty (oq->queue) && !oq->is_sparse) {
      GST_LOG_OBJECT (mq, "Queue %d is empty", oq->id);

  /* if hard limits are not reached then we allow one more buffer in the full
   * queue, but only if any of the other singelqueues are empty */
    if (IS_FILLED (sq, visible, size.visible)) {
      sq->max_size.visible = size.visible + 1;
      GST_DEBUG_OBJECT (mq,
          "Bumping single queue %d max visible to %d",
          sq->id, sq->max_size.visible);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* Overrun is always forwarded, since this is blocking the upstream element */
  GST_DEBUG_OBJECT (mq, "Queue %d is filled, signalling overrun", sq->id);
  g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_OVERRUN], 0);
/* Data-queue "empty" callback for a single queue. Bumps the visible limit of
 * any sibling queue that is full on its visible-items limit (so data keeps
 * flowing), and emits the "underrun" signal when every non-sparse queue is
 * empty. */
single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
  gboolean empty = TRUE;
  GstMultiQueue *mq = sq->mqueue;

  if (sq->srcresult == GST_FLOW_NOT_LINKED) {
    GST_LOG_OBJECT (mq, "Single Queue %d is empty but not-linked", sq->id);

      "Single Queue %d is empty, Checking other single queues", sq->id);

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *oq = (GstSingleQueue *) tmp->data;

    if (gst_data_queue_is_full (oq->queue)) {
      GstDataQueueSize size;

      gst_data_queue_get_level (oq->queue, &size);
      if (IS_FILLED (oq, visible, size.visible)) {
        /* Allow one more visible item so the full sibling can progress */
        oq->max_size.visible = size.visible + 1;
        GST_DEBUG_OBJECT (mq,
            "queue %d is filled, bumping its max visible to %d", oq->id,
            oq->max_size.visible);
        gst_data_queue_limits_changed (oq->queue);

    if (!gst_data_queue_is_empty (oq->queue) || oq->is_sparse)

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  GST_DEBUG_OBJECT (mq, "All queues are empty, signalling it");
  g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_UNDERRUN], 0);
/* Data-queue "is full?" predicate installed on each single queue.
 * A queue is full on EOS/segment-done, on the visible-items limit (unless
 * buffering), or on the bytes/time limits; sparse streams and not-linked
 * queues get relaxed time handling. */
single_queue_check_full (GstDataQueue * dataq, guint visible, guint bytes,
    guint64 time, GstSingleQueue * sq)
  GstMultiQueue *mq = sq->mqueue;

  GST_DEBUG_OBJECT (mq,
      "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
      G_GUINT64_FORMAT, sq->id, visible, sq->max_size.visible, bytes,
      sq->max_size.bytes, sq->cur_time, sq->max_size.time);

  /* we are always filled on EOS */
  if (sq->is_eos || sq->is_segment_done)

  /* we never go past the max visible items unless we are in buffering mode */
  if (!mq->use_buffering && IS_FILLED (sq, visible, visible))

  /* check time or bytes */
#ifdef TIZEN_FEATURE_MQ_MODIFICATION_EXTRA_SIZE_TIME
  res = IS_FILLED_EXTRA (sq, time, sq->cur_time) || IS_FILLED (sq, bytes, bytes);
  res = IS_FILLED (sq, bytes, bytes);
  /* We only care about limits in time if we're not a sparse stream or
   * we're not syncing by running time */
  if (!sq->is_sparse || !mq->sync_by_running_time) {
    /* If unlinked, take into account the extra unlinked cache time */
    if (mq->sync_by_running_time && sq->srcresult == GST_FLOW_NOT_LINKED) {
      if (sq->cur_time > mq->unlinked_cache_time)
        res |= IS_FILLED (sq, time, sq->cur_time - mq->unlinked_cache_time);

      res |= IS_FILLED (sq, time, sq->cur_time);
/* Drain a single queue. When @full is FALSE, sticky events other than
 * SEGMENT and EOS are rescued by storing them on the srcpad instead of being
 * destroyed with the rest of the items. Finishes by updating and posting the
 * buffering state. */
gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full)
  GstDataQueueItem *sitem;
  GstMultiQueueItem *mitem;
  gboolean was_flushing = FALSE;

  while (!gst_data_queue_is_empty (sq->queue)) {
    GstMiniObject *data;

    /* FIXME: If this fails here although the queue is not empty,
     * we're flushing... but we want to rescue all sticky
     * events nonetheless.
    if (!gst_data_queue_pop (sq->queue, &sitem)) {
      was_flushing = TRUE;
      /* Temporarily leave flushing mode so the pop can succeed */
      gst_data_queue_set_flushing (sq->queue, FALSE);

    mitem = (GstMultiQueueItem *) sitem;

    data = sitem->object;

    if (!full && !mitem->is_query && GST_IS_EVENT (data)
        && GST_EVENT_IS_STICKY (data)
        && GST_EVENT_TYPE (data) != GST_EVENT_SEGMENT
        && GST_EVENT_TYPE (data) != GST_EVENT_EOS) {
      /* Keep the sticky event on the srcpad so it is re-sent later */
      gst_pad_store_sticky_event (sq->srcpad, GST_EVENT_CAST (data));

    sitem->destroy (sitem);

  gst_data_queue_flush (sq->queue);
  /* presumably re-enters flushing mode only when was_flushing was set —
   * the guard condition appears truncated in this excerpt; verify */
  gst_data_queue_set_flushing (sq->queue, TRUE);

  GST_MULTI_QUEUE_MUTEX_LOCK (sq->mqueue);
  update_buffering (sq->mqueue, sq);
  GST_MULTI_QUEUE_MUTEX_UNLOCK (sq->mqueue);
  gst_multi_queue_post_buffering (sq->mqueue);
/* Release a GstSingleQueue's resources: drain and unref the data queue and
 * clear its condition variables.
 * NOTE(review): the freeing of the sq struct itself is not visible in this
 * excerpt — verify against the full file. */
gst_single_queue_free (GstSingleQueue * sq)
  gst_data_queue_flush (sq->queue);
  g_object_unref (sq->queue);
  g_cond_clear (&sq->turn);
  g_cond_clear (&sq->query_handled);
3029 static GstSingleQueue *
3030 gst_single_queue_new (GstMultiQueue * mqueue, guint id)
3033 GstMultiQueuePad *mqpad;
3034 GstPadTemplate *templ;
3037 guint temp_id = (id == -1) ? 0 : id;
3039 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
3041 /* Find an unused queue ID, if possible the passed one */
3042 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
3043 GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
3044 /* This works because the IDs are sorted in ascending order */
3045 if (sq2->id == temp_id) {
3046 /* If this ID was requested by the caller return NULL,
3047 * otherwise just get us the next one */
3049 temp_id = sq2->id + 1;
3051 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
3054 } else if (sq2->id > temp_id) {
3059 sq = g_new0 (GstSingleQueue, 1);
3062 sq->groupid = DEFAULT_PAD_GROUP_ID;
3063 sq->group_high_time = GST_CLOCK_STIME_NONE;
3065 mqueue->queues = g_list_insert_before (mqueue->queues, tmp, sq);
3066 mqueue->queues_cookie++;
3068 /* copy over max_size and extra_size so we don't need to take the lock
3069 * any longer when checking if the queue is full. */
3070 sq->max_size.visible = mqueue->max_size.visible;
3071 sq->max_size.bytes = mqueue->max_size.bytes;
3072 sq->max_size.time = mqueue->max_size.time;
3074 sq->extra_size.visible = mqueue->extra_size.visible;
3075 sq->extra_size.bytes = mqueue->extra_size.bytes;
3076 sq->extra_size.time = mqueue->extra_size.time;
3078 GST_DEBUG_OBJECT (mqueue, "Creating GstSingleQueue id:%d", sq->id);
3080 sq->mqueue = mqueue;
3081 sq->srcresult = GST_FLOW_FLUSHING;
3083 sq->queue = gst_data_queue_new ((GstDataQueueCheckFullFunction)
3084 single_queue_check_full,
3085 (GstDataQueueFullCallback) single_queue_overrun_cb,
3086 (GstDataQueueEmptyCallback) single_queue_underrun_cb, sq);
3088 sq->is_sparse = FALSE;
3089 sq->flushing = FALSE;
3091 gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
3092 gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
3096 sq->next_time = GST_CLOCK_STIME_NONE;
3097 sq->last_time = GST_CLOCK_STIME_NONE;
3098 g_cond_init (&sq->turn);
3099 g_cond_init (&sq->query_handled);
3101 sq->sinktime = GST_CLOCK_STIME_NONE;
3102 sq->srctime = GST_CLOCK_STIME_NONE;
3103 sq->sink_tainted = TRUE;
3104 sq->src_tainted = TRUE;
3106 name = g_strdup_printf ("sink_%u", sq->id);
3107 templ = gst_static_pad_template_get (&sinktemplate);
3108 sq->sinkpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
3109 "direction", templ->direction, "template", templ, NULL);
3110 gst_object_unref (templ);
3113 mqpad = (GstMultiQueuePad *) sq->sinkpad;
3116 gst_pad_set_chain_function (sq->sinkpad,
3117 GST_DEBUG_FUNCPTR (gst_multi_queue_chain));
3118 gst_pad_set_activatemode_function (sq->sinkpad,
3119 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_activate_mode));
3120 gst_pad_set_event_full_function (sq->sinkpad,
3121 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_event));
3122 gst_pad_set_query_function (sq->sinkpad,
3123 GST_DEBUG_FUNCPTR (gst_multi_queue_sink_query));
3124 gst_pad_set_iterate_internal_links_function (sq->sinkpad,
3125 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
3126 GST_OBJECT_FLAG_SET (sq->sinkpad, GST_PAD_FLAG_PROXY_CAPS);
3128 name = g_strdup_printf ("src_%u", sq->id);
3129 sq->srcpad = gst_pad_new_from_static_template (&srctemplate, name);
3132 gst_pad_set_activatemode_function (sq->srcpad,
3133 GST_DEBUG_FUNCPTR (gst_multi_queue_src_activate_mode));
3134 gst_pad_set_event_function (sq->srcpad,
3135 GST_DEBUG_FUNCPTR (gst_multi_queue_src_event));
3136 gst_pad_set_query_function (sq->srcpad,
3137 GST_DEBUG_FUNCPTR (gst_multi_queue_src_query));
3138 gst_pad_set_iterate_internal_links_function (sq->srcpad,
3139 GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
3140 GST_OBJECT_FLAG_SET (sq->srcpad, GST_PAD_FLAG_PROXY_CAPS);
3142 gst_pad_set_element_private (sq->sinkpad, (gpointer) sq);
3143 gst_pad_set_element_private (sq->srcpad, (gpointer) sq);
3145 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
3147 /* only activate the pads when we are not in the NULL state
3148 * and add the pad under the state_lock to prevend state changes
3149 * between activating and adding */
3150 g_rec_mutex_lock (GST_STATE_GET_LOCK (mqueue));
3151 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
3152 gst_pad_set_active (sq->srcpad, TRUE);
3153 gst_pad_set_active (sq->sinkpad, TRUE);
3155 gst_element_add_pad (GST_ELEMENT (mqueue), sq->srcpad);
3156 gst_element_add_pad (GST_ELEMENT (mqueue), sq->sinkpad);
3157 if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
3158 gst_single_queue_start (mqueue, sq);
3160 g_rec_mutex_unlock (GST_STATE_GET_LOCK (mqueue));
3162 GST_DEBUG_OBJECT (mqueue, "GstSingleQueue [%d] created and pads added",