2 * Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
3 * 2000 Wim Taymans <wtay@chello.be>
4 * 2003 Colin Walters <cwalters@gnome.org>
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Library General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Library General Public License for more details.
18 * You should have received a copy of the GNU Library General Public
19 * License along with this library; if not, write to the
20 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
25 #include "gst_private.h"
28 #include "gstscheduler.h"
/* Always-present "sink" and "src" pad templates for the queue element.
 * NOTE(review): the template argument lists (direction, presence, caps) are
 * missing from this extraction — confirm against the full file. */
33 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink",
38 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src",
/* Private debug category used for all dataflow logging in this element. */
43 GST_DEBUG_CATEGORY_STATIC (queue_dataflow);
44 #define GST_CAT_DEFAULT (queue_dataflow)
/* STATUS(queue, msg): log current/min/max fill levels in buffers, bytes and
 * nanoseconds.  Relies on a variable named `pad` being in scope at the call
 * site (GST_DEBUG_PAD_NAME (pad)).  No comments are inserted inside the macro
 * body so the backslash continuations stay intact. */
46 #define STATUS(queue, msg) \
47 GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
48 "(%s:%s) " msg ": %u of %u-%u buffers, %u of %u-%u " \
49 "bytes, %" G_GUINT64_FORMAT " of %" G_GUINT64_FORMAT \
50 "-%" G_GUINT64_FORMAT " ns, %u elements", \
51 GST_DEBUG_PAD_NAME (pad), \
52 queue->cur_level.buffers, \
53 queue->min_threshold.buffers, \
54 queue->max_size.buffers, \
55 queue->cur_level.bytes, \
56 queue->min_threshold.bytes, \
57 queue->max_size.bytes, \
58 queue->cur_level.time, \
59 queue->min_threshold.time, \
60 queue->max_size.time, \
/* Element metadata registered in base_init (name, klass, description,
 * author — interior arguments missing from this extraction). */
63 static GstElementDetails gst_queue_details = GST_ELEMENT_DETAILS ("Queue",
66 "Erik Walthinsen <omega@cse.ogi.edu>");
/* Signal and property enumerations.  Only a fragment of the enumerators is
 * visible here; SIGNAL_UNDERRUN/RUNNING/OVERRUN, LAST_SIGNAL and the ARG_*
 * values referenced elsewhere in this file are presumably declared in the
 * missing lines — confirm against the full file. */
69 /* Queue signals and args */
81 /* FIXME: don't we have another way of doing this
82 * "Gstreamer format" (frame/byte/time) queries? */
83 ARG_CUR_LEVEL_BUFFERS,
89 ARG_MIN_THRESHOLD_BUFFERS,
90 ARG_MIN_THRESHOLD_BYTES,
91 ARG_MIN_THRESHOLD_TIME,
/* Lock/unlock helpers for queue->qlock with debug logging around the mutex
 * operations.  Both macros expect a variable named `queue` in scope.  No
 * comments are inserted inside the bodies so the backslash continuations
 * stay intact. */
98 #define GST_QUEUE_MUTEX_LOCK G_STMT_START { \
99 GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
100 "locking qlock from thread %p", \
102 g_mutex_lock (queue->qlock); \
103 GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
104 "locked qlock from thread %p", \
108 #define GST_QUEUE_MUTEX_UNLOCK G_STMT_START { \
109 GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
110 "unlocking qlock from thread %p", \
112 g_mutex_unlock (queue->qlock); \
/* Response record for a src-pad event forwarded upstream: the handling
 * thread fills in `ret`/`handled` while the waiter blocks on event_done.
 * (An `event` member is referenced elsewhere; its declaration is missing
 * from this extraction.) */
116 typedef struct _GstQueueEventResponse
119 gboolean ret, handled;
121 GstQueueEventResponse;
/* Forward declarations for all static functions of the queue element:
 * GType boilerplate, GObject property accessors, the chain/get dataflow
 * entry points, src-pad event/query handlers, caps negotiation helpers,
 * flushing, and the element state-change / release-locks vfuncs. */
123 static void gst_queue_base_init (GstQueueClass * klass);
124 static void gst_queue_class_init (GstQueueClass * klass);
125 static void gst_queue_init (GstQueue * queue);
126 static void gst_queue_finalize (GObject * object);
128 static void gst_queue_set_property (GObject * object,
129 guint prop_id, const GValue * value, GParamSpec * pspec);
130 static void gst_queue_get_property (GObject * object,
131 guint prop_id, GValue * value, GParamSpec * pspec);
133 static void gst_queue_chain (GstPad * pad, GstData * data);
134 static GstData *gst_queue_get (GstPad * pad);
136 static gboolean gst_queue_handle_src_event (GstPad * pad, GstEvent * event);
137 static gboolean gst_queue_handle_src_query (GstPad * pad,
138 GstQueryType type, GstFormat * fmt, gint64 * value);
140 static GstCaps *gst_queue_getcaps (GstPad * pad);
141 static GstPadLinkReturn
142 gst_queue_link_sink (GstPad * pad, const GstCaps * caps);
143 static GstPadLinkReturn gst_queue_link_src (GstPad * pad, const GstCaps * caps);
144 static void gst_queue_locked_flush (GstQueue * queue);
146 static GstElementStateReturn gst_queue_change_state (GstElement * element);
147 static gboolean gst_queue_release_locks (GstElement * element);
/* Registers (once, lazily) the GstQueueLeaky enum GType used by the "leaky"
 * property: no leak / leak on upstream end / leak on downstream end. */
150 #define GST_TYPE_QUEUE_LEAKY (queue_leaky_get_type ())
153 queue_leaky_get_type (void)
155 static GType queue_leaky_type = 0;
156 static GEnumValue queue_leaky[] = {
157 {GST_QUEUE_NO_LEAK, "0", "Not Leaky"},
158 {GST_QUEUE_LEAK_UPSTREAM, "1", "Leaky on Upstream"},
159 {GST_QUEUE_LEAK_DOWNSTREAM, "2", "Leaky on Downstream"},
163 if (!queue_leaky_type) {
164 queue_leaky_type = g_enum_register_static ("GstQueueLeaky", queue_leaky);
166 return queue_leaky_type;
/* Parent class pointer (set in class_init) and signal id table. */
169 static GstElementClass *parent_class = NULL;
170 static guint gst_queue_signals[LAST_SIGNAL] = { 0 };
/* Standard lazy GType registration for GstQueue; also initializes the
 * queue_dataflow debug category the first time through.
 * NOTE(review): several GTypeInfo slots (class_finalize, instance size, etc.)
 * are missing from this extraction. */
173 gst_queue_get_type (void)
175 static GType queue_type = 0;
178 static const GTypeInfo queue_info = {
179 sizeof (GstQueueClass),
180 (GBaseInitFunc) gst_queue_base_init,
182 (GClassInitFunc) gst_queue_class_init,
187 (GInstanceInitFunc) gst_queue_init,
191 queue_type = g_type_register_static (GST_TYPE_ELEMENT,
192 "GstQueue", &queue_info, 0);
193 GST_DEBUG_CATEGORY_INIT (queue_dataflow, "queue_dataflow", 0,
194 "dataflow inside the queue element");
/* base_init: register the src/sink pad templates and element details on the
 * element class (runs once per class, before class_init). */
201 gst_queue_base_init (GstQueueClass * klass)
203 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
205 gst_element_class_add_pad_template (gstelement_class,
206 gst_static_pad_template_get (&srctemplate));
207 gst_element_class_add_pad_template (gstelement_class,
208 gst_static_pad_template_get (&sinktemplate));
209 gst_element_class_set_details (gstelement_class, &gst_queue_details);
/* class_init: wires GObject property accessors, registers the three fill
 * signals (underrun/running/overrun), installs all level/threshold/behavior
 * properties, and overrides finalize, change_state and release_locks. */
213 gst_queue_class_init (GstQueueClass * klass)
215 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
216 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
218 parent_class = g_type_class_peek_parent (klass);
220 gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_queue_set_property);
221 gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_queue_get_property);
/* Signals emitted from chain/get when the queue crosses its configured
 * limits; all are VOID:VOID and run-first. */
224 gst_queue_signals[SIGNAL_UNDERRUN] =
225 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
226 G_STRUCT_OFFSET (GstQueueClass, underrun), NULL, NULL,
227 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
228 gst_queue_signals[SIGNAL_RUNNING] =
229 g_signal_new ("running", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
230 G_STRUCT_OFFSET (GstQueueClass, running), NULL, NULL,
231 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
232 gst_queue_signals[SIGNAL_OVERRUN] =
233 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
234 G_STRUCT_OFFSET (GstQueueClass, overrun), NULL, NULL,
235 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
/* Read-only current fill levels. */
238 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_BYTES,
239 g_param_spec_uint ("current-level-bytes", "Current level (kB)",
240 "Current amount of data in the queue (bytes)",
241 0, G_MAXUINT, 0, G_PARAM_READABLE));
242 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_BUFFERS,
243 g_param_spec_uint ("current-level-buffers", "Current level (buffers)",
244 "Current number of buffers in the queue",
245 0, G_MAXUINT, 0, G_PARAM_READABLE));
246 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_TIME,
247 g_param_spec_uint64 ("current-level-time", "Current level (ns)",
248 "Current amount of data in the queue (in ns)",
249 0, G_MAXUINT64, 0, G_PARAM_READABLE));
/* Read-write maximum sizes; 0 disables the respective limit. */
251 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_BYTES,
252 g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
253 "Max. amount of data in the queue (bytes, 0=disable)",
254 0, G_MAXUINT, 0, G_PARAM_READWRITE));
255 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_BUFFERS,
256 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
257 "Max. number of buffers in the queue (0=disable)",
258 0, G_MAXUINT, 0, G_PARAM_READWRITE));
259 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_TIME,
260 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
261 "Max. amount of data in the queue (in ns, 0=disable)",
262 0, G_MAXUINT64, 0, G_PARAM_READWRITE));
/* Read-write minimum thresholds before _get() may drain; 0 disables. */
264 g_object_class_install_property (gobject_class, ARG_MIN_THRESHOLD_BYTES,
265 g_param_spec_uint ("min-threshold-bytes", "Min. threshold (kB)",
266 "Min. amount of data in the queue to allow reading (bytes, 0=disable)",
267 0, G_MAXUINT, 0, G_PARAM_READWRITE));
268 g_object_class_install_property (gobject_class, ARG_MIN_THRESHOLD_BUFFERS,
269 g_param_spec_uint ("min-threshold-buffers", "Min. threshold (buffers)",
270 "Min. number of buffers in the queue to allow reading (0=disable)",
271 0, G_MAXUINT, 0, G_PARAM_READWRITE));
272 g_object_class_install_property (gobject_class, ARG_MIN_THRESHOLD_TIME,
273 g_param_spec_uint64 ("min-threshold-time", "Min. threshold (ns)",
274 "Min. amount of data in the queue to allow reading (in ns, 0=disable)",
275 0, G_MAXUINT64, 0, G_PARAM_READWRITE));
/* Behavioral knobs: leak policy, deadlock tolerance, block timeout. */
277 g_object_class_install_property (gobject_class, ARG_LEAKY,
278 g_param_spec_enum ("leaky", "Leaky",
279 "Where the queue leaks, if at all",
280 GST_TYPE_QUEUE_LEAKY, GST_QUEUE_NO_LEAK, G_PARAM_READWRITE));
281 g_object_class_install_property (gobject_class, ARG_MAY_DEADLOCK,
282 g_param_spec_boolean ("may_deadlock", "May Deadlock",
283 "The queue may deadlock if it's full and not PLAYING",
284 TRUE, G_PARAM_READWRITE));
285 g_object_class_install_property (gobject_class, ARG_BLOCK_TIMEOUT,
286 g_param_spec_uint64 ("block_timeout", "Timeout for Block",
287 "Nanoseconds until blocked queue times out and returns filler event. "
288 "Value of -1 disables timeout",
289 0, G_MAXUINT64, -1, G_PARAM_READWRITE));
291 /* set several parent class virtual functions */
292 gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_queue_finalize);
294 gstelement_class->change_state = GST_DEBUG_FUNCPTR (gst_queue_change_state);
295 gstelement_class->release_locks = GST_DEBUG_FUNCPTR (gst_queue_release_locks);
/* Instance init: create and configure both pads, set default limits, and
 * allocate all synchronization primitives and internal queues.
 * NOTE(review): the assignments of queue->sinkpad/srcpad from the
 * gst_pad_new_from_template() calls are split across lines missing from
 * this extraction. */
299 gst_queue_init (GstQueue * queue)
301 /* scheduling on this kind of element is, well, interesting */
302 GST_FLAG_SET (queue, GST_ELEMENT_DECOUPLED);
303 GST_FLAG_SET (queue, GST_ELEMENT_EVENT_AWARE);
/* Sink pad: receives data via gst_queue_chain, proxies caps. */
306 gst_pad_new_from_template (gst_static_pad_template_get (&sinktemplate),
308 gst_pad_set_chain_function (queue->sinkpad,
309 GST_DEBUG_FUNCPTR (gst_queue_chain));
310 gst_element_add_pad (GST_ELEMENT (queue), queue->sinkpad);
311 gst_pad_set_link_function (queue->sinkpad,
312 GST_DEBUG_FUNCPTR (gst_queue_link_sink));
313 gst_pad_set_getcaps_function (queue->sinkpad,
314 GST_DEBUG_FUNCPTR (gst_queue_getcaps));
315 gst_pad_set_active (queue->sinkpad, TRUE);
/* Src pad: pulled via gst_queue_get; also handles events and queries. */
318 gst_pad_new_from_template (gst_static_pad_template_get (&srctemplate),
320 gst_pad_set_get_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_get));
321 gst_element_add_pad (GST_ELEMENT (queue), queue->srcpad);
322 gst_pad_set_link_function (queue->srcpad,
323 GST_DEBUG_FUNCPTR (gst_queue_link_src));
324 gst_pad_set_getcaps_function (queue->srcpad,
325 GST_DEBUG_FUNCPTR (gst_queue_getcaps));
326 gst_pad_set_event_function (queue->srcpad,
327 GST_DEBUG_FUNCPTR (gst_queue_handle_src_event));
328 gst_pad_set_query_function (queue->srcpad,
329 GST_DEBUG_FUNCPTR (gst_queue_handle_src_query));
330 gst_pad_set_active (queue->srcpad, TRUE);
/* Default limits: 100 buffers / 10 MB / 1 second, no minimum thresholds. */
332 queue->cur_level.buffers = 0; /* no content */
333 queue->cur_level.bytes = 0; /* no content */
334 queue->cur_level.time = 0; /* no content */
335 queue->max_size.buffers = 100; /* 100 buffers */
336 queue->max_size.bytes = 10 * 1024 * 1024; /* 10 MB */
337 queue->max_size.time = GST_SECOND; /* 1 s. */
338 queue->min_threshold.buffers = 0; /* no threshold */
339 queue->min_threshold.bytes = 0; /* no threshold */
340 queue->min_threshold.time = 0; /* no threshold */
342 queue->leaky = GST_QUEUE_NO_LEAK;
343 queue->may_deadlock = TRUE;
344 queue->block_timeout = GST_CLOCK_TIME_NONE;
345 queue->interrupt = FALSE;
346 queue->flush = FALSE;
/* qlock guards the data queue and levels; item_add/item_del signal
 * producers/consumers; event_lock+events carry upstream event requests. */
348 queue->qlock = g_mutex_new ();
349 queue->item_add = g_cond_new ();
350 queue->item_del = g_cond_new ();
351 queue->event_done = g_cond_new ();
352 queue->events = g_queue_new ();
353 queue->event_lock = g_mutex_new ();
354 queue->queue = g_queue_new ();
356 GST_CAT_DEBUG_OBJECT (GST_CAT_THREAD, queue,
357 "initialized queue's not_empty & not_full conditions");
/* Finalize: drop every queued GstData ref, free the data queue, all mutexes
 * and conds, unref any still-pending upstream events, then chain up.
 * NOTE(review): any g_free of the popped GstQueueEventResponse records would
 * be on lines missing from this extraction — confirm against the full file. */
360 /* called only once, as opposed to dispose */
362 gst_queue_finalize (GObject * object)
364 GstQueue *queue = GST_QUEUE (object);
366 GST_DEBUG_OBJECT (queue, "finalizing queue");
368 while (!g_queue_is_empty (queue->queue)) {
369 GstData *data = g_queue_pop_head (queue->queue);
371 gst_data_unref (data);
373 g_queue_free (queue->queue);
374 g_mutex_free (queue->qlock);
375 g_cond_free (queue->item_add);
376 g_cond_free (queue->item_del);
377 g_cond_free (queue->event_done);
/* Drain the upstream-event queue under its own lock before freeing it. */
378 g_mutex_lock (queue->event_lock);
379 while (!g_queue_is_empty (queue->events)) {
380 GstQueueEventResponse *er = g_queue_pop_head (queue->events);
382 gst_event_unref (er->event);
384 g_mutex_unlock (queue->event_lock);
385 g_mutex_free (queue->event_lock);
386 g_queue_free (queue->events);
388 if (G_OBJECT_CLASS (parent_class)->finalize)
389 G_OBJECT_CLASS (parent_class)->finalize (object);
/* getcaps for both pads: while data sits in the queue the src pad reports
 * the caps the queue was negotiated with (a copy), so downstream cannot
 * renegotiate out from under buffered data; otherwise proxy to the peer. */
393 gst_queue_getcaps (GstPad * pad)
397 queue = GST_QUEUE (gst_pad_get_parent (pad));
399 if (pad == queue->srcpad && queue->cur_level.bytes > 0) {
400 return gst_caps_copy (queue->negotiated_caps);
403 return gst_pad_proxy_getcaps (pad);
/* Sink-pad link function.  If the queue still holds data: accept identical
 * caps immediately, delay when not PLAYING, and otherwise block under qlock
 * until the queue drains (bailing out with DELAYED on interrupt) before
 * proxying the link.  On success a private copy of the caps is kept so
 * getcaps stays answerable while buffers remain queued. */
406 static GstPadLinkReturn
407 gst_queue_link_sink (GstPad * pad, const GstCaps * caps)
410 GstPadLinkReturn link_ret;
412 queue = GST_QUEUE (gst_pad_get_parent (pad));
414 if (queue->cur_level.bytes > 0) {
415 if (gst_caps_is_equal (caps, queue->negotiated_caps)) {
416 return GST_PAD_LINK_OK;
417 } else if (GST_STATE (queue) != GST_STATE_PLAYING) {
418 return GST_PAD_LINK_DELAYED;
421 /* Wait until the queue is empty before attempting the pad
423 GST_QUEUE_MUTEX_LOCK;
425 STATUS (queue, "waiting for queue to get empty");
426 while (queue->cur_level.bytes > 0) {
427 g_cond_wait (queue->item_del, queue->qlock);
428 if (queue->interrupt) {
429 GST_QUEUE_MUTEX_UNLOCK;
430 return GST_PAD_LINK_DELAYED;
433 STATUS (queue, "queue is now empty");
435 GST_QUEUE_MUTEX_UNLOCK;
438 link_ret = GST_PAD_LINK_OK;
440 link_ret = gst_pad_proxy_pad_link (pad, caps);
442 if (GST_PAD_LINK_SUCCESSFUL (link_ret)) {
443 /* we store an extra copy of the negotiated caps, just in case
444 * the pads become unnegotiated while we have buffers */
445 gst_caps_replace (&queue->negotiated_caps, gst_caps_copy (caps));
/* Src-pad link function.  Unlike the sink side, a non-empty queue refuses
 * any caps that differ from what is buffered (cannot wait here); otherwise
 * the link is proxied and the negotiated caps are copied, mirroring
 * gst_queue_link_sink. */
452 static GstPadLinkReturn
453 gst_queue_link_src (GstPad * pad, const GstCaps * caps)
456 GstPadLinkReturn link_ret;
458 queue = GST_QUEUE (gst_pad_get_parent (pad));
460 if (queue->cur_level.bytes > 0) {
461 if (gst_caps_is_equal (caps, queue->negotiated_caps)) {
462 return GST_PAD_LINK_OK;
464 return GST_PAD_LINK_REFUSED;
467 link_ret = gst_pad_proxy_pad_link (pad, caps);
469 link_ret = GST_PAD_LINK_OK;
471 if (GST_PAD_LINK_SUCCESSFUL (link_ret)) {
472 /* we store an extra copy of the negotiated caps, just in case
473 * the pads become unnegotiated while we have buffers */
474 gst_caps_replace (&queue->negotiated_caps, gst_caps_copy (caps));
/* Flush the queue; caller must already hold qlock.  Each queued item loses
 * two refs: the extra read-only ref taken in chain, plus the data's own ref
 * since flushing destroys it.  Resets all level counters and wakes any
 * producer blocked on item_del. */
481 gst_queue_locked_flush (GstQueue * queue)
483 while (!g_queue_is_empty (queue->queue)) {
484 GstData *data = g_queue_pop_head (queue->queue);
486 /* First loose the reference we added when putting that data in the queue */
487 gst_data_unref (data);
488 /* Then loose another reference because we are supposed to destroy that
489 data when flushing */
490 gst_data_unref (data);
492 queue->timeval = NULL;
493 queue->cur_level.buffers = 0;
494 queue->cur_level.bytes = 0;
495 queue->cur_level.time = 0;
497 /* make sure any pending buffers to be added are flushed too */
500 /* we deleted something... */
501 g_cond_signal (queue->item_del);
/* Drain queue->events: forward each event upstream on behalf of the thread
 * that posted it in gst_queue_handle_src_event, storing the result in the
 * response record and signalling event_done.  event_lock is dropped around
 * gst_pad_event_default so event dispatch never runs under it.
 * NOTE(review): the "already handled" error path's guarding condition is on
 * lines missing from this extraction. */
505 gst_queue_handle_pending_events (GstQueue * queue)
507 /* check for events to send upstream */
508 /* g_queue_get_length is glib 2.4, so don't depend on it yet, use ->length */
509 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
510 "handling pending events, events queue of size %d",
511 queue->events->length);
512 g_mutex_lock (queue->event_lock);
513 while (!g_queue_is_empty (queue->events)) {
514 GstQueueEventResponse *er;
516 er = g_queue_pop_head (queue->events);
518 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
519 "sending event %p (%d) from event response %p upstream",
520 er->event, GST_EVENT_TYPE (er->event), er);
522 /* change this to an assert when this file gets reviewed properly. */
523 GST_ELEMENT_ERROR (queue, CORE, EVENT, (NULL),
524 ("already handled event %p (%d) from event response %p upstream",
525 er->event, GST_EVENT_TYPE (er->event), er));
528 g_mutex_unlock (queue->event_lock);
529 er->ret = gst_pad_event_default (queue->srcpad, er->event);
531 g_cond_signal (queue->event_done);
532 g_mutex_lock (queue->event_lock);
533 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "event sent");
535 g_mutex_unlock (queue->event_lock);
/* Sink-pad chain function: enqueue one GstData (buffer or event).
 * Under qlock it first services pending upstream events, handles FLUSH/EOS
 * events inline, then — if the buffer would overfill the queue — applies the
 * configured leak policy or blocks on item_del until space frees up.
 * Finally pushes the item (keeping an extra ref so queued data stays
 * read-only), updates level counters for buffers, and signals item_add.
 * NOTE(review): braces, `break`s, goto labels and some statements are
 * missing from this extraction; code is documented as-is, byte-identical. */
539 gst_queue_chain (GstPad * pad, GstData * data)
543 g_return_if_fail (pad != NULL);
544 g_return_if_fail (GST_IS_PAD (pad));
545 g_return_if_fail (data != NULL);
547 queue = GST_QUEUE (GST_OBJECT_PARENT (pad));
550 /* we have to lock the queue since we span threads */
551 GST_QUEUE_MUTEX_LOCK;
553 gst_queue_handle_pending_events (queue);
555 /* assume don't need to flush this buffer when the queue is filled */
556 queue->flush = FALSE;
/* Events: FLUSH empties the queue immediately; others (e.g. EOS) are
 * queued like data so they come out in order on the src side. */
558 if (GST_IS_EVENT (data)) {
559 switch (GST_EVENT_TYPE (data)) {
560 case GST_EVENT_FLUSH:
561 STATUS (queue, "received flush event");
562 gst_queue_locked_flush (queue);
563 STATUS (queue, "after flush");
566 STATUS (queue, "received EOS");
569 /* we put the event in the queue, we don't have to act ourselves */
570 GST_CAT_LOG_OBJECT (queue_dataflow, queue,
571 "adding event %p of type %d", data, GST_EVENT_TYPE (data));
576 if (GST_IS_BUFFER (data))
577 GST_CAT_LOG_OBJECT (queue_dataflow, queue,
578 "adding buffer %p of size %d", data, GST_BUFFER_SIZE (data));
580 /* We make space available if we're "full" according to whatever
581 * the user defined as "full". Note that this only applies to buffers.
582 * We always handle events and they don't count in our statistics. */
583 if (GST_IS_BUFFER (data) &&
584 ((queue->max_size.buffers > 0 &&
585 queue->cur_level.buffers >= queue->max_size.buffers) ||
586 (queue->max_size.bytes > 0 &&
587 queue->cur_level.bytes >= queue->max_size.bytes) ||
588 (queue->max_size.time > 0 &&
589 queue->cur_level.time >= queue->max_size.time))) {
/* Emit "overrun" without holding qlock so handlers can touch the queue. */
590 GST_QUEUE_MUTEX_UNLOCK;
591 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_OVERRUN], 0);
592 GST_QUEUE_MUTEX_LOCK;
594 /* how are we going to make space for this buffer? */
595 switch (queue->leaky) {
596 /* leak current buffer */
597 case GST_QUEUE_LEAK_UPSTREAM:
598 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
599 "queue is full, leaking buffer on upstream end");
600 /* now we can clean up and exit right away */
601 GST_QUEUE_MUTEX_UNLOCK;
604 /* leak first buffer in the queue */
605 case GST_QUEUE_LEAK_DOWNSTREAM:{
606 /* this is a bit hacky. We'll manually iterate the list
607 * and find the first buffer from the head on. We'll
608 * unref that and "fix up" the GQueue object... */
610 GstData *leak = NULL;
612 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
613 "queue is full, leaking buffer on downstream end");
615 for (item = queue->queue->head; item != NULL; item = item->next) {
616 if (GST_IS_BUFFER (item->data)) {
622 /* if we didn't find anything, it means we have no buffers
623 * in here. That cannot happen, since we had >= 1 bufs */
626 /* Now remove it from the list, fixing up the GQueue
627 * CHECKME: is a queue->head the first or the last item? */
628 item = g_list_delete_link (queue->queue->head, item);
629 queue->queue->head = g_list_first (item);
630 queue->queue->tail = g_list_last (item);
631 queue->queue->length--;
633 /* and unref the data at the end. Twice, because we keep a ref
634 * to make things read-only. Also keep our list uptodate. */
635 queue->cur_level.bytes -= GST_BUFFER_SIZE (data);
636 queue->cur_level.buffers--;
637 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
638 queue->cur_level.time -= GST_BUFFER_DURATION (data);
640 gst_data_unref (data);
641 gst_data_unref (data);
646 g_warning ("Unknown leaky type, using default");
649 /* don't leak. Instead, wait for space to be available */
650 case GST_QUEUE_NO_LEAK:
651 STATUS (queue, "pre-full wait");
653 while ((queue->max_size.buffers > 0 &&
654 queue->cur_level.buffers >= queue->max_size.buffers) ||
655 (queue->max_size.bytes > 0 &&
656 queue->cur_level.bytes >= queue->max_size.bytes) ||
657 (queue->max_size.time > 0 &&
658 queue->cur_level.time >= queue->max_size.time)) {
659 /* if there's a pending state change for this queue
660 * or its manager, switch back to iterator so bottom
661 * half of state change executes */
662 if (queue->interrupt) {
665 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "interrupted");
666 queue->interrupt = FALSE;
667 GST_QUEUE_MUTEX_UNLOCK;
668 sched = gst_pad_get_scheduler (queue->sinkpad);
669 if (!sched || gst_scheduler_interrupt (sched, GST_ELEMENT (queue))) {
672 /* if we got here because we were unlocked after a
673 * flush, we don't need to add the buffer to the
676 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
677 "not adding pending buffer after flush");
680 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
681 "adding pending buffer after interrupt");
/* Not PLAYING: the puller is shut down; either error out (unref the
 * buffer first) or keep waiting when may_deadlock allows it. */
685 if (GST_STATE (queue) != GST_STATE_PLAYING) {
686 /* this means the other end is shut down. Try to
687 * signal to resolve the error */
688 if (!queue->may_deadlock) {
689 GST_QUEUE_MUTEX_UNLOCK;
690 gst_data_unref (data);
691 GST_ELEMENT_ERROR (queue, CORE, THREAD, (NULL),
692 ("deadlock found, shutting down source pad elements"));
693 /* we don't go to out_unref here, since we want to
694 * unref the buffer *before* calling GST_ELEMENT_ERROR */
697 GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
698 "%s: waiting for the app to restart "
699 "source pad elements", GST_ELEMENT_NAME (queue));
703 /* OK, we've got a serious issue here. Imagine the situation
704 * where the puller (next element) is sending an event here,
705 * so it cannot pull events from the queue, and we cannot
706 * push data further because the queue is 'full' and therefore,
707 * we wait here (and do not handle events): deadlock! to solve
708 * that, we handle pending upstream events here, too. */
709 gst_queue_handle_pending_events (queue);
711 STATUS (queue, "waiting for item_del signal from thread using qlock");
712 g_cond_wait (queue->item_del, queue->qlock);
713 STATUS (queue, "received item_del signal from thread using qlock");
716 STATUS (queue, "post-full wait");
/* Space freed: emit "running" outside the lock, like "overrun" above. */
717 GST_QUEUE_MUTEX_UNLOCK;
718 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
719 GST_QUEUE_MUTEX_LOCK;
724 /* put the buffer on the tail of the list. We keep a reference,
725 * so that the data is read-only while in here. There's a good
726 * reason to do so: we have a size and time counter, and any
727 * modification to the content could change any of the two. */
729 g_queue_push_tail (queue->queue, data);
731 /* Note that we only add buffers (not events) to the statistics */
732 if (GST_IS_BUFFER (data)) {
733 queue->cur_level.buffers++;
734 queue->cur_level.bytes += GST_BUFFER_SIZE (data);
735 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
736 queue->cur_level.time += GST_BUFFER_DURATION (data);
739 STATUS (queue, "+ level");
741 GST_CAT_LOG_OBJECT (queue_dataflow, queue, "signalling item_add");
742 g_cond_signal (queue->item_add);
743 GST_QUEUE_MUTEX_UNLOCK;
/* Error/leak exit path: drop the caller's ref on the rejected data. */
748 gst_data_unref (data);
/* Src-pad get function: pop and return one GstData.  Under qlock it blocks
 * while the queue is empty or below the configured min thresholds (emitting
 * "underrun" once before waiting), honoring interrupt requests, non-PLAYING
 * shutdown, and the optional block_timeout (which yields a filler event).
 * After popping, buffer statistics are decremented, the extra queued ref is
 * dropped, item_del is signalled, and a queued EOS event sets element EOS.
 * NOTE(review): braces, `break`s and the return statement are on lines
 * missing from this extraction; code kept byte-identical. */
753 gst_queue_get (GstPad * pad)
758 g_return_val_if_fail (pad != NULL, NULL);
759 g_return_val_if_fail (GST_IS_PAD (pad), NULL);
761 queue = GST_QUEUE (gst_pad_get_parent (pad));
764 /* have to lock for thread-safety */
765 GST_QUEUE_MUTEX_LOCK;
/* Emit "underrun" (outside the lock) once if we are about to wait. */
767 if (queue->queue->length == 0 ||
768 (queue->min_threshold.buffers > 0 &&
769 queue->cur_level.buffers < queue->min_threshold.buffers) ||
770 (queue->min_threshold.bytes > 0 &&
771 queue->cur_level.bytes < queue->min_threshold.bytes) ||
772 (queue->min_threshold.time > 0 &&
773 queue->cur_level.time < queue->min_threshold.time)) {
774 GST_QUEUE_MUTEX_UNLOCK;
775 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_UNDERRUN], 0);
776 GST_QUEUE_MUTEX_LOCK;
778 STATUS (queue, "pre-empty wait");
779 while (queue->queue->length == 0 ||
780 (queue->min_threshold.buffers > 0 &&
781 queue->cur_level.buffers < queue->min_threshold.buffers) ||
782 (queue->min_threshold.bytes > 0 &&
783 queue->cur_level.bytes < queue->min_threshold.bytes) ||
784 (queue->min_threshold.time > 0 &&
785 queue->cur_level.time < queue->min_threshold.time)) {
786 /* if there's a pending state change for this queue or its
787 * manager, switch back to iterator so bottom half of state
788 * change executes. */
789 if (queue->interrupt) {
792 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "interrupted");
793 queue->interrupt = FALSE;
794 GST_QUEUE_MUTEX_UNLOCK;
795 sched = gst_pad_get_scheduler (queue->srcpad);
796 if (!sched || gst_scheduler_interrupt (sched, GST_ELEMENT (queue)))
797 return GST_DATA (gst_event_new (GST_EVENT_INTERRUPT));
800 if (GST_STATE (queue) != GST_STATE_PLAYING) {
801 /* this means the other end is shut down */
802 if (!queue->may_deadlock) {
803 GST_QUEUE_MUTEX_UNLOCK;
804 GST_ELEMENT_ERROR (queue, CORE, THREAD, (NULL),
805 ("deadlock found, shutting down sink pad elements"));
808 GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
809 "%s: waiting for the app to restart "
810 "source pad elements", GST_ELEMENT_NAME (queue));
814 STATUS (queue, "waiting for item_add");
/* With a finite block_timeout, a timed wait that expires returns a
 * filler event instead of blocking forever. */
816 if (queue->block_timeout != GST_CLOCK_TIME_NONE) {
819 g_get_current_time (&timeout);
820 g_time_val_add (&timeout, queue->block_timeout / 1000);
821 GST_LOG_OBJECT (queue, "g_cond_time_wait using qlock from thread %p",
823 if (!g_cond_timed_wait (queue->item_add, queue->qlock, &timeout)) {
824 GST_QUEUE_MUTEX_UNLOCK;
825 GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
826 "Sending filler event");
827 return GST_DATA (gst_event_new_filler ());
830 GST_LOG_OBJECT (queue, "doing g_cond_wait using qlock from thread %p",
832 g_cond_wait (queue->item_add, queue->qlock);
833 GST_LOG_OBJECT (queue, "done g_cond_wait using qlock from thread %p",
836 STATUS (queue, "got item_add signal");
839 STATUS (queue, "post-empty wait");
840 GST_QUEUE_MUTEX_UNLOCK;
841 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
842 GST_QUEUE_MUTEX_LOCK;
845 /* There's something in the list now, whatever it is */
846 data = g_queue_pop_head (queue->queue);
847 GST_CAT_LOG_OBJECT (queue_dataflow, queue,
848 "retrieved data %p from queue", data);
853 if (GST_IS_BUFFER (data)) {
854 /* Update statistics */
855 queue->cur_level.buffers--;
856 queue->cur_level.bytes -= GST_BUFFER_SIZE (data);
857 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
858 queue->cur_level.time -= GST_BUFFER_DURATION (data);
861 /* Now that we're done, we can lose our own reference to
862 * the item, since we're no longer in danger. */
863 gst_data_unref (data);
865 STATUS (queue, "after _get()");
867 GST_CAT_LOG_OBJECT (queue_dataflow, queue, "signalling item_del");
868 g_cond_signal (queue->item_del);
869 GST_QUEUE_MUTEX_UNLOCK;
871 /* FIXME: I suppose this needs to be locked, since the EOS
872 * bit affects the pipeline state. However, that bit is
873 * locked too so it'd cause a deadlock. */
874 if (GST_IS_EVENT (data)) {
875 GstEvent *event = GST_EVENT (data);
877 switch (GST_EVENT_TYPE (event)) {
879 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
880 "queue \"%s\" eos", GST_ELEMENT_NAME (queue));
881 gst_element_set_eos (GST_ELEMENT (queue));
/* Src-pad event handler.  In PLAYING, the event is handed to the sink-side
 * thread via queue->events (a stack-allocated GstQueueEventResponse) and
 * this thread waits on event_done in 500 ms slices, removing and dropping
 * the event on timeout; item_del is signalled first to unblock a chain
 * thread stuck on a full queue (see chain for the deadlock rationale).
 * Outside PLAYING the event is dispatched directly, and FLUSH or a flushing
 * SEEK also empties the queue under qlock. */
893 gst_queue_handle_src_event (GstPad * pad, GstEvent * event)
895 GstQueue *queue = GST_QUEUE (gst_pad_get_parent (pad));
898 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "got event %p (%d)",
899 event, GST_EVENT_TYPE (event));
900 GST_QUEUE_MUTEX_LOCK;
902 if (gst_element_get_state (GST_ELEMENT (queue)) == GST_STATE_PLAYING) {
903 GstQueueEventResponse er;
905 /* push the event to the queue and wait for upstream consumption */
908 g_mutex_lock (queue->event_lock);
909 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
910 "putting event %p (%d) on internal queue", event,
911 GST_EVENT_TYPE (event));
912 g_queue_push_tail (queue->events, &er);
913 g_mutex_unlock (queue->event_lock);
914 GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
915 "Preparing for loop for event handler");
916 /* see the chain function on why this is here - it prevents a deadlock */
917 g_cond_signal (queue->item_del);
918 while (!er.handled) {
921 g_get_current_time (&timeout);
922 g_time_val_add (&timeout, 500 * 1000); /* half a second */
923 GST_LOG_OBJECT (queue, "doing g_cond_wait using qlock from thread %p",
925 if (!g_cond_timed_wait (queue->event_done, queue->qlock, &timeout) &&
927 GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
928 "timeout in upstream event handling, dropping event %p (%d)",
929 er.event, GST_EVENT_TYPE (er.event));
930 g_mutex_lock (queue->event_lock);
931 /* since this queue is for src events (ie upstream), this thread is
932 * the only one that is pushing stuff on it, so we're sure that
933 * it's still the tail element. FIXME: But in practice, we should use
934 * GList instead of GQueue for this so we can remove any element in
936 g_queue_pop_tail (queue->events);
937 g_mutex_unlock (queue->event_lock);
938 gst_event_unref (er.event);
943 GST_CAT_WARNING_OBJECT (queue_dataflow, queue, "Event handled");
/* Non-PLAYING path: handle the event directly on this thread. */
946 res = gst_pad_event_default (pad, event);
948 switch (GST_EVENT_TYPE (event)) {
949 case GST_EVENT_FLUSH:
950 GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
951 "FLUSH event, flushing queue\n");
952 gst_queue_locked_flush (queue);
955 if (GST_EVENT_SEEK_FLAGS (event) & GST_SEEK_FLAG_FLUSH) {
956 gst_queue_locked_flush (queue);
963 GST_QUEUE_MUTEX_UNLOCK;
/* Src-pad query handler: forwards the query to the sink pad's peer, then
 * for POSITION queries subtracts the amount still buffered in the queue
 * (bytes or time, per format) from the peer's answer.
 * NOTE(review): the switch head and break/return lines are missing from
 * this extraction. */
969 gst_queue_handle_src_query (GstPad * pad,
970 GstQueryType type, GstFormat * fmt, gint64 * value)
972 GstQueue *queue = GST_QUEUE (gst_pad_get_parent (pad));
975 if (!GST_PAD_PEER (queue->sinkpad))
977 res = gst_pad_query (GST_PAD_PEER (queue->sinkpad), type, fmt, value);
981 if (type == GST_QUERY_POSITION) {
982 /* FIXME: this code assumes that there's no discont in the queue */
984 case GST_FORMAT_BYTES:
985 *value -= queue->cur_level.bytes;
987 case GST_FORMAT_TIME:
988 *value -= queue->cur_level.time;
/* release_locks vfunc: set the interrupt flag and wake both the producer
 * (item_add waiter) and consumer (item_del waiter) so blocked chain/get
 * calls can notice the pending state change and bail out. */
1000 gst_queue_release_locks (GstElement * element)
1004 queue = GST_QUEUE (element);
1006 GST_QUEUE_MUTEX_LOCK;
1007 queue->interrupt = TRUE;
1008 g_cond_signal (queue->item_add);
1009 g_cond_signal (queue->item_del);
1010 GST_QUEUE_MUTEX_UNLOCK;
/* change_state vfunc.  Under qlock: flush on NULL->READY, and on
 * PAUSED->PLAYING verify the sink pad is linked and that src and sink sit
 * on different schedulers (this element exists to decouple them), failing
 * otherwise; PAUSED->READY flushes and drops the stored negotiated caps.
 * After chaining up, both pads are forced active (see comment below).
 * NOTE(review): error-path labels and some braces/breaks are missing from
 * this extraction; the duplicated "done with state change" logging suggests
 * a success path and an error path whose structure is not visible here. */
1015 static GstElementStateReturn
1016 gst_queue_change_state (GstElement * element)
1019 GstElementStateReturn ret = GST_STATE_SUCCESS;
1021 queue = GST_QUEUE (element);
1023 GST_CAT_LOG_OBJECT (GST_CAT_STATES, element,
1024 "starting state change 0x%x", GST_STATE_TRANSITION (element));
1026 /* lock the queue so another thread (not in sync with this thread's state)
1027 * can't call this queue's _get (or whatever)
1029 GST_QUEUE_MUTEX_LOCK;
1031 switch (GST_STATE_TRANSITION (element)) {
1032 case GST_STATE_NULL_TO_READY:
1033 gst_queue_locked_flush (queue);
1035 case GST_STATE_PAUSED_TO_PLAYING:
1036 if (!GST_PAD_IS_LINKED (queue->sinkpad)) {
1037 GST_CAT_DEBUG_OBJECT (GST_CAT_STATES, queue,
1038 "queue %s is not linked", GST_ELEMENT_NAME (queue));
1039 /* FIXME can this be? */
1040 g_cond_signal (queue->item_add);
1042 ret = GST_STATE_FAILURE;
1045 GstScheduler *src_sched, *sink_sched;
1047 src_sched = gst_pad_get_scheduler (GST_PAD (queue->srcpad));
1048 sink_sched = gst_pad_get_scheduler (GST_PAD (queue->sinkpad));
/* A queue bridging the same scheduler on both sides is useless. */
1050 if (src_sched == sink_sched) {
1051 GST_CAT_DEBUG_OBJECT (GST_CAT_STATES, queue,
1052 "queue %s does not connect different schedulers",
1053 GST_ELEMENT_NAME (queue));
1055 g_warning ("queue %s does not connect different schedulers",
1056 GST_ELEMENT_NAME (queue));
1058 ret = GST_STATE_FAILURE;
1062 queue->interrupt = FALSE;
1064 case GST_STATE_PAUSED_TO_READY:
1065 gst_queue_locked_flush (queue);
1066 gst_caps_replace (&queue->negotiated_caps, NULL);
1072 GST_QUEUE_MUTEX_UNLOCK;
1074 if (GST_ELEMENT_CLASS (parent_class)->change_state)
1075 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element);
1077 /* this is an ugly hack to make sure our pads are always active.
1078 * Reason for this is that pad activation for the queue element
1079 * depends on 2 schedulers (ugh) */
1080 gst_pad_set_active (queue->sinkpad, TRUE);
1081 gst_pad_set_active (queue->srcpad, TRUE);
1083 GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "done with state change");
1088 GST_QUEUE_MUTEX_UNLOCK;
1090 GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "done with state change");
/* GObject set_property: stores max-size / min-threshold limits and the
 * leaky / may_deadlock / block_timeout knobs under qlock, since chain and
 * get read these fields while deciding whether to block.
 * NOTE(review): the switch head and per-case break lines are missing from
 * this extraction. */
1097 gst_queue_set_property (GObject * object,
1098 guint prop_id, const GValue * value, GParamSpec * pspec)
1100 GstQueue *queue = GST_QUEUE (object);
1102 /* someone could change levels here, and since this
1103 * affects the get/put funcs, we need to lock for safety. */
1104 GST_QUEUE_MUTEX_LOCK;
1107 case ARG_MAX_SIZE_BYTES:
1108 queue->max_size.bytes = g_value_get_uint (value);
1110 case ARG_MAX_SIZE_BUFFERS:
1111 queue->max_size.buffers = g_value_get_uint (value);
1113 case ARG_MAX_SIZE_TIME:
1114 queue->max_size.time = g_value_get_uint64 (value);
1116 case ARG_MIN_THRESHOLD_BYTES:
1117 queue->min_threshold.bytes = g_value_get_uint (value);
1119 case ARG_MIN_THRESHOLD_BUFFERS:
1120 queue->min_threshold.buffers = g_value_get_uint (value);
1122 case ARG_MIN_THRESHOLD_TIME:
1123 queue->min_threshold.time = g_value_get_uint64 (value);
1126 queue->leaky = g_value_get_enum (value);
1128 case ARG_MAY_DEADLOCK:
1129 queue->may_deadlock = g_value_get_boolean (value);
1131 case ARG_BLOCK_TIMEOUT:
1132 queue->block_timeout = g_value_get_uint64 (value);
1135 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1139 GST_QUEUE_MUTEX_UNLOCK;
/* GObject get_property: reads back the current levels, limits, thresholds
 * and behavior knobs.  Unlike set_property, no qlock is taken here in the
 * visible code — presumably tolerated for plain scalar reads; confirm
 * against the full file.
 * NOTE(review): the switch head and per-case break lines are missing from
 * this extraction. */
1143 gst_queue_get_property (GObject * object,
1144 guint prop_id, GValue * value, GParamSpec * pspec)
1146 GstQueue *queue = GST_QUEUE (object);
1149 case ARG_CUR_LEVEL_BYTES:
1150 g_value_set_uint (value, queue->cur_level.bytes);
1152 case ARG_CUR_LEVEL_BUFFERS:
1153 g_value_set_uint (value, queue->cur_level.buffers);
1155 case ARG_CUR_LEVEL_TIME:
1156 g_value_set_uint64 (value, queue->cur_level.time);
1158 case ARG_MAX_SIZE_BYTES:
1159 g_value_set_uint (value, queue->max_size.bytes);
1161 case ARG_MAX_SIZE_BUFFERS:
1162 g_value_set_uint (value, queue->max_size.buffers);
1164 case ARG_MAX_SIZE_TIME:
1165 g_value_set_uint64 (value, queue->max_size.time);
1167 case ARG_MIN_THRESHOLD_BYTES:
1168 g_value_set_uint (value, queue->min_threshold.bytes);
1170 case ARG_MIN_THRESHOLD_BUFFERS:
1171 g_value_set_uint (value, queue->min_threshold.buffers);
1173 case ARG_MIN_THRESHOLD_TIME:
1174 g_value_set_uint64 (value, queue->min_threshold.time);
1177 g_value_set_enum (value, queue->leaky);
1179 case ARG_MAY_DEADLOCK:
1180 g_value_set_boolean (value, queue->may_deadlock);
1182 case ARG_BLOCK_TIMEOUT:
1183 g_value_set_uint64 (value, queue->block_timeout);
1186 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);