2 * Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
3 * 2000 Wim Taymans <wtay@chello.be>
4 * 2003 Colin Walters <cwalters@gnome.org>
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Library General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Library General Public License for more details.
18 * You should have received a copy of the GNU Library General Public
19 * License along with this library; if not, write to the
20 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
25 #include "gst_private.h"
28 #include "gstscheduler.h"
/* Element description used by the registry / gst-inspect; the long
 * description, klass string and closing paren are outside this view. */
32 static GstElementDetails gst_queue_details = GST_ELEMENT_DETAILS (
36 "Erik Walthinsen <omega@cse.ogi.edu>"
20 /* Queue signals and args */
50 /* FIXME: don't we have another way of doing this
51 * "Gstreamer format" (frame/byte/time) queries? */
52 ARG_CUR_LEVEL_BUFFERS,
/* NOTE(review): "TRESHOLD" is a typo for "THRESHOLD", but these enum
 * names and the matching "min-treshold-*" property strings are public
 * API for this element, so they must stay spelled this way. */
58 ARG_MIN_TRESHOLD_BUFFERS,
59 ARG_MIN_TRESHOLD_BYTES,
60 ARG_MIN_TRESHOLD_TIME,
/* Record for an event travelling upstream from the src pad: the chain
 * thread performs the event and fills in 'ret', then flags 'handled'
 * so the waiting src-event thread can stop polling. */
67 typedef struct _GstQueueEventResponse {
69 gboolean ret, handled;
70 } GstQueueEventResponse;
/* Forward declarations for the GObject / GstElement plumbing below.
 * Several prototypes are truncated in this view (parameter lists on
 * following lines are missing). */
72 static void gst_queue_base_init (GstQueueClass *klass);
73 static void gst_queue_class_init (GstQueueClass *klass);
74 static void gst_queue_init (GstQueue *queue);
75 static void gst_queue_dispose (GObject *object);
77 static void gst_queue_set_property (GObject *object,
81 static void gst_queue_get_property (GObject *object,
86 static void gst_queue_chain (GstPad *pad,
88 static GstData *gst_queue_get (GstPad *pad);
90 static gboolean gst_queue_handle_src_event (GstPad *pad,
93 static GstCaps *gst_queue_getcaps (GstPad *pad);
94 static GstPadLinkReturn
95 gst_queue_link (GstPad *pad,
97 static void gst_queue_locked_flush (GstQueue *queue);
99 static GstElementStateReturn
100 gst_queue_change_state (GstElement *element);
101 static gboolean gst_queue_release_locks (GstElement *element);
/* Lazily registers the GstQueueLeaky enum type used by the "leaky"
 * property. Standard GLib once-only registration guarded by a static. */
104 #define GST_TYPE_QUEUE_LEAKY (queue_leaky_get_type ())
107 queue_leaky_get_type (void)
109 static GType queue_leaky_type = 0;
110 static GEnumValue queue_leaky[] = {
111 { GST_QUEUE_NO_LEAK, "0", "Not Leaky" },
112 { GST_QUEUE_LEAK_UPSTREAM, "1", "Leaky on Upstream" },
113 { GST_QUEUE_LEAK_DOWNSTREAM, "2", "Leaky on Downstream" },
/* NOTE(review): the terminating { 0, NULL, NULL } sentinel required by
 * g_enum_register_static() is not visible in this view — confirm it
 * exists in the full file. */
116 if (!queue_leaky_type) {
117 queue_leaky_type = g_enum_register_static("GstQueueLeaky", queue_leaky);
119 return queue_leaky_type;
/* parent_class is cached in class_init for chaining up dispose /
 * change_state; gst_queue_signals holds the three signal ids. */
122 static GstElementClass *parent_class = NULL;
123 static guint gst_queue_signals[LAST_SIGNAL] = { 0 };
/* Standard once-only GType registration for GstQueue, derived from
 * GST_TYPE_ELEMENT. Several GTypeInfo slots (NULL entries, instance
 * size) are outside this view. */
126 gst_queue_get_type (void)
128 static GType queue_type = 0;
131 static const GTypeInfo queue_info = {
132 sizeof (GstQueueClass),
133 (GBaseInitFunc) gst_queue_base_init,
135 (GClassInitFunc) gst_queue_class_init,
140 (GInstanceInitFunc) gst_queue_init,
144 queue_type = g_type_register_static (GST_TYPE_ELEMENT,
145 "GstQueue", &queue_info, 0);
/* base_init: attach the static element details to the class so the
 * registry can describe the element without instantiating it. */
152 gst_queue_base_init (GstQueueClass *klass)
154 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
156 gst_element_class_set_details (gstelement_class, &gst_queue_details);
/* class_init: registers the underrun/running/overrun signals, installs
 * all properties, and overrides dispose/set_property/get_property plus
 * the element's change_state and release_locks vfuncs. */
160 gst_queue_class_init (GstQueueClass *klass)
162 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
163 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
/* cache parent class for chain-up in dispose and change_state */
165 parent_class = g_type_class_peek_parent (klass);
168 gst_queue_signals[SIGNAL_UNDERRUN] =
169 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
170 G_STRUCT_OFFSET (GstQueueClass, underrun), NULL, NULL,
171 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
172 gst_queue_signals[SIGNAL_RUNNING] =
173 g_signal_new ("running", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
174 G_STRUCT_OFFSET (GstQueueClass, running), NULL, NULL,
175 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
176 gst_queue_signals[SIGNAL_OVERRUN] =
177 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
178 G_STRUCT_OFFSET (GstQueueClass, overrun), NULL, NULL,
179 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
/* NOTE(review): the nick says "(kB)" but the value is in bytes, per the
 * blurb and the code that fills it — the nick is misleading. Same for
 * "max-size-bytes" and "min-treshold-bytes" below. */
182 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_BYTES,
183 g_param_spec_uint ("current-level-bytes", "Current level (kB)",
184 "Current amount of data in the queue (bytes)",
185 0, G_MAXUINT, 0, G_PARAM_READABLE));
186 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_BUFFERS,
187 g_param_spec_uint ("current-level-buffers", "Current level (buffers)",
188 "Current number of buffers in the queue",
189 0, G_MAXUINT, 0, G_PARAM_READABLE));
190 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_TIME,
191 g_param_spec_uint64 ("current-level-time", "Current level (ns)",
192 "Current amount of data in the queue (in ns)",
193 0, G_MAXUINT64, 0, G_PARAM_READABLE));
195 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_BYTES,
196 g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
197 "Max. amount of data in the queue (bytes, 0=disable)",
198 0, G_MAXUINT, 0, G_PARAM_READWRITE));
199 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_BUFFERS,
200 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
201 "Max. number of buffers in the queue (0=disable)",
202 0, G_MAXUINT, 0, G_PARAM_READWRITE));
203 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_TIME,
204 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
205 "Max. amount of data in the queue (in ns, 0=disable)",
206 0, G_MAXUINT64, 0, G_PARAM_READWRITE));
208 g_object_class_install_property (gobject_class, ARG_MIN_TRESHOLD_BYTES,
209 g_param_spec_uint ("min-treshold-bytes", "Min. treshold (kB)",
210 "Min. amount of data in the queue to allow reading (bytes, 0=disable)",
211 0, G_MAXUINT, 0, G_PARAM_READWRITE));
212 g_object_class_install_property (gobject_class, ARG_MIN_TRESHOLD_BUFFERS,
213 g_param_spec_uint ("min-treshold-buffers", "Min. treshold (buffers)",
214 "Min. number of buffers in the queue to allow reading (0=disable)",
215 0, G_MAXUINT, 0, G_PARAM_READWRITE));
216 g_object_class_install_property (gobject_class, ARG_MIN_TRESHOLD_TIME,
217 g_param_spec_uint64 ("min-treshold-time", "Min. treshold (ns)",
218 "Min. amount of data in the queue to allow reading (in ns, 0=disable)",
219 0, G_MAXUINT64, 0, G_PARAM_READWRITE));
221 g_object_class_install_property (gobject_class, ARG_LEAKY,
222 g_param_spec_enum ("leaky", "Leaky",
223 "Where the queue leaks, if at all",
224 GST_TYPE_QUEUE_LEAKY, GST_QUEUE_NO_LEAK, G_PARAM_READWRITE));
225 g_object_class_install_property (gobject_class, ARG_MAY_DEADLOCK,
226 g_param_spec_boolean ("may_deadlock", "May Deadlock",
227 "The queue may deadlock if it's full and not PLAYING",
228 TRUE, G_PARAM_READWRITE));
/* the -1 default for this uint64 pspec wraps to G_MAXUINT64, i.e.
 * GST_CLOCK_TIME_NONE — "timeout disabled", matching the blurb */
229 g_object_class_install_property (gobject_class, ARG_BLOCK_TIMEOUT,
230 g_param_spec_uint64 ("block_timeout", "Timeout for Block",
231 "Nanoseconds until blocked queue times out and returns filler event. "
232 "Value of -1 disables timeout",
233 0, G_MAXUINT64, -1, G_PARAM_READWRITE));
235 /* set several parent class virtual functions */
236 gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_queue_dispose);
237 gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_queue_set_property);
238 gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_queue_get_property);
240 gstelement_class->change_state = GST_DEBUG_FUNCPTR (gst_queue_change_state);
241 gstelement_class->release_locks = GST_DEBUG_FUNCPTR (gst_queue_release_locks);
/* instance init: create sink/src pads with their chain/get/link/getcaps
 * handlers, zero the level counters, set defaults, and create the mutex,
 * condition variables and the two GQueues (data queue + upstream-event
 * queue) used for cross-thread signalling. */
245 gst_queue_init (GstQueue *queue)
247 /* scheduling on this kind of element is, well, interesting */
248 GST_FLAG_SET (queue, GST_ELEMENT_DECOUPLED);
249 GST_FLAG_SET (queue, GST_ELEMENT_EVENT_AWARE);
251 queue->sinkpad = gst_pad_new ("sink", GST_PAD_SINK);
252 gst_pad_set_chain_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_queue_chain));
253 gst_element_add_pad (GST_ELEMENT (queue), queue->sinkpad);
254 gst_pad_set_link_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_queue_link));
255 gst_pad_set_getcaps_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_queue_getcaps));
256 gst_pad_set_active (queue->sinkpad, TRUE);
258 queue->srcpad = gst_pad_new ("src", GST_PAD_SRC);
259 gst_pad_set_get_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_get));
260 gst_element_add_pad (GST_ELEMENT (queue), queue->srcpad);
261 gst_pad_set_link_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_link));
262 gst_pad_set_getcaps_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_getcaps));
263 gst_pad_set_event_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_handle_src_event));
264 gst_pad_set_active (queue->srcpad, TRUE);
266 queue->cur_level.buffers = 0; /* no content */
267 queue->cur_level.bytes = 0; /* no content */
268 queue->cur_level.time = 0; /* no content */
269 queue->max_size.buffers = 250; /* high limit */
270 queue->max_size.bytes = 0; /* unlimited */
271 queue->max_size.time = 0; /* unlimited */
272 queue->min_treshold.buffers = 0; /* no treshold */
273 queue->min_treshold.bytes = 0; /* no treshold */
274 queue->min_treshold.time = 0; /* no treshold */
276 queue->leaky = GST_QUEUE_NO_LEAK;
277 queue->may_deadlock = TRUE;
278 queue->block_timeout = GST_CLOCK_TIME_NONE;
279 queue->interrupt = FALSE;
280 queue->flush = FALSE;
282 queue->qlock = g_mutex_new ();
283 queue->item_add = g_cond_new ();
284 queue->item_del = g_cond_new ();
285 queue->event_done = g_cond_new ();
286 queue->events = g_queue_new ();
287 queue->queue = g_queue_new ();
289 GST_CAT_DEBUG_OBJECT (GST_CAT_THREAD, queue,
290 "initialized queue's not_empty & not_full conditions");
/* dispose: force the element to NULL state, drain and free the data
 * queue and all synchronization primitives, drain the event queue, then
 * chain up to the parent dispose. */
294 gst_queue_dispose (GObject *object)
296 GstQueue *queue = GST_QUEUE (object);
/* going to NULL state stops both streaming threads before teardown */
298 gst_element_set_state (GST_ELEMENT (queue), GST_STATE_NULL);
300 while (!g_queue_is_empty (queue->queue)) {
301 GstData *data = g_queue_pop_head (queue->queue);
302 gst_data_unref (data);
304 g_queue_free (queue->queue);
305 g_mutex_free (queue->qlock);
306 g_cond_free (queue->item_add);
307 g_cond_free (queue->item_del);
308 g_cond_free (queue->event_done);
/* NOTE(review): gst_queue_handle_src_event() pushes stack-allocated
 * GstQueueEventResponse records onto queue->events, yet here the
 * entries are unref'd as GstEvents — verify what this list actually
 * holds at dispose time. */
309 while (!g_queue_is_empty (queue->events)) {
310 GstEvent *event = g_queue_pop_head (queue->events);
311 gst_event_unref (event);
314 if (G_OBJECT_CLASS (parent_class)->dispose)
315 G_OBJECT_CLASS (parent_class)->dispose (object);
/* getcaps: while buffers are queued, report the caps they were
 * negotiated with (renegotiation would invalidate queued data);
 * otherwise proxy the query to the peer. */
319 gst_queue_getcaps (GstPad *pad)
323 queue = GST_QUEUE (gst_pad_get_parent (pad));
325 if (queue->cur_level.bytes > 0) {
326 return gst_caps_copy (queue->negotiated_caps);
329 return gst_pad_proxy_getcaps (pad);
/* link: refuse caps changes while data is queued (only the identical
 * fixed caps are accepted), otherwise proxy the link and remember a
 * copy of the negotiated caps for getcaps above. */
332 static GstPadLinkReturn
333 gst_queue_link (GstPad *pad, const GstCaps *caps)
336 GstPadLinkReturn link_ret;
338 queue = GST_QUEUE (gst_pad_get_parent (pad));
340 if (queue->cur_level.bytes > 0) {
341 if (gst_caps_is_equal_fixed (caps, queue->negotiated_caps)) {
342 return GST_PAD_LINK_OK;
344 return GST_PAD_LINK_REFUSED;
347 link_ret = gst_pad_proxy_pad_link (pad, caps);
349 if (GST_PAD_LINK_SUCCESSFUL (link_ret)) {
350 /* we store an extra copy of the negotiated caps, just in case
351 * the pads become unnegotiated while we have buffers */
352 gst_caps_replace (&queue->negotiated_caps, gst_caps_copy (caps));
/* Drop everything in the data queue and reset the level counters.
 * Caller must hold queue->qlock (hence "locked" in the name). */
359 gst_queue_locked_flush (GstQueue *queue)
361 while (!g_queue_is_empty (queue->queue)) {
362 GstData *data = g_queue_pop_head (queue->queue);
/* the queue holds its own reference to each item; drop it */
363 gst_data_unref (data);
365 queue->timeval = NULL;
366 queue->cur_level.buffers = 0;
367 queue->cur_level.bytes = 0;
368 queue->cur_level.time = 0;
370 /* make sure any pending buffers to be added are flushed too */
373 /* we deleted something... */
/* wake a chain thread blocked waiting for free space */
374 g_cond_signal (queue->item_del);
/* Drain queue->events: perform each queued upstream event via the
 * default handler and signal event_done so the thread waiting in
 * gst_queue_handle_src_event() can observe the result. Caller must
 * hold queue->qlock. NOTE(review): the line that sets er->handled is
 * not visible in this view — confirm it is set before signalling. */
378 gst_queue_handle_pending_events (GstQueue *queue)
380 /* check for events to send upstream */
381 while (!g_queue_is_empty (queue->events)){
382 GstQueueEventResponse *er = g_queue_pop_head (queue->events);
383 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "sending event upstream");
384 er->ret = gst_pad_event_default (queue->srcpad, er->event);
386 g_cond_signal (queue->event_done);
387 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "event sent");
/* Debug snapshot of all fill levels versus their min/max bounds.
 * Unhygienic macro: it expands GST_DEBUG_PAD_NAME (pad), so a local
 * named 'pad' must be in scope at every call site. */
391 #define STATUS(queue, msg) \
392 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, \
393 "(%s:%s) " msg ": %u of %u-%u buffers, %u of %u-%u " \
394 "bytes, %" G_GUINT64_FORMAT " of %" G_GUINT64_FORMAT \
395 "-%" G_GUINT64_FORMAT " ns, %u elements", \
396 GST_DEBUG_PAD_NAME (pad), \
397 queue->cur_level.buffers, \
398 queue->min_treshold.buffers, \
399 queue->max_size.buffers, \
400 queue->cur_level.bytes, \
401 queue->min_treshold.bytes, \
402 queue->max_size.bytes, \
403 queue->cur_level.time, \
404 queue->min_treshold.time, \
405 queue->max_size.time, \
406 queue->queue->length)
/* Sink-pad chain function: runs in the upstream thread. Takes qlock,
 * handles flush/EOS events specially, then — if the queue is "full" by
 * any configured limit — either leaks a buffer (upstream/downstream
 * leak modes) or blocks on item_del until space frees up, honoring
 * scheduler interrupts and non-PLAYING deadlock detection. Finally the
 * item is appended, levels are updated, and item_add is signalled. */
409 gst_queue_chain (GstPad *pad,
414 g_return_if_fail (pad != NULL);
415 g_return_if_fail (GST_IS_PAD (pad));
416 g_return_if_fail (data != NULL);
418 queue = GST_QUEUE (GST_OBJECT_PARENT (pad));
421 /* we have to lock the queue since we span threads */
422 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "locking t:%p", g_thread_self ());
423 g_mutex_lock (queue->qlock);
424 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "locked t:%p", g_thread_self ());
/* service any upstream events queued by the src-pad thread first */
426 gst_queue_handle_pending_events (queue);
428 /* assume don't need to flush this buffer when the queue is filled */
429 queue->flush = FALSE;
431 if (GST_IS_EVENT (data)) {
432 switch (GST_EVENT_TYPE (data)) {
433 case GST_EVENT_FLUSH:
434 STATUS (queue, "received flush event");
435 gst_queue_locked_flush (queue);
436 STATUS (queue, "after flush");
439 STATUS (queue, "received EOS");
442 /* we put the event in the queue, we don't have to act ourselves */
443 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
444 "adding event %p of type %d",
445 data, GST_EVENT_TYPE (data));
450 if (GST_IS_BUFFER (data))
451 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
452 "adding buffer %p of size %d",
453 data, GST_BUFFER_SIZE (data));
455 /* We make space available if we're "full" according to whatever
456 * the user defined as "full". Note that this only applies to buffers.
457 * We always handle events and they don't count in our statistics. */
458 if (GST_IS_BUFFER (data) &&
459 ((queue->max_size.buffers > 0 &&
460 queue->cur_level.buffers >= queue->max_size.buffers) ||
461 (queue->max_size.bytes > 0 &&
462 queue->cur_level.bytes >= queue->max_size.bytes) ||
463 (queue->max_size.time > 0 &&
464 queue->cur_level.time >= queue->max_size.time))) {
/* drop the lock while emitting: handlers may call back into us */
465 g_mutex_unlock (queue->qlock);
466 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_OVERRUN], 0);
467 g_mutex_lock (queue->qlock);
469 /* how are we going to make space for this buffer? */
470 switch (queue->leaky) {
471 /* leak current buffer */
472 case GST_QUEUE_LEAK_UPSTREAM:
473 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
474 "queue is full, leaking buffer on upstream end");
475 /* now we can clean up and exit right away */
476 g_mutex_unlock (queue->qlock);
479 /* leak first buffer in the queue */
480 case GST_QUEUE_LEAK_DOWNSTREAM: {
481 /* this is a bit hacky. We'll manually iterate the list
482 * and find the first buffer from the head on. We'll
483 * unref that and "fix up" the GQueue object... */
485 GstData *leak = NULL;
487 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
488 "queue is full, leaking buffer on downstream end");
490 for (item = queue->queue->head; item != NULL; item = item->next) {
491 if (GST_IS_BUFFER (item->data)) {
497 /* if we didn't find anything, it means we have no buffers
498 * in here. That cannot happen, since we had >= 1 bufs */
501 /* Now remove it from the list, fixing up the GQueue
502 * CHECKME: is a queue->head the first or the last item? */
503 item = g_list_delete_link (queue->queue->head, item);
504 queue->queue->head = g_list_first (item);
505 queue->queue->tail = g_list_last (item);
506 queue->queue->length--;
508 /* and unref the data at the end. Twice, because we keep a ref
509 * to make things read-only. Also keep our list uptodate. */
/* NOTE(review): the four statements below operate on 'data' (the
 * incoming buffer) although it is the buffer stored in 'leak' that
 * was just removed from the list. Decrementing levels by the
 * incoming buffer's size and double-unref'ing 'data' — which is
 * pushed to the queue further down — looks wrong; verify whether
 * these should use 'leak' instead. */
510 queue->cur_level.bytes -= GST_BUFFER_SIZE (data);
511 queue->cur_level.buffers --;
512 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
513 queue->cur_level.time -= GST_BUFFER_DURATION (data);
515 gst_data_unref (data);
516 gst_data_unref (data);
521 g_warning ("Unknown leaky type, using default");
524 /* don't leak. Instead, wait for space to be available */
525 case GST_QUEUE_NO_LEAK:
526 STATUS (queue, "pre-full wait");
528 while ((queue->max_size.buffers > 0 &&
529 queue->cur_level.buffers >= queue->max_size.buffers) ||
530 (queue->max_size.bytes > 0 &&
531 queue->cur_level.bytes >= queue->max_size.bytes) ||
532 (queue->max_size.time > 0 &&
533 queue->cur_level.time >= queue->max_size.time)) {
534 /* if there's a pending state change for this queue
535 * or its manager, switch back to iterator so bottom
536 * half of state change executes */
537 if (queue->interrupt) {
538 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "interrupted");
539 g_mutex_unlock (queue->qlock);
540 if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->sinkpad),
541 GST_ELEMENT (queue))) {
544 /* if we got here because we were unlocked after a
545 * flush, we don't need to add the buffer to the
548 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
549 "not adding pending buffer after flush");
552 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
553 "adding pending buffer after interrupt");
/* not PLAYING: the reader cannot drain us, so either error out
 * (may_deadlock unset) or wait for the app to restart things */
557 if (GST_STATE (queue) != GST_STATE_PLAYING) {
558 /* this means the other end is shut down. Try to
559 * signal to resolve the error */
560 if (!queue->may_deadlock) {
561 g_mutex_unlock (queue->qlock);
562 gst_data_unref (data);
563 gst_element_error (GST_ELEMENT (queue),
564 "deadlock found, source pad elements are shut down");
565 /* we don't go to out_unref here, since we want to
566 * unref the buffer *before* calling gst_element_error */
569 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
570 "%s: waiting for the app to restart "
571 "source pad elements",
572 GST_ELEMENT_NAME (queue));
576 /* OK, we've got a serious issue here. Imagine the situation
577 * where the puller (next element) is sending an event here,
578 * so it cannot pull events from the queue, and we cannot
579 * push data further because the queue is 'full' and therefore,
580 * we wait here (and do not handle events): deadlock! to solve
581 * that, we handle pending upstream events here, too. */
582 gst_queue_handle_pending_events (queue);
584 STATUS (queue, "waiting for item_del signal");
585 g_cond_wait (queue->item_del, queue->qlock);
586 STATUS (queue, "received item_del signal");
589 STATUS (queue, "post-full wait");
590 g_mutex_unlock (queue->qlock);
591 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
592 g_mutex_lock (queue->qlock);
597 /* put the buffer on the tail of the list. We keep a reference,
598 * so that the data is read-only while in here. There's a good
599 * reason to do so: we have a size and time counter, and any
600 * modification to the content could change any of the two. */
602 g_queue_push_tail (queue->queue, data);
604 /* Note that we only add buffers (not events) to the statistics */
605 if (GST_IS_BUFFER (data)) {
606 queue->cur_level.buffers++;
607 queue->cur_level.bytes += GST_BUFFER_SIZE (data);
608 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
609 queue->cur_level.time += GST_BUFFER_DURATION (data);
612 STATUS (queue, "+ level");
614 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "signalling item_add");
/* wake the reader blocked in gst_queue_get() */
615 g_cond_signal (queue->item_add);
616 g_mutex_unlock (queue->qlock);
621 gst_data_unref (data);
/* Src-pad get function: runs in the downstream thread. Takes qlock and,
 * while the queue is empty or below the min-treshold levels, emits
 * "underrun" and waits on item_add — honoring scheduler interrupts,
 * non-PLAYING deadlock detection, and the block_timeout (returning a
 * filler event on timeout). Then pops the head item, updates the level
 * counters for buffers, drops the queue's own reference, signals
 * item_del, and marks EOS on the element when an EOS event passes. */
626 gst_queue_get (GstPad *pad)
631 g_return_val_if_fail (pad != NULL, NULL);
632 g_return_val_if_fail (GST_IS_PAD (pad), NULL);
634 queue = GST_QUEUE (gst_pad_get_parent (pad));
637 /* have to lock for thread-safety */
638 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
639 "locking t:%p", g_thread_self ());
640 g_mutex_lock (queue->qlock);
641 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
642 "locked t:%p", g_thread_self ());
644 if (queue->queue->length == 0 ||
645 (queue->min_treshold.buffers > 0 &&
646 queue->cur_level.buffers < queue->min_treshold.buffers) ||
647 (queue->min_treshold.bytes > 0 &&
648 queue->cur_level.bytes < queue->min_treshold.bytes) ||
649 (queue->min_treshold.time > 0 &&
650 queue->cur_level.time < queue->min_treshold.time)) {
/* drop the lock while emitting: handlers may call back into us */
651 g_mutex_unlock (queue->qlock);
652 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_UNDERRUN], 0);
653 g_mutex_lock (queue->qlock);
655 STATUS (queue, "pre-empty wait");
656 while (queue->queue->length == 0 ||
657 (queue->min_treshold.buffers > 0 &&
658 queue->cur_level.buffers < queue->min_treshold.buffers) ||
659 (queue->min_treshold.bytes > 0 &&
660 queue->cur_level.bytes < queue->min_treshold.bytes) ||
661 (queue->min_treshold.time > 0 &&
662 queue->cur_level.time < queue->min_treshold.time)) {
663 /* if there's a pending state change for this queue or its
664 * manager, switch back to iterator so bottom half of state
665 * change executes. */
666 if (queue->interrupt) {
667 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "interrupted");
668 g_mutex_unlock (queue->qlock);
669 if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->srcpad),
670 GST_ELEMENT (queue)))
671 return GST_DATA (gst_event_new (GST_EVENT_INTERRUPT));
674 if (GST_STATE (queue) != GST_STATE_PLAYING) {
675 /* this means the other end is shut down */
676 if (!queue->may_deadlock) {
677 g_mutex_unlock (queue->qlock);
678 gst_element_error (GST_ELEMENT (queue),
679 "deadlock found, sink pad elements are shut down");
682 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
683 "%s: waiting for the app to restart "
684 "source pad elements",
685 GST_ELEMENT_NAME (queue));
689 STATUS (queue, "waiting for item_add");
/* bounded wait: on timeout hand downstream a filler event so the
 * pipeline keeps iterating instead of blocking forever */
691 if (queue->block_timeout != GST_CLOCK_TIME_NONE) {
693 g_get_current_time (&timeout);
694 g_time_val_add (&timeout, queue->block_timeout / 1000);
695 if (!g_cond_timed_wait (queue->item_add, queue->qlock, &timeout)){
696 g_mutex_unlock (queue->qlock);
697 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
698 "Sending filler event");
699 return GST_DATA (gst_event_new_filler ());
702 g_cond_wait (queue->item_add, queue->qlock);
704 STATUS (queue, "got item_add signal");
707 STATUS (queue, "post-empty wait");
708 g_mutex_unlock (queue->qlock);
709 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
710 g_mutex_lock (queue->qlock);
713 /* There's something in the list now, whatever it is */
714 data = g_queue_pop_head (queue->queue);
715 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
716 "retrieved data %p from queue", data);
718 if (GST_IS_BUFFER (data)) {
719 /* Update statistics */
720 queue->cur_level.buffers--;
721 queue->cur_level.bytes -= GST_BUFFER_SIZE (data);
722 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
723 queue->cur_level.time -= GST_BUFFER_DURATION (data);
726 /* Now that we're done, we can lose our own reference to
727 * the item, since we're no longer in danger. */
728 gst_data_unref (data);
730 STATUS (queue, "after _get()");
732 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "signalling item_del");
/* wake the writer blocked in gst_queue_chain() */
733 g_cond_signal (queue->item_del);
734 g_mutex_unlock (queue->qlock);
736 /* FIXME: I suppose this needs to be locked, since the EOS
737 * bit affects the pipeline state. However, that bit is
738 * locked too so it'd cause a deadlock. */
739 if (GST_IS_EVENT (data)) {
740 GstEvent *event = GST_EVENT (data);
741 switch (GST_EVENT_TYPE (event)) {
743 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
745 GST_ELEMENT_NAME (queue));
746 gst_element_set_eos (GST_ELEMENT (queue));
/* Src-pad event handler. When PLAYING, the event is parked on
 * queue->events for the chain thread to forward upstream (see
 * gst_queue_handle_pending_events) and this thread polls event_done in
 * half-second slices until it is handled; otherwise the event is sent
 * via the default handler directly. FLUSH events, and SEEKs with the
 * flush flag, additionally flush the queued data. */
758 gst_queue_handle_src_event (GstPad *pad,
761 GstQueue *queue = GST_QUEUE (gst_pad_get_parent (pad));
764 g_mutex_lock (queue->qlock);
766 if (gst_element_get_state (GST_ELEMENT (queue)) == GST_STATE_PLAYING) {
767 GstQueueEventResponse er;
769 /* push the event to the queue and wait for upstream consumption */
/* 'er' lives on this stack frame; the chain thread must finish with
 * it before we return */
772 g_queue_push_tail (queue->events, &er);
773 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
774 "Preparing for loop for event handler");
775 /* see the chain function on why this is here - it prevents a deadlock */
776 g_cond_signal (queue->item_del);
777 while (!er.handled) {
779 g_get_current_time (&timeout);
780 g_time_val_add (&timeout, 500 * 1000); /* half a second */
781 if (!g_cond_timed_wait (queue->event_done, queue->qlock, &timeout) &&
783 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
784 "timeout in upstream event handling");
785 /* remove ourselves from the pending list. Since we're
786 * locked, others cannot reference this anymore. */
/* NOTE(review): 'er' was pushed onto queue->events (line 772 above),
 * but these four lines surgically remove it from queue->queue — the
 * DATA queue. That would corrupt the data queue's length and leave
 * the stale pointer in queue->events; verify this should operate on
 * queue->events instead. */
787 queue->queue->head = g_list_remove (queue->queue->head, &er);
788 queue->queue->head = g_list_first (queue->queue->head);
789 queue->queue->tail = g_list_last (queue->queue->head);
790 queue->queue->length--;
795 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
799 res = gst_pad_event_default (pad, event);
801 switch (GST_EVENT_TYPE (event)) {
802 case GST_EVENT_FLUSH:
803 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
804 "FLUSH event, flushing queue\n");
805 gst_queue_locked_flush (queue);
808 if (GST_EVENT_SEEK_FLAGS (event) & GST_SEEK_FLAG_FLUSH) {
809 gst_queue_locked_flush (queue);
816 g_mutex_unlock (queue->qlock);
/* release_locks vfunc: set the interrupt flag and wake both blocked
 * threads so they can bail out of their cond waits (they check
 * queue->interrupt on wakeup). */
822 gst_queue_release_locks (GstElement *element)
826 queue = GST_QUEUE (element);
828 g_mutex_lock (queue->qlock);
829 queue->interrupt = TRUE;
830 g_cond_signal (queue->item_add);
831 g_cond_signal (queue->item_del);
832 g_mutex_unlock (queue->qlock);
/* change_state vfunc: under qlock, flush the queue entering READY and
 * leaving PAUSED, validate on PAUSED→PLAYING that the sink pad is
 * linked and bridges two different schedulers, clear the interrupt
 * flag, chain up, and force both pads active again (their activation
 * depends on two schedulers). */
837 static GstElementStateReturn
838 gst_queue_change_state (GstElement *element)
841 GstElementStateReturn ret = GST_STATE_SUCCESS;
843 queue = GST_QUEUE (element);
845 GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "starting state change");
847 /* lock the queue so another thread (not in sync with this thread's state)
848 * can't call this queue's _get (or whatever)
850 g_mutex_lock (queue->qlock);
852 switch (GST_STATE_TRANSITION (element)) {
853 case GST_STATE_NULL_TO_READY:
854 gst_queue_locked_flush (queue);
856 case GST_STATE_PAUSED_TO_PLAYING:
857 if (!GST_PAD_IS_LINKED (queue->sinkpad)) {
858 GST_CAT_DEBUG_OBJECT (GST_CAT_STATES, queue,
859 "queue %s is not linked",
860 GST_ELEMENT_NAME (queue));
861 /* FIXME can this be? */
862 g_cond_signal (queue->item_add);
864 ret = GST_STATE_FAILURE;
867 GstScheduler *src_sched, *sink_sched;
869 src_sched = gst_pad_get_scheduler (GST_PAD (queue->srcpad));
870 sink_sched = gst_pad_get_scheduler (GST_PAD (queue->sinkpad));
/* a DECOUPLED queue only makes sense between two schedulers */
872 if (src_sched == sink_sched) {
873 GST_CAT_DEBUG_OBJECT (GST_CAT_STATES, queue,
874 "queue %s does not connect different schedulers",
875 GST_ELEMENT_NAME (queue));
877 g_warning ("queue %s does not connect different schedulers",
878 GST_ELEMENT_NAME (queue));
880 ret = GST_STATE_FAILURE;
884 queue->interrupt = FALSE;
886 case GST_STATE_PAUSED_TO_READY:
887 gst_queue_locked_flush (queue);
893 if (GST_ELEMENT_CLASS (parent_class)->change_state)
894 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element);
896 /* this is an ugly hack to make sure our pads are always active.
897 * Reason for this is that pad activation for the queue element
898 * depends on 2 schedulers (ugh) */
899 gst_pad_set_active (queue->sinkpad, TRUE);
900 gst_pad_set_active (queue->srcpad, TRUE);
903 g_mutex_unlock (queue->qlock);
905 GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "done with state change");
912 gst_queue_set_property (GObject *object,
917 GstQueue *queue = GST_QUEUE (object);
919 /* someone could change levels here, and since this
920 * affects the get/put funcs, we need to lock for safety. */
921 g_mutex_lock (queue->qlock);
924 case ARG_MAX_SIZE_BYTES:
925 queue->max_size.bytes = g_value_get_uint (value);
927 case ARG_MAX_SIZE_BUFFERS:
928 queue->max_size.buffers = g_value_get_uint (value);
930 case ARG_MAX_SIZE_TIME:
931 queue->max_size.time = g_value_get_uint64 (value);
933 case ARG_MIN_TRESHOLD_BYTES:
934 queue->max_size.bytes = g_value_get_uint (value);
936 case ARG_MIN_TRESHOLD_BUFFERS:
937 queue->max_size.buffers = g_value_get_uint (value);
939 case ARG_MIN_TRESHOLD_TIME:
940 queue->max_size.time = g_value_get_uint64 (value);
943 queue->leaky = g_value_get_enum (value);
945 case ARG_MAY_DEADLOCK:
946 queue->may_deadlock = g_value_get_boolean (value);
948 case ARG_BLOCK_TIMEOUT:
949 queue->block_timeout = g_value_get_uint64 (value);
952 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
956 g_mutex_unlock (queue->qlock);
960 gst_queue_get_property (GObject *object,
965 GstQueue *queue = GST_QUEUE (object);
968 case ARG_CUR_LEVEL_BYTES:
969 g_value_set_uint (value, queue->cur_level.bytes);
971 case ARG_CUR_LEVEL_BUFFERS:
972 g_value_set_uint (value, queue->cur_level.buffers);
974 case ARG_CUR_LEVEL_TIME:
975 g_value_set_uint64 (value, queue->cur_level.time);
977 case ARG_MAX_SIZE_BYTES:
978 g_value_set_uint (value, queue->max_size.bytes);
980 case ARG_MAX_SIZE_BUFFERS:
981 g_value_set_uint (value, queue->max_size.buffers);
983 case ARG_MAX_SIZE_TIME:
984 g_value_set_uint64 (value, queue->max_size.time);
986 case ARG_MIN_TRESHOLD_BYTES:
987 g_value_set_uint (value, queue->min_treshold.bytes);
989 case ARG_MIN_TRESHOLD_BUFFERS:
990 g_value_set_uint (value, queue->min_treshold.buffers);
992 case ARG_MIN_TRESHOLD_TIME:
993 g_value_set_uint64 (value, queue->min_treshold.time);
996 g_value_set_enum (value, queue->leaky);
998 case ARG_MAY_DEADLOCK:
999 g_value_set_boolean (value, queue->may_deadlock);
1001 case ARG_BLOCK_TIMEOUT:
1002 g_value_set_uint64 (value, queue->block_timeout);
1005 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);