2 * Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
3 * 2000 Wim Taymans <wtay@chello.be>
4 * 2003 Colin Walters <cwalters@gnome.org>
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Library General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Library General Public License for more details.
18 * You should have received a copy of the GNU Library General Public
19 * License along with this library; if not, write to the
20 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
25 #include "gst_private.h"
28 #include "gstscheduler.h"
/* Static element metadata registered in base_init(); the long-name /
 * klass / description argument lines are elided in this excerpt. */
32 static GstElementDetails gst_queue_details = GST_ELEMENT_DETAILS (
36 "Erik Walthinsen <omega@cse.ogi.edu>"
40 /* Queue signals and args */
50 /* FIXME: don't we have another way of doing this
51 * "Gstreamer format" (frame/byte/time) queries? */
/* Property IDs for the level / limit / threshold properties installed in
 * class_init(). The enclosing enum braces and the remaining ARG_* values
 * are elided in this excerpt. */
52 ARG_CUR_LEVEL_BUFFERS,
58 ARG_MIN_TRESHOLD_BUFFERS,
59 ARG_MIN_TRESHOLD_BYTES,
60 ARG_MIN_TRESHOLD_TIME,
/* Response record used to hand an upstream event from a blocked
 * gst_queue_handle_src_event() caller to the streaming thread
 * (see gst_queue_handle_pending_events()).  Presumably also carries the
 * GstEvent* being forwarded -- the field is not visible in this excerpt;
 * TODO confirm against the full source. */
67 typedef struct _GstQueueEventResponse {
69 gboolean ret, handled;
70 } GstQueueEventResponse;
/* GObject / GstElement boilerplate: forward declarations for the type
 * system hooks and the pad/element virtual functions implemented below. */
72 static void gst_queue_base_init (GstQueueClass *klass);
73 static void gst_queue_class_init (GstQueueClass *klass);
74 static void gst_queue_init (GstQueue *queue);
75 static void gst_queue_dispose (GObject *object);
77 static void gst_queue_set_property (GObject *object,
81 static void gst_queue_get_property (GObject *object,
/* sink-pad chain function: producer side, may block when the queue is full */
86 static void gst_queue_chain (GstPad *pad,
/* src-pad get function: consumer side, may block when the queue is empty */
88 static GstData *gst_queue_get (GstPad *pad);
90 static gboolean gst_queue_handle_src_event (GstPad *pad,
/* must be called with queue->qlock held (per its name) -- TODO confirm */
93 static void gst_queue_locked_flush (GstQueue *queue);
95 static GstElementStateReturn
96 gst_queue_change_state (GstElement *element);
97 static gboolean gst_queue_release_locks (GstElement *element);
100 #define GST_TYPE_QUEUE_LEAKY (queue_leaky_get_type ())
/* Registers (once) and returns the enum GType backing the "leaky"
 * property. */
103 queue_leaky_get_type (void)
105 static GType queue_leaky_type = 0;
106 static GEnumValue queue_leaky[] = {
107 { GST_QUEUE_NO_LEAK, "0", "Not Leaky" },
108 { GST_QUEUE_LEAK_UPSTREAM, "1", "Leaky on Upstream" },
109 { GST_QUEUE_LEAK_DOWNSTREAM, "2", "Leaky on Downstream" },
/* NOTE(review): the terminating { 0, NULL, NULL } entry required by
 * g_enum_register_static() is not visible in this excerpt -- confirm it
 * exists in the full source. */
112 if (!queue_leaky_type) {
113 queue_leaky_type = g_enum_register_static("GstQueueLeaky", queue_leaky);
115 return queue_leaky_type;
118 static GstElementClass *parent_class = NULL;
119 static guint gst_queue_signals[LAST_SIGNAL] = { 0 };
/* Standard GObject get_type: registers GstQueue as a GstElement subclass
 * on first call, then returns the cached GType.  Several GTypeInfo
 * fields (finalizers, instance size, preallocs) and the surrounding
 * if/return lines are elided in this excerpt. */
122 gst_queue_get_type (void)
124 static GType queue_type = 0;
127 static const GTypeInfo queue_info = {
128 sizeof (GstQueueClass),
129 (GBaseInitFunc) gst_queue_base_init,
131 (GClassInitFunc) gst_queue_class_init,
136 (GInstanceInitFunc) gst_queue_init,
140 queue_type = g_type_register_static (GST_TYPE_ELEMENT,
141 "GstQueue", &queue_info, 0);
/* base_init: attaches the static element details (name, author, ...)
 * to the element class so tools can introspect them. */
148 gst_queue_base_init (GstQueueClass *klass)
150 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
152 gst_element_class_set_details (gstelement_class, &gst_queue_details);
156 gst_queue_class_init (GstQueueClass *klass)
158 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
159 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
161 parent_class = g_type_class_peek_parent (klass);
164 gst_queue_signals[SIGNAL_UNDERRUN] =
165 g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
166 G_STRUCT_OFFSET (GstQueueClass, underrun), NULL, NULL,
167 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
168 gst_queue_signals[SIGNAL_RUNNING] =
169 g_signal_new ("running", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
170 G_STRUCT_OFFSET (GstQueueClass, running), NULL, NULL,
171 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
172 gst_queue_signals[SIGNAL_OVERRUN] =
173 g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
174 G_STRUCT_OFFSET (GstQueueClass, overrun), NULL, NULL,
175 g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
178 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_BYTES,
179 g_param_spec_uint ("current-level-bytes", "Current level (kB)",
180 "Current amount of data in the queue (bytes)",
181 0, G_MAXUINT, 0, G_PARAM_READABLE));
182 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_BUFFERS,
183 g_param_spec_uint ("current-level-buffers", "Current level (buffers)",
184 "Current number of buffers in the queue",
185 0, G_MAXUINT, 0, G_PARAM_READABLE));
186 g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_TIME,
187 g_param_spec_uint64 ("current-level-time", "Current level (ns)",
188 "Current amount of data in the queue (in ns)",
189 0, G_MAXUINT64, 0, G_PARAM_READABLE));
191 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_BYTES,
192 g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
193 "Max. amount of data in the queue (bytes, 0=disable)",
194 0, G_MAXUINT, 0, G_PARAM_READWRITE));
195 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_BUFFERS,
196 g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
197 "Max. number of buffers in the queue (0=disable)",
198 0, G_MAXUINT, 0, G_PARAM_READWRITE));
199 g_object_class_install_property (gobject_class, ARG_MAX_SIZE_TIME,
200 g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
201 "Max. amount of data in the queue (in ns, 0=disable)",
202 0, G_MAXUINT64, 0, G_PARAM_READWRITE));
204 g_object_class_install_property (gobject_class, ARG_MIN_TRESHOLD_BYTES,
205 g_param_spec_uint ("min-treshold-bytes", "Min. treshold (kB)",
206 "Min. amount of data in the queue to allow reading (bytes, 0=disable)",
207 0, G_MAXUINT, 0, G_PARAM_READWRITE));
208 g_object_class_install_property (gobject_class, ARG_MIN_TRESHOLD_BUFFERS,
209 g_param_spec_uint ("min-treshold-buffers", "Min. treshold (buffers)",
210 "Min. number of buffers in the queue to allow reading (0=disable)",
211 0, G_MAXUINT, 0, G_PARAM_READWRITE));
212 g_object_class_install_property (gobject_class, ARG_MIN_TRESHOLD_TIME,
213 g_param_spec_uint64 ("min-treshold-time", "Min. treshold (ns)",
214 "Min. amount of data in the queue to allow reading (in ns, 0=disable)",
215 0, G_MAXUINT64, 0, G_PARAM_READWRITE));
217 g_object_class_install_property (gobject_class, ARG_LEAKY,
218 g_param_spec_enum ("leaky", "Leaky",
219 "Where the queue leaks, if at all",
220 GST_TYPE_QUEUE_LEAKY, GST_QUEUE_NO_LEAK, G_PARAM_READWRITE));
221 g_object_class_install_property (gobject_class, ARG_MAY_DEADLOCK,
222 g_param_spec_boolean ("may_deadlock", "May Deadlock",
223 "The queue may deadlock if it's full and not PLAYING",
224 TRUE, G_PARAM_READWRITE));
225 g_object_class_install_property (gobject_class, ARG_BLOCK_TIMEOUT,
226 g_param_spec_uint64 ("block_timeout", "Timeout for Block",
227 "Nanoseconds until blocked queue times out and returns filler event. "
228 "Value of -1 disables timeout",
229 0, G_MAXUINT64, -1, G_PARAM_READWRITE));
231 /* set several parent class virtual functions */
232 gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_queue_dispose);
233 gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_queue_set_property);
234 gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_queue_get_property);
236 gstelement_class->change_state = GST_DEBUG_FUNCPTR (gst_queue_change_state);
237 gstelement_class->release_locks = GST_DEBUG_FUNCPTR (gst_queue_release_locks);
/* Instance initialisation: creates the sink and src pads, wires up the
 * chain/get/event functions, sets default limits, and allocates the
 * mutex, condition variables and the two GQueues (data + pending
 * upstream events). */
241 gst_queue_init (GstQueue *queue)
243 /* scheduling on this kind of element is, well, interesting */
244 GST_FLAG_SET (queue, GST_ELEMENT_DECOUPLED);
245 GST_FLAG_SET (queue, GST_ELEMENT_EVENT_AWARE);
/* producer side: buffers enter through gst_queue_chain() */
247 queue->sinkpad = gst_pad_new ("sink", GST_PAD_SINK);
248 gst_pad_set_chain_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_queue_chain));
249 gst_element_add_pad (GST_ELEMENT (queue), queue->sinkpad);
250 gst_pad_set_link_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_pad_proxy_pad_link));
251 gst_pad_set_getcaps_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_pad_proxy_getcaps));
252 gst_pad_set_active (queue->sinkpad, TRUE);
/* consumer side: buffers leave through gst_queue_get() */
254 queue->srcpad = gst_pad_new ("src", GST_PAD_SRC);
255 gst_pad_set_get_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_get));
256 gst_element_add_pad (GST_ELEMENT (queue), queue->srcpad);
257 gst_pad_set_link_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_pad_proxy_pad_link));
258 gst_pad_set_getcaps_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_pad_proxy_getcaps));
259 gst_pad_set_event_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_handle_src_event));
260 gst_pad_set_active (queue->srcpad, TRUE);
/* default levels: empty queue, only the buffer-count limit enabled */
262 queue->cur_level.buffers = 0; /* no content */
263 queue->cur_level.bytes = 0; /* no content */
264 queue->cur_level.time = 0; /* no content */
265 queue->max_size.buffers = 250; /* high limit */
266 queue->max_size.bytes = 0; /* unlimited */
267 queue->max_size.time = 0; /* unlimited */
268 queue->min_treshold.buffers = 0; /* no treshold */
269 queue->min_treshold.bytes = 0; /* no treshold */
270 queue->min_treshold.time = 0; /* no treshold */
272 queue->leaky = GST_QUEUE_NO_LEAK;
273 queue->may_deadlock = TRUE;
274 queue->block_timeout = GST_CLOCK_TIME_NONE;
275 queue->interrupt = FALSE;
276 queue->flush = FALSE;
/* qlock guards all queue state; item_add/item_del signal producer and
 * consumer respectively; event_done wakes a blocked src-event caller */
278 queue->qlock = g_mutex_new ();
279 queue->item_add = g_cond_new ();
280 queue->item_del = g_cond_new ();
281 queue->event_done = g_cond_new ();
282 queue->events = g_queue_new ();
283 queue->queue = g_queue_new ();
285 GST_CAT_DEBUG_OBJECT (GST_CAT_THREAD, queue,
286 "initialized queue's not_empty & not_full conditions");
/* GObject dispose: forces the element to NULL state, drains and frees
 * the data queue and synchronisation primitives, then chains up. */
290 gst_queue_dispose (GObject *object)
292 GstQueue *queue = GST_QUEUE (object);
294 gst_element_set_state (GST_ELEMENT (queue), GST_STATE_NULL);
/* drop any data still queued; each item was reffed on entry in chain() */
296 while (!g_queue_is_empty (queue->queue)) {
297 GstData *data = g_queue_pop_head (queue->queue);
298 gst_data_unref (data);
300 g_queue_free (queue->queue);
301 g_mutex_free (queue->qlock);
302 g_cond_free (queue->item_add);
303 g_cond_free (queue->item_del);
304 g_cond_free (queue->event_done);
/* NOTE(review): handle_src_event() pushes GstQueueEventResponse* onto
 * queue->events, yet this loop drains it as GstEvent* and unrefs --
 * looks type-confused; confirm against upstream.  Also no
 * g_queue_free (queue->events) is visible in this excerpt. */
305 while (!g_queue_is_empty (queue->events)) {
306 GstEvent *event = g_queue_pop_head (queue->events);
307 gst_event_unref (event);
310 if (G_OBJECT_CLASS (parent_class)->dispose)
311 G_OBJECT_CLASS (parent_class)->dispose (object);
/* Empties the data queue and resets all level counters.  The "locked"
 * in the name indicates the caller must already hold queue->qlock --
 * all visible call sites are inside locked regions; TODO confirm. */
315 gst_queue_locked_flush (GstQueue *queue)
317 while (!g_queue_is_empty (queue->queue)) {
318 GstData *data = g_queue_pop_head (queue->queue);
319 gst_data_unref (data);
321 queue->timeval = NULL;
322 queue->cur_level.buffers = 0;
323 queue->cur_level.bytes = 0;
324 queue->cur_level.time = 0;
326 /* make sure any pending buffers to be added are flushed too */
/* (the queue->flush assignment referenced above is elided here) */
329 /* we deleted something... */
330 g_cond_signal (queue->item_del);
/* Forwards events queued by gst_queue_handle_src_event() upstream from
 * the streaming thread, then wakes the waiting caller via event_done.
 * Runs with queue->qlock held by the caller -- TODO confirm.
 * NOTE(review): the line setting er->handled (which the waiter polls)
 * is elided in this excerpt; confirm it is set before the signal. */
336 /* check for events to send upstream */
337 while (!g_queue_is_empty (queue->events)){
338 GstQueueEventResponse *er = g_queue_pop_head (queue->events);
339 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "sending event upstream");
340 er->ret = gst_pad_event_default (queue->srcpad, er->event);
342 g_cond_signal (queue->event_done);
343 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "event sent");
/* Debug-logging macro dumping all three level counters against their
 * thresholds and limits.  NOTE: it references a variable named `pad`
 * from the *call site* scope (via GST_DEBUG_PAD_NAME) and evaluates
 * `queue` many times -- only use it where a `pad` local exists and with
 * side-effect-free arguments. */
347 #define STATUS(queue, msg) \
348 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, \
349 "(%s:%s) " msg ": %u of %u-%u buffers, %u of %u-%u " \
350 "bytes, %" G_GUINT64_FORMAT " of %" G_GUINT64_FORMAT \
351 "-%" G_GUINT64_FORMAT " ns, %u elements", \
352 GST_DEBUG_PAD_NAME (pad), \
353 queue->cur_level.buffers, \
354 queue->min_treshold.buffers, \
355 queue->max_size.buffers, \
356 queue->cur_level.bytes, \
357 queue->min_treshold.bytes, \
358 queue->max_size.bytes, \
359 queue->cur_level.time, \
360 queue->min_treshold.time, \
361 queue->max_size.time, \
362 queue->queue->length)
/* Sink-pad chain function (producer side).  Takes qlock, services any
 * pending upstream events, then enqueues `data`.  If the queue is over
 * any configured max_size limit it either leaks (upstream/downstream)
 * or blocks on item_del until space frees up, handling interrupts,
 * flushes and non-PLAYING deadlock detection while waiting.
 * Many interior lines (gotos/labels, case closes, braces) are elided in
 * this excerpt, so control flow below is partially implicit. */
365 gst_queue_chain (GstPad *pad,
370 g_return_if_fail (pad != NULL);
371 g_return_if_fail (GST_IS_PAD (pad));
372 g_return_if_fail (data != NULL);
374 queue = GST_QUEUE (GST_OBJECT_PARENT (pad));
377 /* we have to lock the queue since we span threads */
378 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "locking t:%p", g_thread_self ());
379 g_mutex_lock (queue->qlock);
380 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "locked t:%p", g_thread_self ());
382 gst_queue_handle_pending_events (queue);
384 /* assume don't need to flush this buffer when the queue is filled */
385 queue->flush = FALSE;
/* events: FLUSH empties the queue immediately; others (EOS, ...) are
 * queued like data so ordering downstream is preserved */
387 if (GST_IS_EVENT (data)) {
388 switch (GST_EVENT_TYPE (data)) {
389 case GST_EVENT_FLUSH:
390 STATUS (queue, "received flush event");
391 gst_queue_locked_flush (queue);
392 STATUS (queue, "after flush");
395 STATUS (queue, "received EOS");
398 /* we put the event in the queue, we don't have to act ourselves */
399 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
400 "adding event %p of type %d",
401 data, GST_EVENT_TYPE (data));
406 if (GST_IS_BUFFER (data))
407 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
408 "adding buffer %p of size %d",
409 data, GST_BUFFER_SIZE (data));
411 /* We make space available if we're "full" according to whatever
412 * the user defined as "full". Note that this only applies to buffers.
413 * We always handle events and they don't count in our statistics. */
414 if (GST_IS_BUFFER (data) &&
415 ((queue->max_size.buffers > 0 &&
416 queue->cur_level.buffers >= queue->max_size.buffers) ||
417 (queue->max_size.bytes > 0 &&
418 queue->cur_level.bytes >= queue->max_size.bytes) ||
419 (queue->max_size.time > 0 &&
420 queue->cur_level.time >= queue->max_size.time))) {
/* emit "overrun" outside the lock so handlers can call back in */
421 g_mutex_unlock (queue->qlock);
422 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_OVERRUN], 0);
423 g_mutex_lock (queue->qlock);
425 /* how are we going to make space for this buffer? */
426 switch (queue->leaky) {
427 /* leak current buffer */
428 case GST_QUEUE_LEAK_UPSTREAM:
429 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
430 "queue is full, leaking buffer on upstream end");
431 /* now we can clean up and exit right away */
432 g_mutex_unlock (queue->qlock);
435 /* leak first buffer in the queue */
436 case GST_QUEUE_LEAK_DOWNSTREAM: {
437 /* this is a bit hacky. We'll manually iterate the list
438 * and find the first buffer from the head on. We'll
439 * unref that and "fix up" the GQueue object... */
441 GstData *leak = NULL;
443 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
444 "queue is full, leaking buffer on downstream end");
446 for (item = queue->queue->head; item != NULL; item = item->next) {
447 if (GST_IS_BUFFER (item->data)) {
/* (assignment of `leak` and loop break are elided here) */
453 /* if we didn't find anything, it means we have no buffers
454 * in here. That cannot happen, since we had >= 1 bufs */
457 /* Now remove it from the list, fixing up the GQueue
458 * CHECKME: is a queue->head the first or the last item? */
459 item = g_list_delete_link (queue->queue->head, item);
460 queue->queue->head = g_list_first (item);
461 queue->queue->tail = g_list_last (item);
462 queue->queue->length--;
464 /* and unref the data at the end. Twice, because we keep a ref
465 * to make things read-only. Also keep our list uptodate. */
/* NOTE(review): the stats below subtract GST_BUFFER_SIZE (data) --
 * presumably `data` here refers to the leaked buffer, not the incoming
 * one; an elided assignment likely rebinds it.  Confirm upstream. */
466 queue->cur_level.bytes -= GST_BUFFER_SIZE (data);
467 queue->cur_level.buffers --;
468 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
469 queue->cur_level.time -= GST_BUFFER_DURATION (data);
471 gst_data_unref (data);
472 gst_data_unref (data);
477 g_warning ("Unknown leaky type, using default");
480 /* don't leak. Instead, wait for space to be available */
481 case GST_QUEUE_NO_LEAK:
482 STATUS (queue, "pre-full wait");
484 while ((queue->max_size.buffers > 0 &&
485 queue->cur_level.buffers >= queue->max_size.buffers) ||
486 (queue->max_size.bytes > 0 &&
487 queue->cur_level.bytes >= queue->max_size.bytes) ||
488 (queue->max_size.time > 0 &&
489 queue->cur_level.time >= queue->max_size.time)) {
490 /* if there's a pending state change for this queue
491 * or its manager, switch back to iterator so bottom
492 * half of state change executes */
493 if (queue->interrupt) {
494 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "interrupted");
495 g_mutex_unlock (queue->qlock);
496 if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->sinkpad),
497 GST_ELEMENT (queue))) {
500 /* if we got here because we were unlocked after a
501 * flush, we don't need to add the buffer to the
504 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
505 "not adding pending buffer after flush");
508 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
509 "adding pending buffer after interrupt");
513 if (GST_STATE (queue) != GST_STATE_PLAYING) {
514 /* this means the other end is shut down. Try to
515 * signal to resolve the error */
516 if (!queue->may_deadlock) {
517 g_mutex_unlock (queue->qlock);
518 gst_data_unref (data);
519 gst_element_error (GST_ELEMENT (queue),
520 "deadlock found, source pad elements are shut down");
521 /* we don't go to out_unref here, since we want to
522 * unref the buffer *before* calling gst_element_error */
525 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
526 "%s: waiting for the app to restart "
527 "source pad elements",
528 GST_ELEMENT_NAME (queue));
532 /* OK, we've got a serious issue here. Imagine the situation
533 * where the puller (next element) is sending an event here,
534 * so it cannot pull events from the queue, and we cannot
535 * push data further because the queue is 'full' and therefore,
536 * we wait here (and do not handle events): deadlock! to solve
537 * that, we handle pending upstream events here, too. */
538 gst_queue_handle_pending_events (queue);
540 STATUS (queue, "waiting for item_del signal");
541 g_cond_wait (queue->item_del, queue->qlock);
542 STATUS (queue, "received item_del signal");
545 STATUS (queue, "post-full wait");
/* emit "running" outside the lock, same pattern as "overrun" above */
546 g_mutex_unlock (queue->qlock);
547 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
548 g_mutex_lock (queue->qlock);
553 /* put the buffer on the tail of the list. We keep a reference,
554 * so that the data is read-only while in here. There's a good
555 * reason to do so: we have a size and time counter, and any
556 * modification to the content could change any of the two. */
558 g_queue_push_tail (queue->queue, data);
560 /* Note that we only add buffers (not events) to the statistics */
561 if (GST_IS_BUFFER (data)) {
562 queue->cur_level.buffers++;
563 queue->cur_level.bytes += GST_BUFFER_SIZE (data);
564 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
565 queue->cur_level.time += GST_BUFFER_DURATION (data);
568 STATUS (queue, "+ level");
570 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "signalling item_add");
571 g_cond_signal (queue->item_add);
572 g_mutex_unlock (queue->qlock);
/* out_unref label target (elided): drop the incoming data on leak paths */
577 gst_data_unref (data);
/* Src-pad get function (consumer side).  Takes qlock and blocks on
 * item_add while the queue is empty or under any min_treshold, with the
 * same interrupt / non-PLAYING handling as chain().  An optional
 * block_timeout turns the wait into a timed wait that yields a filler
 * event on expiry.  Pops one item, updates stats, drops the extra ref
 * taken in chain(), and sets EOS on the element for EOS events.
 * Interior braces/labels are elided in this excerpt. */
582 gst_queue_get (GstPad *pad)
587 g_return_val_if_fail (pad != NULL, NULL);
588 g_return_val_if_fail (GST_IS_PAD (pad), NULL);
590 queue = GST_QUEUE (gst_pad_get_parent (pad));
593 /* have to lock for thread-safety */
594 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
595 "locking t:%p", g_thread_self ());
596 g_mutex_lock (queue->qlock);
597 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
598 "locked t:%p", g_thread_self ());
/* "empty" means no items at all, or below any enabled threshold */
600 if (queue->queue->length == 0 ||
601 (queue->min_treshold.buffers > 0 &&
602 queue->cur_level.buffers < queue->min_treshold.buffers) ||
603 (queue->min_treshold.bytes > 0 &&
604 queue->cur_level.bytes < queue->min_treshold.bytes) ||
605 (queue->min_treshold.time > 0 &&
606 queue->cur_level.time < queue->min_treshold.time)) {
/* emit "underrun" outside the lock so handlers can call back in */
607 g_mutex_unlock (queue->qlock);
608 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_UNDERRUN], 0);
609 g_mutex_lock (queue->qlock);
611 STATUS (queue, "pre-empty wait");
612 while (queue->queue->length == 0 ||
613 (queue->min_treshold.buffers > 0 &&
614 queue->cur_level.buffers < queue->min_treshold.buffers) ||
615 (queue->min_treshold.bytes > 0 &&
616 queue->cur_level.bytes < queue->min_treshold.bytes) ||
617 (queue->min_treshold.time > 0 &&
618 queue->cur_level.time < queue->min_treshold.time)) {
619 /* if there's a pending state change for this queue or its
620 * manager, switch back to iterator so bottom half of state
621 * change executes. */
622 if (queue->interrupt) {
623 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "interrupted");
624 g_mutex_unlock (queue->qlock);
625 if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->srcpad),
626 GST_ELEMENT (queue)))
627 return GST_DATA (gst_event_new (GST_EVENT_INTERRUPT));
630 if (GST_STATE (queue) != GST_STATE_PLAYING) {
631 /* this means the other end is shut down */
632 if (!queue->may_deadlock) {
633 g_mutex_unlock (queue->qlock);
634 gst_element_error (GST_ELEMENT (queue),
635 "deadlock found, sink pad elements are shut down");
638 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
639 "%s: waiting for the app to restart "
640 "source pad elements",
641 GST_ELEMENT_NAME (queue));
645 STATUS (queue, "waiting for item_add");
/* block_timeout is in ns; g_time_val_add takes microseconds */
647 if (queue->block_timeout != GST_CLOCK_TIME_NONE) {
649 g_get_current_time (&timeout);
650 g_time_val_add (&timeout, queue->block_timeout / 1000);
651 if (!g_cond_timed_wait (queue->item_add, queue->qlock, &timeout)){
652 g_mutex_unlock (queue->qlock);
653 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
654 "Sending filler event");
655 return GST_DATA (gst_event_new_filler ());
658 g_cond_wait (queue->item_add, queue->qlock);
660 STATUS (queue, "got item_add signal");
663 STATUS (queue, "post-empty wait");
664 g_mutex_unlock (queue->qlock);
665 g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
666 g_mutex_lock (queue->qlock);
669 /* There's something in the list now, whatever it is */
670 data = g_queue_pop_head (queue->queue);
671 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
672 "retrieved data %p from queue", data);
674 if (GST_IS_BUFFER (data)) {
675 /* Update statistics */
676 queue->cur_level.buffers--;
677 queue->cur_level.bytes -= GST_BUFFER_SIZE (data);
678 if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
679 queue->cur_level.time -= GST_BUFFER_DURATION (data);
682 /* Now that we're done, we can lose our own reference to
683 * the item, since we're no longer in danger. */
684 gst_data_unref (data);
686 STATUS (queue, "after _get()");
688 GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "signalling item_del");
689 g_cond_signal (queue->item_del);
690 g_mutex_unlock (queue->qlock);
692 /* FIXME: I suppose this needs to be locked, since the EOS
693 * bit affects the pipeline state. However, that bit is
694 * locked too so it'd cause a deadlock. */
695 if (GST_IS_EVENT (data)) {
696 GstEvent *event = GST_EVENT (data);
697 switch (GST_EVENT_TYPE (event)) {
699 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
701 GST_ELEMENT_NAME (queue));
702 gst_element_set_eos (GST_ELEMENT (queue));
/* Src-pad event handler.  In PLAYING, the event is parked on
 * queue->events (as a stack-allocated GstQueueEventResponse) for the
 * streaming thread to forward upstream via
 * gst_queue_handle_pending_events(); this thread waits on event_done in
 * half-second slices.  Outside PLAYING the event is forwarded directly.
 * FLUSH (and flushing SEEK) events also empty the queue.
 * Significant interior lines (er setup, else-branch, returns) are
 * elided in this excerpt. */
714 gst_queue_handle_src_event (GstPad *pad,
717 GstQueue *queue = GST_QUEUE (gst_pad_get_parent (pad));
720 g_mutex_lock (queue->qlock);
722 if (gst_element_get_state (GST_ELEMENT (queue)) == GST_STATE_PLAYING) {
723 GstQueueEventResponse er;
725 /* push the event to the queue and wait for upstream consumption */
728 g_queue_push_tail (queue->events, &er);
729 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
730 "Preparing for loop for event handler");
731 /* see the chain function on why this is here - it prevents a deadlock */
732 g_cond_signal (queue->item_del);
733 while (!er.handled) {
735 g_get_current_time (&timeout);
736 g_time_val_add (&timeout, 500 * 1000); /* half a second */
737 if (!g_cond_timed_wait (queue->event_done, queue->qlock, &timeout) &&
739 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
740 "timeout in upstream event handling");
741 /* remove ourselves from the pending list. Since we're
742 * locked, others cannot reference this anymore. */
/* NOTE(review): `er` was pushed onto queue->events above, but this
 * timeout path removes it from queue->queue (the *data* queue) and
 * decrements queue->queue->length.  That looks like the wrong list --
 * it would corrupt the data queue's length and leave the dangling
 * stack pointer on queue->events.  Confirm against upstream history
 * before relying on this path. */
743 queue->queue->head = g_list_remove (queue->queue->head, &er);
744 queue->queue->head = g_list_first (queue->queue->head);
745 queue->queue->tail = g_list_last (queue->queue->head);
746 queue->queue->length--;
751 GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
/* non-PLAYING path: forward the event directly from this thread */
755 res = gst_pad_event_default (pad, event);
757 switch (GST_EVENT_TYPE (event)) {
758 case GST_EVENT_FLUSH:
759 GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
760 "FLUSH event, flushing queue\n");
761 gst_queue_locked_flush (queue);
764 if (GST_EVENT_SEEK_FLAGS (event) & GST_SEEK_FLAG_FLUSH) {
765 gst_queue_locked_flush (queue);
772 g_mutex_unlock (queue->qlock);
/* GstElement::release_locks implementation: sets the interrupt flag and
 * wakes both the producer (item_del) and consumer (item_add) waits so
 * chain()/get() can bail out for a pending state change. */
778 gst_queue_release_locks (GstElement *element)
782 queue = GST_QUEUE (element);
784 g_mutex_lock (queue->qlock);
785 queue->interrupt = TRUE;
786 g_cond_signal (queue->item_add);
787 g_cond_signal (queue->item_del);
788 g_mutex_unlock (queue->qlock);
/* State-change handler.  Flushes the queue entering READY and leaving
 * PAUSED, validates the pad link and the two-scheduler requirement when
 * going to PLAYING, clears the interrupt flag, chains up, and
 * re-activates both pads (see the comment below for why).
 * Runs under qlock so chain()/get() cannot race the transition.
 * Some case/goto/brace lines are elided in this excerpt. */
793 static GstElementStateReturn
794 gst_queue_change_state (GstElement *element)
797 GstElementStateReturn ret = GST_STATE_SUCCESS;
799 queue = GST_QUEUE (element);
801 GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "starting state change");
803 /* lock the queue so another thread (not in sync with this thread's state)
804 * can't call this queue's _get (or whatever)
806 g_mutex_lock (queue->qlock);
808 switch (GST_STATE_TRANSITION (element)) {
809 case GST_STATE_NULL_TO_READY:
810 gst_queue_locked_flush (queue);
812 case GST_STATE_PAUSED_TO_PLAYING:
813 if (!GST_PAD_IS_LINKED (queue->sinkpad)) {
814 GST_CAT_DEBUG_OBJECT (GST_CAT_STATES, queue,
815 "queue %s is not linked",
816 GST_ELEMENT_NAME (queue));
817 /* FIXME can this be? */
818 g_cond_signal (queue->item_add);
820 ret = GST_STATE_FAILURE;
823 GstScheduler *src_sched, *sink_sched;
825 src_sched = gst_pad_get_scheduler (GST_PAD (queue->srcpad));
826 sink_sched = gst_pad_get_scheduler (GST_PAD (queue->sinkpad));
/* a queue only makes sense bridging two different schedulers */
828 if (src_sched == sink_sched) {
829 GST_CAT_DEBUG_OBJECT (GST_CAT_STATES, queue,
830 "queue %s does not connect different schedulers",
831 GST_ELEMENT_NAME (queue));
833 g_warning ("queue %s does not connect different schedulers",
834 GST_ELEMENT_NAME (queue));
836 ret = GST_STATE_FAILURE;
840 queue->interrupt = FALSE;
842 case GST_STATE_PAUSED_TO_READY:
843 gst_queue_locked_flush (queue);
849 if (GST_ELEMENT_CLASS (parent_class)->change_state)
850 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element);
852 /* this is an ugly hack to make sure our pads are always active.
853 * Reason for this is that pad activation for the queue element
854 * depends on 2 schedulers (ugh) */
855 gst_pad_set_active (queue->sinkpad, TRUE);
856 gst_pad_set_active (queue->srcpad, TRUE);
859 g_mutex_unlock (queue->qlock);
861 GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "done with state change");
868 gst_queue_set_property (GObject *object,
873 GstQueue *queue = GST_QUEUE (object);
875 /* someone could change levels here, and since this
876 * affects the get/put funcs, we need to lock for safety. */
877 g_mutex_lock (queue->qlock);
880 case ARG_MAX_SIZE_BYTES:
881 queue->max_size.bytes = g_value_get_uint (value);
883 case ARG_MAX_SIZE_BUFFERS:
884 queue->max_size.buffers = g_value_get_uint (value);
886 case ARG_MAX_SIZE_TIME:
887 queue->max_size.time = g_value_get_uint64 (value);
889 case ARG_MIN_TRESHOLD_BYTES:
890 queue->max_size.bytes = g_value_get_uint (value);
892 case ARG_MIN_TRESHOLD_BUFFERS:
893 queue->max_size.buffers = g_value_get_uint (value);
895 case ARG_MIN_TRESHOLD_TIME:
896 queue->max_size.time = g_value_get_uint64 (value);
899 queue->leaky = g_value_get_enum (value);
901 case ARG_MAY_DEADLOCK:
902 queue->may_deadlock = g_value_get_boolean (value);
904 case ARG_BLOCK_TIMEOUT:
905 queue->block_timeout = g_value_get_uint64 (value);
908 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
912 g_mutex_unlock (queue->qlock);
/* Property getter.  NOTE(review): unlike the setter, no qlock
 * acquisition is visible in this excerpt, so these reads may race the
 * streaming threads -- confirm whether locking lines were elided.
 * The function's closing lines fall past the end of this excerpt. */
916 gst_queue_get_property (GObject *object,
921 GstQueue *queue = GST_QUEUE (object);
924 case ARG_CUR_LEVEL_BYTES:
925 g_value_set_uint (value, queue->cur_level.bytes);
927 case ARG_CUR_LEVEL_BUFFERS:
928 g_value_set_uint (value, queue->cur_level.buffers);
930 case ARG_CUR_LEVEL_TIME:
931 g_value_set_uint64 (value, queue->cur_level.time);
933 case ARG_MAX_SIZE_BYTES:
934 g_value_set_uint (value, queue->max_size.bytes);
936 case ARG_MAX_SIZE_BUFFERS:
937 g_value_set_uint (value, queue->max_size.buffers);
939 case ARG_MAX_SIZE_TIME:
940 g_value_set_uint64 (value, queue->max_size.time);
942 case ARG_MIN_TRESHOLD_BYTES:
943 g_value_set_uint (value, queue->min_treshold.bytes);
945 case ARG_MIN_TRESHOLD_BUFFERS:
946 g_value_set_uint (value, queue->min_treshold.buffers);
948 case ARG_MIN_TRESHOLD_TIME:
949 g_value_set_uint64 (value, queue->min_treshold.time);
952 g_value_set_enum (value, queue->leaky);
954 case ARG_MAY_DEADLOCK:
955 g_value_set_boolean (value, queue->may_deadlock);
957 case ARG_BLOCK_TIMEOUT:
958 g_value_set_uint64 (value, queue->block_timeout);
961 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);