2 * Copyright (C) 2014 Mathieu Duponchelle <mathieu.duponchelle@opencreed.com>
3 * Copyright (C) 2014 Thibault Saunier <tsaunier@gnome.org>
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Library General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Library General Public License for more details.
17 * You should have received a copy of the GNU Library General Public
18 * License along with this library; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
20 * Boston, MA 02110-1301, USA.
23 * SECTION: gstaggregator
24 * @short_description: manages a set of pads with the purpose of
25 * aggregating their buffers.
26 * @see_also: gstcollectpads for historical reasons.
28 * Manages a set of pads with the purpose of aggregating their buffers.
29 * Control is given to the subclass when all pads have data.
32 * Base class for mixers and muxers. Implementers should at least implement
33 * the aggregate () vmethod.
36 * When data is queued on all pads, the aggregate vmethod is called.
39 * One can peek at the data on any given GstAggregatorPad with the
40 * gst_aggregator_pad_get_buffer () method, and take ownership of it
41 * with the gst_aggregator_pad_steal_buffer () method. When a buffer
42 * has been taken with steal_buffer (), a new buffer can be queued
46 * If the subclass wishes to push a buffer downstream in its aggregate
47 * implementation, it should do so through the
48 * gst_aggregator_finish_buffer () method. This method will take care
49 * of sending and ordering mandatory events such as stream start, caps
53 * Same goes for EOS events, which should not be pushed directly by the
54 * subclass, it should instead return GST_FLOW_EOS in its aggregate
64 #include <string.h> /* strlen */
66 #include "gstaggregator.h"
69 /* Might become API */
/* Private for now: tag merging and the pad-responsiveness timeout accessors
 * are only used internally in this file (see set/get_property below). */
70 static void gst_aggregator_merge_tags (GstAggregator * aggregator,
71 const GstTagList * tags, GstTagMergeMode mode);
72 static void gst_aggregator_set_timeout (GstAggregator * agg, gint64 timeout);
73 static gint64 gst_aggregator_get_timeout (GstAggregator * agg);
/* Debug category used by all GST_* logging macros in this file. */
76 GST_DEBUG_CATEGORY_STATIC (aggregator_debug);
77 #define GST_CAT_DEFAULT aggregator_debug
79 /* GstAggregatorPad definitions */
/* Per-pad EVENT lock/condition helpers. The lock protects the pad's queued
 * buffer and EOS/flush state; the condition is used by the sinkpad chain
 * function to wait until the srcpad task has consumed the pending buffer.
 * Extra logging around each operation aids lock-order debugging. */
80 #define PAD_LOCK_EVENT(pad) G_STMT_START { \
81 GST_LOG_OBJECT (pad, "Taking EVENT lock from thread %p", \
83 g_mutex_lock(&pad->priv->event_lock); \
84 GST_LOG_OBJECT (pad, "Took EVENT lock from thread %p", \
88 #define PAD_UNLOCK_EVENT(pad) G_STMT_START { \
89 GST_LOG_OBJECT (pad, "Releasing EVENT lock from thread %p", \
91 g_mutex_unlock(&pad->priv->event_lock); \
92 GST_LOG_OBJECT (pad, "Release EVENT lock from thread %p", \
/* Caller must hold the pad's EVENT lock (g_cond_wait contract). */
97 #define PAD_WAIT_EVENT(pad) G_STMT_START { \
98 GST_LOG_OBJECT (pad, "Waiting for EVENT on thread %p", \
100 g_cond_wait(&(((GstAggregatorPad* )pad)->priv->event_cond), \
101 &(pad->priv->event_lock)); \
102 GST_LOG_OBJECT (pad, "DONE Waiting for EVENT on thread %p", \
/* Wakes any thread blocked in PAD_WAIT_EVENT. */
106 #define PAD_BROADCAST_EVENT(pad) { \
107 GST_LOG_OBJECT (pad, "Signaling EVENT from thread %p", \
109 g_cond_broadcast(&(((GstAggregatorPad* )pad)->priv->event_cond)); \
/* Serializes src-pad setcaps: prevents two threads from pushing caps on the
 * source pad at the same time (see the setcaps_lock field in the private
 * struct below). */
112 #define GST_AGGREGATOR_SETCAPS_LOCK(self) G_STMT_START { \
113 GST_LOG_OBJECT (self, "Taking SETCAPS lock from thread %p", \
115 g_mutex_lock(&self->priv->setcaps_lock); \
116 GST_LOG_OBJECT (self, "Took SETCAPS lock from thread %p", \
120 #define GST_AGGREGATOR_SETCAPS_UNLOCK(self) G_STMT_START { \
121 GST_LOG_OBJECT (self, "Releasing SETCAPS lock from thread %p", \
123 g_mutex_unlock(&self->priv->setcaps_lock); \
/* FIX: the post-unlock message previously said "Took SETCAPS lock"
 * (copy-paste from the LOCK macro); log the release instead, matching the
 * PAD_UNLOCK_EVENT wording above. */
124 GST_LOG_OBJECT (self, "Release SETCAPS lock from thread %p", \
/* Per-pad STREAM lock: serializes the pad's streaming thread (chain) against
 * flushing (see _flush_start / _chain). */
128 #define PAD_STREAM_LOCK(pad) G_STMT_START { \
129 GST_LOG_OBJECT (pad, "Taking lock from thread %p", \
131 g_mutex_lock(&pad->priv->stream_lock); \
132 GST_LOG_OBJECT (pad, "Took lock from thread %p", \
136 #define PAD_STREAM_UNLOCK(pad) G_STMT_START { \
137 GST_LOG_OBJECT (pad, "Releasing lock from thread %p", \
139 g_mutex_unlock(&pad->priv->stream_lock); \
140 GST_LOG_OBJECT (pad, "Release lock from thread %p", \
/* Private per-sinkpad state. Flush bookkeeping plus the clock id used for the
 * unresponsive-pad timeout (armed in _chain, cancelled in _stop_pad). */
144 struct _GstAggregatorPadPrivate
146 gboolean pending_flush_start;
147 gboolean pending_flush_stop;
/* EOS received while a buffer was still queued; applied when the buffer
 * is stolen (see the EOS case in _sink_event). */
148 gboolean pending_eos;
151 GstClockID timeout_id;
160 _aggpad_flush (GstAggregatorPad * aggpad, GstAggregator * agg)
162 GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad);
165 aggpad->priv->flushing = FALSE;
168 return klass->flush (aggpad, agg);
173 /*************************************
174 * GstAggregator implementation *
175 *************************************/
176 static GstElementClass *aggregator_parent_class = NULL;
/* The async queue is used purely as a wakeup token channel for the srcpad
 * task: PUSH posts a dummy pointer, POP blocks until one arrives, FLUSH
 * drains all pending tokens under the queue lock. */
178 #define AGGREGATOR_QUEUE(self) (((GstAggregator*)self)->priv->queue)
180 #define QUEUE_PUSH(self) G_STMT_START { \
181 GST_LOG_OBJECT (self, "Pushing to QUEUE in thread %p", \
183 g_async_queue_push (AGGREGATOR_QUEUE (self), GINT_TO_POINTER (1)); \
186 #define QUEUE_POP(self) G_STMT_START { \
187 GST_LOG_OBJECT (self, "Waiting on QUEUE in thread %p", \
189 g_async_queue_pop (AGGREGATOR_QUEUE (self)); \
190 GST_LOG_OBJECT (self, "Waited on QUEUE in thread %p", \
194 #define QUEUE_FLUSH(self) G_STMT_START { \
195 GST_LOG_OBJECT (self, "Flushing QUEUE in thread %p", \
197 g_async_queue_lock (AGGREGATOR_QUEUE (self)); \
198 while (g_async_queue_try_pop_unlocked (AGGREGATOR_QUEUE (self))); \
199 g_async_queue_unlock (AGGREGATOR_QUEUE (self)); \
200 GST_LOG_OBJECT (self, "Flushed QUEUE in thread %p", \
/* Private element state. Most boolean flags are manipulated with
 * g_atomic_int_* elsewhere in this file, so they double as atomic ints. */
204 struct _GstAggregatorPrivate
210 /* Our state is >= PAUSED */
215 gboolean send_stream_start;
216 gboolean send_segment;
217 gboolean flush_seeking;
218 gboolean pending_flush_start;
220 GstFlowReturn flow_return;
225 gboolean tags_changed;
227 /* Lock to prevent two src setcaps from happening at the same time */
237 gboolean one_actually_seeked;
/* -1 == no timeout: pads are never marked unresponsive (see the "timeout"
 * property installed in class_init). */
240 #define DEFAULT_TIMEOUT -1
250 * gst_aggregator_iterate_sinkpads:
251 * @self: The #GstAggregator
252 * @func: The function to call.
253 * @user_data: The data to pass to @func.
255 * Iterate the sinkpads of aggregator to call a function on them.
257 * This method guarantees that @func will be called only once for each
 * sinkpad even if the iterator resyncs, by tracking already-visited pads
 * in the @seen_pads list.
261 gst_aggregator_iterate_sinkpads (GstAggregator * self,
262 GstAggregatorPadForeachFunc func, gpointer user_data)
264 gboolean result = FALSE;
266 gboolean done = FALSE;
267 GValue item = { 0, };
268 GList *seen_pads = NULL;
270 iter = gst_element_iterate_sink_pads (GST_ELEMENT (self));
276 switch (gst_iterator_next (iter, &item)) {
277 case GST_ITERATOR_OK:
281 pad = g_value_get_object (&item);
283 /* if already pushed, skip. FIXME, find something faster to tag pads */
284 if (pad == NULL || g_list_find (seen_pads, pad)) {
285 g_value_reset (&item);
289 GST_LOG_OBJECT (self, "calling function on pad %s:%s",
290 GST_DEBUG_PAD_NAME (pad));
/* result reflects the last @func invocation; presumably iteration stops
 * early when @func returns FALSE (the break is on a missing line). */
291 result = func (self, pad, user_data);
295 seen_pads = g_list_prepend (seen_pads, pad);
297 g_value_reset (&item);
300 case GST_ITERATOR_RESYNC:
/* Concurrent pad add/remove: restart iteration; seen_pads prevents
 * calling @func twice on the same pad. */
301 gst_iterator_resync (iter);
303 case GST_ITERATOR_ERROR:
304 GST_ERROR_OBJECT (self,
305 "Could not iterate over internally linked pads");
308 case GST_ITERATOR_DONE:
313 g_value_unset (&item);
314 gst_iterator_free (iter);
316 if (seen_pads == NULL) {
317 GST_DEBUG_OBJECT (self, "No pad seen");
321 g_list_free (seen_pads);
/* Per-pad readiness predicate for aggregation: a pad is ready when it has a
 * queued buffer, is EOS, or has been marked unresponsive by the timeout
 * callback. Used via gst_aggregator_iterate_sinkpads in aggregate_func. */
327 static inline gboolean
328 _check_all_pads_with_data_or_eos_or_timeout (GstAggregator * self,
329 GstAggregatorPad * aggpad)
331 if (aggpad->buffer || aggpad->eos) {
335 if (g_atomic_int_get (&aggpad->unresponsive) == TRUE) {
336 /* pad has been deemed unresponsive */
340 GST_LOG_OBJECT (aggpad, "Not ready to be aggregated");
/* Restores the pre-negotiation/flushing baseline: the next stream will
 * re-send stream-start and segment, and the output segment is re-initialized
 * in TIME format. */
346 _reset_flow_values (GstAggregator * self)
348 self->priv->flow_return = GST_FLOW_FLUSHING;
349 self->priv->send_stream_start = TRUE;
350 self->priv->send_segment = TRUE;
351 gst_segment_init (&self->segment, GST_FORMAT_TIME);
/* Pushes, in order and at most once each, the mandatory sticky events on the
 * source pad: stream-start, caps (if set via gst_aggregator_set_src_caps),
 * segment (skipped while flush-seeking), and pending tags. Called before any
 * buffer leaves the element (see gst_aggregator_finish_buffer / _push_eos). */
355 _push_mandatory_events (GstAggregator * self)
357 GstAggregatorPrivate *priv = self->priv;
359 if (g_atomic_int_get (&self->priv->send_stream_start)) {
362 GST_INFO_OBJECT (self, "pushing stream start");
363 /* stream-start (FIXME: create id based on input ids) */
364 g_snprintf (s_id, sizeof (s_id), "agg-%08x", g_random_int ());
365 if (!gst_pad_push_event (self->srcpad, gst_event_new_stream_start (s_id))) {
366 GST_WARNING_OBJECT (self->srcpad, "Sending stream start event failed");
/* Cleared even on failure, so stream-start is only attempted once. */
368 g_atomic_int_set (&self->priv->send_stream_start, FALSE);
371 if (self->priv->srccaps) {
373 GST_INFO_OBJECT (self, "pushing caps: %" GST_PTR_FORMAT,
374 self->priv->srccaps);
375 if (!gst_pad_push_event (self->srcpad,
376 gst_event_new_caps (self->priv->srccaps))) {
377 GST_WARNING_OBJECT (self->srcpad, "Sending caps event failed");
378 gst_caps_unref (self->priv->srccaps);
380 self->priv->srccaps = NULL;
383 if (g_atomic_int_get (&self->priv->send_segment)) {
384 if (!g_atomic_int_get (&self->priv->flush_seeking)) {
385 GstEvent *segev = gst_event_new_segment (&self->segment);
/* Adopt the segment event's seqnum if we don't already track one
 * (e.g. from a seek), so downstream sees a consistent seqnum. */
387 if (!self->priv->seqnum)
388 self->priv->seqnum = gst_event_get_seqnum (segev);
390 gst_event_set_seqnum (segev, self->priv->seqnum);
392 GST_DEBUG_OBJECT (self, "pushing segment %" GST_PTR_FORMAT, segev);
393 gst_pad_push_event (self->srcpad, segev);
394 g_atomic_int_set (&self->priv->send_segment, FALSE);
398 if (priv->tags && priv->tags_changed) {
399 gst_pad_push_event (self->srcpad,
400 gst_event_new_tag (gst_tag_list_ref (priv->tags)));
401 priv->tags_changed = FALSE;
406 * gst_aggregator_set_src_caps:
407 * @self: The #GstAggregator
408 * @caps: The #GstCaps to set on the src pad.
410 * Sets the caps to be used on the src pad.
 *
 * Takes the SETCAPS lock so two threads cannot race pushing caps; the caps
 * are stored (referenced via gst_caps_replace) and pushed immediately as part
 * of the mandatory events.
413 gst_aggregator_set_src_caps (GstAggregator * self, GstCaps * caps)
415 GST_AGGREGATOR_SETCAPS_LOCK (self);
416 gst_caps_replace (&self->priv->srccaps, caps);
417 _push_mandatory_events (self);
418 GST_AGGREGATOR_SETCAPS_UNLOCK (self);
422 * gst_aggregator_finish_buffer:
423 * @self: The #GstAggregator
424 * @buffer: the #GstBuffer to push.
426 * This method will take care of sending mandatory events before pushing
427 * the provided buffer.
 *
 * Takes ownership of @buffer: it is either pushed downstream or unreffed
 * when the srcpad is inactive or a flushing seek is in progress.
430 gst_aggregator_finish_buffer (GstAggregator * self, GstBuffer * buffer)
432 _push_mandatory_events (self);
434 if (!g_atomic_int_get (&self->priv->flush_seeking) &&
435 gst_pad_is_active (self->srcpad)) {
436 GST_TRACE_OBJECT (self, "pushing buffer %" GST_PTR_FORMAT, buffer);
437 return gst_pad_push (self->srcpad, buffer);
439 GST_INFO_OBJECT (self, "Not pushing (active: %i, flushing: %i)",
440 g_atomic_int_get (&self->priv->flush_seeking),
441 gst_pad_is_active (self->srcpad));
442 gst_buffer_unref (buffer);
/* Sends EOS downstream (after the mandatory sticky events), tagged with the
 * tracked seqnum, and clears send_eos so it is only sent once. */
448 _push_eos (GstAggregator * self)
451 _push_mandatory_events (self);
453 self->priv->send_eos = FALSE;
454 event = gst_event_new_eos ();
455 gst_event_set_seqnum (event, self->priv->seqnum);
456 gst_pad_push_event (self->srcpad, event);
/* Srcpad task function. Loops calling the subclass aggregate() vmethod as
 * long as every sinkpad is ready (buffer, EOS, or unresponsive — see
 * _check_all_pads_with_data_or_eos_or_timeout), EOS has not been sent, and
 * the task is still running. A FLUSHING return during a flush-seek is
 * normalized to OK so the task is not torn down. */
460 aggregate_func (GstAggregator * self)
462 GstAggregatorPrivate *priv = self->priv;
463 GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
465 if (self->priv->running == FALSE) {
466 GST_DEBUG_OBJECT (self, "Not running anymore");
473 GST_LOG_OBJECT (self, "Checking aggregate");
474 while (priv->send_eos && gst_aggregator_iterate_sinkpads (self,
475 (GstAggregatorPadForeachFunc)
476 _check_all_pads_with_data_or_eos_or_timeout, NULL) && priv->running) {
477 GST_TRACE_OBJECT (self, "Actually aggregating!");
479 priv->flow_return = klass->aggregate (self);
/* GST_FLOW_EOS handling lives on a missing line — presumably breaks
 * out / pushes EOS; confirm against the full source. */
481 if (priv->flow_return == GST_FLOW_EOS) {
486 if (priv->flow_return == GST_FLOW_FLUSHING &&
487 g_atomic_int_get (&priv->flush_seeking))
488 priv->flow_return = GST_FLOW_OK;
490 GST_LOG_OBJECT (self, "flow return is %s",
491 gst_flow_get_name (priv->flow_return));
493 if (priv->flow_return != GST_FLOW_OK)
/* start vmethod default: arms the running/send_* flags for a new stream.
 * Note flow_return starts at OK here (vs FLUSHING in _reset_flow_values). */
500 _start (GstAggregator * self)
502 self->priv->running = TRUE;
503 self->priv->send_stream_start = TRUE;
504 self->priv->send_segment = TRUE;
505 self->priv->send_eos = TRUE;
506 self->priv->srccaps = NULL;
507 self->priv->flow_return = GST_FLOW_OK;
/* TRUE when the pad has no outstanding flush-start or flush-stop, i.e. its
 * part of a flushing seek is complete (see _all_flush_stop_received). */
513 _check_pending_flush_stop (GstAggregatorPad * pad)
515 return (!pad->priv->pending_flush_stop && !pad->priv->pending_flush_start);
/* Stops (or, when @flush_start is non-NULL, pauses for a flush) the srcpad
 * task. The optional flush-start event is pushed downstream first; ownership
 * of @flush_start is taken by gst_pad_push_event. */
519 _stop_srcpad_task (GstAggregator * self, GstEvent * flush_start)
523 GST_INFO_OBJECT (self, "%s srcpad task",
524 flush_start ? "Pausing" : "Stopping");
526 self->priv->running = FALSE;
530 res = gst_pad_push_event (self->srcpad, flush_start);
533 gst_pad_stop_task (self->srcpad);
/* (Re)starts the srcpad task running aggregate_func. */
540 _start_srcpad_task (GstAggregator * self)
542 GST_INFO_OBJECT (self, "Starting srcpad task");
544 self->priv->running = TRUE;
545 gst_pad_start_task (GST_PAD (self->srcpad),
546 (GstTaskFunction) aggregate_func, self, NULL);
/* flush vmethod default wrapper: resets segment/seek/tag flags and delegates
 * to the subclass flush() implementation. */
550 _flush (GstAggregator * self)
552 GstFlowReturn ret = GST_FLOW_OK;
553 GstAggregatorPrivate *priv = self->priv;
554 GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
556 GST_DEBUG_OBJECT (self, "Flushing everything");
557 g_atomic_int_set (&priv->send_segment, TRUE);
558 g_atomic_int_set (&priv->flush_seeking, FALSE);
559 g_atomic_int_set (&priv->tags_changed, FALSE);
561 ret = klass->flush (self);
/* TRUE once every sinkpad has completed its flush-start/flush-stop pair.
 * Walks the sinkpad list under the object lock; returns early (unlocking)
 * on the first pad still pending. */
567 _all_flush_stop_received (GstAggregator * self)
570 GstAggregatorPad *tmppad;
572 GST_OBJECT_LOCK (self);
573 for (tmp = GST_ELEMENT (self)->sinkpads; tmp; tmp = tmp->next) {
574 tmppad = (GstAggregatorPad *) tmp->data;
576 if (_check_pending_flush_stop (tmppad) == FALSE) {
577 GST_DEBUG_OBJECT (tmppad, "Is not last %i -- %i",
578 tmppad->priv->pending_flush_start, tmppad->priv->pending_flush_stop);
579 GST_OBJECT_UNLOCK (self);
583 GST_OBJECT_UNLOCK (self);
/* Handles FLUSH_START on a sinkpad: marks the pad flushing, drops its queued
 * buffer to wake the streaming thread, and — only for the first FLUSH_START
 * of a flushing seek — pauses the srcpad task (forwarding @event downstream)
 * and takes the srcpad STREAM_LOCK until FLUSH_STOP (see _sink_event).
 * Ownership of @event is taken: it is either forwarded via _stop_srcpad_task
 * or unreffed here. */
589 _flush_start (GstAggregator * self, GstAggregatorPad * aggpad, GstEvent * event)
592 GstAggregatorPrivate *priv = self->priv;
593 GstAggregatorPadPrivate *padpriv = aggpad->priv;
595 g_atomic_int_set (&aggpad->priv->flushing, TRUE);
596 /* Remove pad buffer and wake up the streaming thread */
597 tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
598 gst_buffer_replace (&tmpbuf, NULL);
599 PAD_STREAM_LOCK (aggpad);
600 if (g_atomic_int_compare_and_exchange (&padpriv->pending_flush_start,
601 TRUE, FALSE) == TRUE) {
602 GST_DEBUG_OBJECT (aggpad, "Expecting FLUSH_STOP now");
603 g_atomic_int_set (&padpriv->pending_flush_stop, TRUE);
606 if (g_atomic_int_get (&priv->flush_seeking)) {
607 /* If flush_seeking we forward the first FLUSH_START */
608 if (g_atomic_int_compare_and_exchange (&priv->pending_flush_start,
609 TRUE, FALSE) == TRUE) {
611 GST_INFO_OBJECT (self, "Flushing, pausing srcpad task");
612 _stop_srcpad_task (self, event);
613 priv->flow_return = GST_FLOW_OK;
615 GST_INFO_OBJECT (self, "Getting STREAM_LOCK while seeking");
/* Held until the matching FLUSH_STOP releases it in _sink_event. */
616 GST_PAD_STREAM_LOCK (self->srcpad);
617 GST_LOG_OBJECT (self, "GOT STREAM_LOCK");
621 gst_event_unref (event);
623 PAD_STREAM_UNLOCK (aggpad);
/* Steal again: the chain function may have queued a buffer while we
 * waited for the pad STREAM lock. */
625 tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
626 gst_buffer_replace (&tmpbuf, NULL);
629 /* GstAggregator vmethods default implementations */
/* sink_event vmethod default. Serialized events are intercepted here:
 * FLUSH_START/STOP drive the flushing-seek state machine, EOS is deferred
 * while a buffer is still queued, SEGMENT is stored on the pad, and
 * stream-scoped TAG events are merged into the element's tag list. Events
 * that fall through are forwarded via gst_pad_event_default; intercepted
 * ones are unreffed ("eaten") at the end. */
631 _sink_event (GstAggregator * self, GstAggregatorPad * aggpad, GstEvent * event)
634 GstPad *pad = GST_PAD (aggpad);
635 GstAggregatorPrivate *priv = self->priv;
637 switch (GST_EVENT_TYPE (event)) {
638 case GST_EVENT_FLUSH_START:
640 _flush_start (self, aggpad, event);
641 /* We forward only in one case: right after flush_seeking */
645 case GST_EVENT_FLUSH_STOP:
647 GST_DEBUG_OBJECT (aggpad, "Got FLUSH_STOP");
649 _aggpad_flush (aggpad, self);
650 if (g_atomic_int_get (&priv->flush_seeking)) {
651 g_atomic_int_set (&aggpad->priv->pending_flush_stop, FALSE);
653 if (g_atomic_int_get (&priv->flush_seeking)) {
654 if (_all_flush_stop_received (self)) {
655 /* That means we received FLUSH_STOP/FLUSH_STOP on
656 * all sinkpads -- Seeking is Done... sending FLUSH_STOP */
658 gst_pad_push_event (self->srcpad, event);
659 priv->send_eos = TRUE;
663 GST_INFO_OBJECT (self, "Releasing source pad STREAM_LOCK");
/* Pairs with the GST_PAD_STREAM_LOCK taken in _flush_start. */
664 GST_PAD_STREAM_UNLOCK (self->srcpad);
665 _start_srcpad_task (self);
670 /* We never forward the event */
675 GST_DEBUG_OBJECT (aggpad, "EOS");
677 /* We still have a buffer, and we don't want the subclass to have to
678 * check for it. Mark pending_eos, eos will be set when steal_buffer is
681 PAD_LOCK_EVENT (aggpad);
682 if (!aggpad->buffer) {
685 aggpad->priv->pending_eos = TRUE;
687 PAD_UNLOCK_EVENT (aggpad);
692 case GST_EVENT_SEGMENT:
694 PAD_LOCK_EVENT (aggpad);
695 gst_event_copy_segment (event, &aggpad->segment);
696 self->priv->seqnum = gst_event_get_seqnum (event);
697 PAD_UNLOCK_EVENT (aggpad);
700 case GST_EVENT_STREAM_START:
708 gst_event_parse_tag (event, &tags);
710 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
711 gst_aggregator_merge_tags (self, tags, GST_TAG_MERGE_REPLACE);
712 gst_event_unref (event);
724 GST_DEBUG_OBJECT (pad, "Forwarding event: %" GST_PTR_FORMAT, event);
725 return gst_pad_event_default (pad, GST_OBJECT (self), event);
728 GST_DEBUG_OBJECT (pad, "Eating event: %" GST_PTR_FORMAT, event);
730 gst_event_unref (event);
/* Per-pad shutdown helper: flushes the pad and cancels/releases its pending
 * unresponsive-timeout clock id under the pad EVENT lock. */
736 _stop_pad (GstAggregator * self, GstAggregatorPad * pad, gpointer unused_udata)
738 _aggpad_flush (pad, self);
740 PAD_LOCK_EVENT (pad);
741 /* remove the timeouts */
742 if (pad->priv->timeout_id) {
743 gst_clock_id_unschedule (pad->priv->timeout_id);
744 gst_clock_id_unref (pad->priv->timeout_id);
745 pad->priv->timeout_id = NULL;
747 PAD_UNLOCK_EVENT (pad);
/* stop vmethod default: resets flow state, stops every sinkpad, and drops
 * the accumulated tag list. */
753 _stop (GstAggregator * agg)
755 _reset_flow_values (agg);
757 gst_aggregator_iterate_sinkpads (agg,
758 (GstAggregatorPadForeachFunc) _stop_pad, NULL);
761 gst_tag_list_unref (agg->priv->tags);
762 agg->priv->tags = NULL;
767 /* GstElement vmethods implementations */
/* change_state: calls the subclass start() on READY->PAUSED before chaining
 * up, and stop() on PAUSED->READY after the parent transition. */
768 static GstStateChangeReturn
769 _change_state (GstElement * element, GstStateChange transition)
771 GstStateChangeReturn ret;
772 GstAggregator *self = GST_AGGREGATOR (element);
773 GstAggregatorClass *agg_class = GST_AGGREGATOR_GET_CLASS (self);
776 switch (transition) {
777 case GST_STATE_CHANGE_READY_TO_PAUSED:
778 agg_class->start (self);
785 GST_ELEMENT_CLASS (aggregator_parent_class)->change_state (element,
786 transition)) == GST_STATE_CHANGE_FAILURE)
790 switch (transition) {
791 case GST_STATE_CHANGE_PAUSED_TO_READY:
792 agg_class->stop (self);
802 GST_ERROR_OBJECT (element, "parent failed state change");
/* release_pad vmethod: marks the pad flushing, drops its queued buffer so any
 * blocked chain call wakes up, then removes the pad from the element. */
808 _release_pad (GstElement * element, GstPad * pad)
812 GstAggregator *self = GST_AGGREGATOR (element);
813 GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
815 GST_INFO_OBJECT (pad, "Removing pad");
817 g_atomic_int_set (&aggpad->priv->flushing, TRUE);
818 tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
819 gst_buffer_replace (&tmpbuf, NULL);
820 gst_element_remove_pad (element, pad);
822 /* Something changed make sure we try to aggregate */
/* GstClockCallback fired when a pad's timeout (see the "timeout" property)
 * expires without a new buffer: marks the pad unresponsive so aggregation
 * can proceed without it. Runs on a clock thread; takes the pad EVENT lock
 * and only a temporary ref on the parent element. */
827 _unresponsive_timeout (GstClock * clock, GstClockTime time, GstClockID id,
830 GstAggregatorPad *aggpad;
833 if (user_data == NULL)
836 aggpad = GST_AGGREGATOR_PAD (user_data);
838 /* avoid holding the last reference to the parent element here */
839 PAD_LOCK_EVENT (aggpad);
841 self = GST_AGGREGATOR (gst_pad_get_parent (GST_PAD (aggpad)));
843 GST_DEBUG_OBJECT (aggpad, "marked unresponsive");
845 g_atomic_int_set (&aggpad->unresponsive, TRUE);
849 gst_object_unref (self);
852 PAD_UNLOCK_EVENT (aggpad);
/* request_new_pad vmethod: creates a sinkpad of the class' sinkpads_type from
 * the "sink_%u" template. Pad naming is done under the object lock: either
 * the next available serial is used, or the serial is parsed from @req_name
 * and padcount is bumped to stay above it. The pad is activated and added
 * before being returned. */
858 _request_new_pad (GstElement * element,
859 GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
862 GstAggregatorPad *agg_pad;
864 GstElementClass *klass = GST_ELEMENT_GET_CLASS (element);
865 GstAggregatorPrivate *priv = GST_AGGREGATOR (element)->priv;
867 self = GST_AGGREGATOR (element);
869 if (templ == gst_element_class_get_pad_template (klass, "sink_%u")) {
873 GST_OBJECT_LOCK (element);
874 if (req_name == NULL || strlen (req_name) < 6
875 || !g_str_has_prefix (req_name, "sink_")) {
876 /* no name given when requesting the pad, use next available int */
879 /* parse serial number from requested padname */
880 serial = g_ascii_strtoull (&req_name[5], NULL, 10);
881 if (serial >= priv->padcount)
882 priv->padcount = serial;
885 name = g_strdup_printf ("sink_%u", priv->padcount);
886 agg_pad = g_object_new (GST_AGGREGATOR_GET_CLASS (self)->sinkpads_type,
887 "name", name, "direction", GST_PAD_SINK, "template", templ, NULL);
890 GST_OBJECT_UNLOCK (element);
896 GST_DEBUG_OBJECT (element, "Adding pad %s", GST_PAD_NAME (agg_pad));
899 gst_pad_set_active (GST_PAD (agg_pad), TRUE);
901 /* add the pad to the element */
902 gst_element_add_pad (element, GST_PAD (agg_pad));
904 return GST_PAD (agg_pad);
/* send_event vmethod: a SEEK received before PAUSED cannot be executed yet,
 * so its parameters are applied to the output segment and its seqnum stored
 * (under the STATE lock) for later; other events chain up to the parent. */
908 _send_event (GstElement * element, GstEvent * event)
910 GstAggregator *self = GST_AGGREGATOR (element);
912 GST_STATE_LOCK (element);
913 if (GST_EVENT_TYPE (event) == GST_EVENT_SEEK &&
914 GST_STATE (element) < GST_STATE_PAUSED) {
918 GstSeekType start_type, stop_type;
921 gst_event_parse_seek (event, &rate, &fmt, &flags, &start_type,
922 &start, &stop_type, &stop);
923 gst_segment_do_seek (&self->segment, rate, fmt, flags, start_type, start,
924 stop_type, stop, NULL);
926 self->priv->seqnum = gst_event_get_seqnum (event);
927 GST_DEBUG_OBJECT (element, "Storing segment %" GST_PTR_FORMAT, event);
929 GST_STATE_UNLOCK (element);
932 return GST_ELEMENT_CLASS (aggregator_parent_class)->send_event (element,
/* src_query vmethod default: answers SEEKING queries with "not seekable"
 * instead of forwarding them (a downstream sink might wrongly claim
 * seekability); everything else goes through gst_pad_query_default. */
937 _src_query (GstAggregator * self, GstQuery * query)
941 switch (GST_QUERY_TYPE (query)) {
942 case GST_QUERY_SEEKING:
946 /* don't pass it along as some (file)sink might claim it does
947 * whereas with a collectpads in between that will not likely work */
948 gst_query_parse_seeking (query, &format, NULL, NULL, NULL);
949 gst_query_set_seeking (query, format, FALSE, 0, -1);
958 return gst_pad_query_default (self->srcpad, GST_OBJECT (self), query);
/* GstPadForwardFunction used by _forward_event_to_all_sinkpads: sends a ref
 * of evdata->event to the pad's peer. A failed SEEK is tolerated when the
 * peer reports itself non-seekable via a SEEKING query; for flushing seeks
 * the pad's pending flush flags are (re)set. The aggregate result is ANDed
 * into evdata->result, and the function returns FALSE so gst_pad_forward
 * visits every pad. */
965 event_forward_func (GstPad * pad, EventData * evdata)
968 GstPad *peer = gst_pad_get_peer (pad);
969 GstAggregatorPadPrivate *padpriv = GST_AGGREGATOR_PAD (pad)->priv;
972 ret = gst_pad_send_event (peer, gst_event_ref (evdata->event));
973 GST_DEBUG_OBJECT (pad, "return of event push is %d", ret);
974 gst_object_unref (peer);
978 if (GST_EVENT_TYPE (evdata->event) == GST_EVENT_SEEK)
979 GST_ERROR_OBJECT (pad, "Event %" GST_PTR_FORMAT " failed", evdata->event);
981 if (GST_EVENT_TYPE (evdata->event) == GST_EVENT_SEEK) {
982 GstQuery *seeking = gst_query_new_seeking (GST_FORMAT_TIME);
/* NOTE(review): peer was unreffed above (visible excerpt); presumably the
 * full source re-acquires or orders this differently — confirm. */
984 if (gst_pad_query (peer, seeking)) {
987 gst_query_parse_seeking (seeking, NULL, &seekable, NULL, NULL);
989 if (seekable == FALSE) {
990 GST_INFO_OBJECT (pad,
991 "Source not seekable, We failed but it does not matter!");
996 GST_ERROR_OBJECT (pad, "Query seeking FAILED");
1000 if (evdata->flush) {
1001 padpriv->pending_flush_start = FALSE;
1002 padpriv->pending_flush_stop = FALSE;
1005 evdata->one_actually_seeked = TRUE;
1008 evdata->result &= ret;
1010 /* Always send to all pads */
/* Foreach helper: arms a pad for an upcoming flushing seek. */
1015 _set_flush_pending (GstAggregator * self, GstAggregatorPad * pad,
1018 pad->priv->pending_flush_start = TRUE;
1019 pad->priv->pending_flush_stop = FALSE;
/* Forwards @event upstream through every sinkpad via event_forward_func.
 * For flushing events, all pads are first marked flush-pending because
 * FLUSH_START/STOP can arrive synchronously while the event is being sent.
 * Consumes @event; the per-pad results are returned in the EventData. */
1025 _forward_event_to_all_sinkpads (GstAggregator * self, GstEvent * event,
1030 evdata.event = event;
1031 evdata.result = TRUE;
1032 evdata.flush = flush;
1033 evdata.one_actually_seeked = FALSE;
1035 /* We first need to set all pads as flushing in a first pass
1036 * as flush_start flush_stop is sometimes sent synchronously
1037 * while we send the seek event */
1039 gst_aggregator_iterate_sinkpads (self,
1040 (GstAggregatorPadForeachFunc) _set_flush_pending, NULL);
1041 gst_pad_forward (self->srcpad, (GstPadForwardFunction) event_forward_func,
1044 gst_event_unref (event);
/* Executes a SEEK on the srcpad: applies it to the output segment, marks the
 * flush-seeking state when GST_SEEK_FLAG_FLUSH is set, forwards the event to
 * all sinkpads, and rolls the flush state back if nothing upstream actually
 * handled the seek. Consumes @event (via _forward_event_to_all_sinkpads). */
1050 _do_seek (GstAggregator * self, GstEvent * event)
1055 GstSeekType start_type, stop_type;
1059 GstAggregatorPrivate *priv = self->priv;
1061 gst_event_parse_seek (event, &rate, &fmt, &flags, &start_type,
1062 &start, &stop_type, &stop);
1064 GST_INFO_OBJECT (self, "starting SEEK");
1066 flush = flags & GST_SEEK_FLAG_FLUSH;
1069 g_atomic_int_set (&priv->pending_flush_start, TRUE);
1070 g_atomic_int_set (&priv->flush_seeking, TRUE);
1073 gst_segment_do_seek (&self->segment, rate, fmt, flags, start_type, start,
1074 stop_type, stop, NULL);
1076 /* forward the seek upstream */
1077 evdata = _forward_event_to_all_sinkpads (self, event, flush);
1080 if (!evdata.result || !evdata.one_actually_seeked) {
1081 g_atomic_int_set (&priv->flush_seeking, FALSE);
1082 g_atomic_int_set (&priv->pending_flush_start, FALSE);
1085 GST_INFO_OBJECT (self, "seek done, result: %d", evdata.result);
1087 return evdata.result;
/* src_event vmethod default: SEEK is handled by _do_seek (the extra
 * ref/unref pair brackets the consuming call), NAVIGATION is dropped, and
 * anything else is forwarded upstream to all sinkpads. */
1091 _src_event (GstAggregator * self, GstEvent * event)
1094 gboolean res = TRUE;
1096 switch (GST_EVENT_TYPE (event)) {
1097 case GST_EVENT_SEEK:
1099 gst_event_ref (event);
1100 res = _do_seek (self, event);
1101 gst_event_unref (event);
1105 case GST_EVENT_NAVIGATION:
1107 /* navigation is rather pointless. */
1109 gst_event_unref (event);
1118 evdata = _forward_event_to_all_sinkpads (self, event, FALSE);
1119 res = evdata.result;
/* Thin GstPad function wrappers dispatching to the class vmethods, so
 * subclasses can override src_event / src_query. */
1126 src_event_func (GstPad * pad, GstObject * parent, GstEvent * event)
1128 GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
1130 return klass->src_event (GST_AGGREGATOR (parent), event);
1134 src_query_func (GstPad * pad, GstObject * parent, GstQuery * query)
1136 GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
1138 return klass->src_query (GST_AGGREGATOR (parent), query);
/* Srcpad activate-mode function: gives the subclass' src_activate a veto,
 * then starts the srcpad task for PUSH activation (the only supported mode)
 * or stops it on deactivation. */
1142 src_activate_mode (GstPad * pad,
1143 GstObject * parent, GstPadMode mode, gboolean active)
1145 GstAggregator *self = GST_AGGREGATOR (parent);
1146 GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
1148 if (klass->src_activate) {
1149 if (klass->src_activate (self, mode, active) == FALSE) {
1154 if (active == TRUE) {
1156 case GST_PAD_MODE_PUSH:
1158 GST_INFO_OBJECT (pad, "Activating pad!");
1159 _start_srcpad_task (self);
1164 GST_ERROR_OBJECT (pad, "Only supported mode is PUSH");
1171 GST_INFO_OBJECT (self, "Deactivating srcpad");
/* NOTE(review): FALSE passed where _stop_srcpad_task expects a
 * GstEvent* (no flush event) — works but reads as a bool; NULL would be
 * clearer. */
1172 _stop_srcpad_task (self, FALSE);
/* sink_query vmethod default: plain forwarding via gst_pad_query_default. */
1178 _sink_query (GstAggregator * self, GstAggregatorPad * aggpad, GstQuery * query)
1180 GstPad *pad = GST_PAD (aggpad);
1182 return gst_pad_query_default (pad, GST_OBJECT (self), query);
/* finalize: releases the clock obtained in init and tears down the setcaps
 * mutex, then chains up. */
1186 gst_aggregator_finalize (GObject * object)
1188 GstAggregator *self = (GstAggregator *) object;
1190 gst_object_unref (self->clock);
1191 g_mutex_clear (&self->priv->setcaps_lock);
1193 G_OBJECT_CLASS (aggregator_parent_class)->finalize (object);
/* dispose: chains up first, then drops the wakeup queue. May run more than
 * once, hence the NULL check and reset. */
1197 gst_aggregator_dispose (GObject * object)
1199 GstAggregator *self = (GstAggregator *) object;
1201 G_OBJECT_CLASS (aggregator_parent_class)->dispose (object);
1203 if (AGGREGATOR_QUEUE (self)) {
1204 g_async_queue_unref (AGGREGATOR_QUEUE (self));
1205 AGGREGATOR_QUEUE (self) = NULL;
1210 * gst_aggregator_set_timeout:
1211 * @agg: a #GstAggregator
1212 * @timeout: the new timeout value.
1214 * Sets the new timeout value to @timeout. This value is used to limit the
1215 * amount of time a pad waits for data to appear before considering the pad
 * unresponsive. Guarded by the object lock.
1219 gst_aggregator_set_timeout (GstAggregator * agg, gint64 timeout)
1221 g_return_if_fail (GST_IS_AGGREGATOR (agg));
1223 GST_OBJECT_LOCK (agg);
1224 agg->timeout = timeout;
1225 GST_OBJECT_UNLOCK (agg);
1229 * gst_aggregator_get_timeout:
1230 * @agg: a #GstAggregator
1232 * Gets the timeout value. See gst_aggregator_set_timeout for
1235 * Returns: The time in nanoseconds to wait for data to arrive on a sink pad
1236 * before a pad is deemed unresponsive. A value of -1 means an
1240 gst_aggregator_get_timeout (GstAggregator * agg)
1244 g_return_val_if_fail (GST_IS_AGGREGATOR (agg), -1);
1246 GST_OBJECT_LOCK (agg);
1248 GST_OBJECT_UNLOCK (agg);
/* GObject property setter/getter: only PROP_TIMEOUT is handled, delegating
 * to the locked accessors above. */
1254 gst_aggregator_set_property (GObject * object, guint prop_id,
1255 const GValue * value, GParamSpec * pspec)
1257 GstAggregator *agg = GST_AGGREGATOR (object);
1261 gst_aggregator_set_timeout (agg, g_value_get_int64 (value));
1264 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1270 gst_aggregator_get_property (GObject * object, guint prop_id,
1271 GValue * value, GParamSpec * pspec)
1273 GstAggregator *agg = GST_AGGREGATOR (object);
1277 g_value_set_int64 (value, gst_aggregator_get_timeout (agg));
1280 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1285 /* GObject vmethods implementations */
/* Class init: wires default vmethod implementations, GstElement overrides,
 * GObject property/lifecycle hooks, and installs the "timeout" property. */
1287 gst_aggregator_class_init (GstAggregatorClass * klass)
1289 GObjectClass *gobject_class = (GObjectClass *) klass;
1290 GstElementClass *gstelement_class = (GstElementClass *) klass;
1292 aggregator_parent_class = g_type_class_peek_parent (klass);
1293 g_type_class_add_private (klass, sizeof (GstAggregatorPrivate));
1295 GST_DEBUG_CATEGORY_INIT (aggregator_debug, "aggregator",
1296 GST_DEBUG_FG_MAGENTA, "GstAggregator");
/* Subclasses may override sinkpads_type to request custom pad classes. */
1298 klass->sinkpads_type = GST_TYPE_AGGREGATOR_PAD;
1299 klass->start = _start;
1300 klass->stop = _stop;
1302 klass->sink_event = _sink_event;
1303 klass->sink_query = _sink_query;
1305 klass->src_event = _src_event;
1306 klass->src_query = _src_query;
1308 gstelement_class->request_new_pad = GST_DEBUG_FUNCPTR (_request_new_pad);
1309 gstelement_class->send_event = GST_DEBUG_FUNCPTR (_send_event);
1310 gstelement_class->release_pad = GST_DEBUG_FUNCPTR (_release_pad);
1311 gstelement_class->change_state = GST_DEBUG_FUNCPTR (_change_state);
1313 gobject_class->set_property = gst_aggregator_set_property;
1314 gobject_class->get_property = gst_aggregator_get_property;
1315 gobject_class->finalize = gst_aggregator_finalize;
1316 gobject_class->dispose = gst_aggregator_dispose;
1318 g_object_class_install_property (gobject_class, PROP_TIMEOUT,
1319 g_param_spec_int64 ("timeout", "Buffer timeout",
1320 "Number of nanoseconds to wait for a buffer to arrive on a sink pad"
1321 "before the pad is deemed unresponsive (-1 unlimited)", -1,
1322 G_MAXINT64, DEFAULT_TIMEOUT,
1323 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
/* Instance init: requires a concrete aggregate() implementation and a "src"
 * pad template; creates the wakeup queue, the source pad with its event/
 * query/activate-mode functions, the system clock ref (released in finalize)
 * and the setcaps mutex (cleared in finalize). */
1327 gst_aggregator_init (GstAggregator * self, GstAggregatorClass * klass)
1329 GstPadTemplate *pad_template;
1330 GstAggregatorPrivate *priv;
1332 g_return_if_fail (klass->aggregate != NULL);
1335 G_TYPE_INSTANCE_GET_PRIVATE (self, GST_TYPE_AGGREGATOR,
1336 GstAggregatorPrivate);
1341 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
1342 g_return_if_fail (pad_template != NULL);
/* -1 so the first requested sinkpad is "sink_0" (padcount is incremented
 * on a missing line — confirm against the full source). */
1344 priv->padcount = -1;
1345 priv->tags_changed = FALSE;
1346 _reset_flow_values (self);
1348 AGGREGATOR_QUEUE (self) = g_async_queue_new ();
1349 self->srcpad = gst_pad_new_from_template (pad_template, "src");
1351 gst_pad_set_event_function (self->srcpad,
1352 GST_DEBUG_FUNCPTR ((GstPadEventFunction) src_event_func));
1353 gst_pad_set_query_function (self->srcpad,
1354 GST_DEBUG_FUNCPTR ((GstPadQueryFunction) src_query_func));
1355 gst_pad_set_activatemode_function (self->srcpad,
1356 GST_DEBUG_FUNCPTR ((GstPadActivateModeFunction) src_activate_mode));
1358 gst_element_add_pad (GST_ELEMENT (self), self->srcpad);
1360 self->clock = gst_system_clock_obtain ();
1363 g_mutex_init (&self->priv->setcaps_lock);
1366 /* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
1367 * method to get to the padtemplates */
/* Thread-safe lazy type registration via g_once_init_enter/leave. */
1369 gst_aggregator_get_type (void)
1371 static volatile gsize type = 0;
1373 if (g_once_init_enter (&type)) {
1375 static const GTypeInfo info = {
1376 sizeof (GstAggregatorClass),
1379 (GClassInitFunc) gst_aggregator_class_init,
1382 sizeof (GstAggregator),
1384 (GInstanceInitFunc) gst_aggregator_init,
1387 _type = g_type_register_static (GST_TYPE_ELEMENT,
1388 "GstAggregator", &info, G_TYPE_FLAG_ABSTRACT);
1389 g_once_init_leave (&type, _type);
/* Sinkpad chain function. Cancels any pending unresponsive-timeout for the
 * pad and clears its unresponsive flag, then — under the pad STREAM lock —
 * bails out if the pad is flushing or already past EOS, waits (pad EVENT
 * lock + cond) until any previously queued buffer has been consumed, lets
 * the subclass clip() the buffer, queues it, and finally re-arms the timeout
 * when one is configured. Returns the element's current flow_return.
 * Ownership of @buffer is taken on every path. */
1394 static GstFlowReturn
1395 _chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
1397 GstBuffer *actual_buf = buffer;
1398 GstAggregator *self = GST_AGGREGATOR (object);
1399 GstAggregatorPrivate *priv = self->priv;
1400 GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
1401 GstAggregatorClass *aggclass = GST_AGGREGATOR_GET_CLASS (object);
1402 GstClockTime timeout = gst_aggregator_get_timeout (self);
1405 GST_DEBUG_OBJECT (aggpad, "Start chaining a buffer %" GST_PTR_FORMAT, buffer);
/* Data arrived: the pending timeout no longer applies. */
1406 if (aggpad->priv->timeout_id) {
1407 gst_clock_id_unschedule (aggpad->priv->timeout_id);
1408 gst_clock_id_unref (aggpad->priv->timeout_id);
1409 aggpad->priv->timeout_id = NULL;
1411 g_atomic_int_set (&aggpad->unresponsive, FALSE);
1413 PAD_STREAM_LOCK (aggpad);
1415 if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE)
1418 if (g_atomic_int_get (&aggpad->priv->pending_eos) == TRUE)
1421 PAD_LOCK_EVENT (aggpad);
/* Back-pressure: block until the srcpad task steals the queued buffer
 * (PAD_BROADCAST_EVENT wakes us). */
1423 if (aggpad->buffer) {
1424 GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
1425 PAD_WAIT_EVENT (aggpad);
1427 PAD_UNLOCK_EVENT (aggpad);
/* Re-check: a flush may have happened while we were waiting. */
1429 if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE)
1432 if (aggclass->clip) {
1433 aggclass->clip (self, aggpad, buffer, &actual_buf);
1436 PAD_LOCK_EVENT (aggpad);
1438 gst_buffer_unref (aggpad->buffer);
1439 aggpad->buffer = actual_buf;
1440 PAD_UNLOCK_EVENT (aggpad);
1441 PAD_STREAM_UNLOCK (aggpad);
/* Arm the unresponsive timeout for the next buffer, holding a pad ref
 * for the async clock callback. */
1445 if (GST_CLOCK_TIME_IS_VALID (timeout)) {
1446 now = gst_clock_get_time (self->clock);
1447 aggpad->priv->timeout_id =
1448 gst_clock_new_single_shot_id (self->clock, now + timeout);
1449 gst_clock_id_wait_async (aggpad->priv->timeout_id, _unresponsive_timeout,
1450 gst_object_ref (aggpad), gst_object_unref);
1453 GST_DEBUG_OBJECT (aggpad, "Done chaining");
1455 return priv->flow_return;
/* flushing error path */
1458 PAD_STREAM_UNLOCK (aggpad);
1460 gst_buffer_unref (buffer);
1461 GST_DEBUG_OBJECT (aggpad, "We are flushing");
1463 return GST_FLOW_FLUSHING;
/* eos error path */
1466 PAD_STREAM_UNLOCK (aggpad);
1468 gst_buffer_unref (buffer);
1469 GST_DEBUG_OBJECT (pad, "We are EOS already...");
1471 return GST_FLOW_EOS;
1475 pad_query_func (GstPad * pad, GstObject * parent, GstQuery * query)
1477 GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
1479 return klass->sink_query (GST_AGGREGATOR (parent),
1480 GST_AGGREGATOR_PAD (pad), query);
1484 pad_event_func (GstPad * pad, GstObject * parent, GstEvent * event)
1486 GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
1488 return klass->sink_event (GST_AGGREGATOR (parent),
1489 GST_AGGREGATOR_PAD (pad), event);
1493 pad_activate_mode_func (GstPad * pad,
1494 GstObject * parent, GstPadMode mode, gboolean active)
1496 GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
1498 if (active == FALSE) {
1499 PAD_LOCK_EVENT (aggpad);
1500 g_atomic_int_set (&aggpad->priv->flushing, TRUE);
1501 gst_buffer_replace (&aggpad->buffer, NULL);
1502 PAD_BROADCAST_EVENT (aggpad);
1503 PAD_UNLOCK_EVENT (aggpad);
1505 g_atomic_int_set (&aggpad->priv->flushing, FALSE);
1506 PAD_LOCK_EVENT (aggpad);
1507 PAD_BROADCAST_EVENT (aggpad);
1508 PAD_UNLOCK_EVENT (aggpad);
/***********************************
 * GstAggregatorPad implementation *
 ************************************/

/* Parent-class pointer, set in gst_aggregator_pad_class_init() via
 * g_type_class_peek_parent() and used for the finalize/dispose/constructed
 * chain-ups below.
 * NOTE(review): G_DEFINE_TYPE below already generates an equivalent
 * gst_aggregator_pad_parent_class — this manual copy looks redundant;
 * confirm before removing. */
static GstPadClass *aggregator_pad_parent_class = NULL;
G_DEFINE_TYPE (GstAggregatorPad, gst_aggregator_pad, GST_TYPE_PAD);
1521 _pad_constructed (GObject * object)
1523 GstPad *pad = GST_PAD (object);
1525 gst_pad_set_chain_function (pad,
1526 GST_DEBUG_FUNCPTR ((GstPadChainFunction) _chain));
1527 gst_pad_set_event_function (pad,
1528 GST_DEBUG_FUNCPTR ((GstPadEventFunction) pad_event_func));
1529 gst_pad_set_query_function (pad,
1530 GST_DEBUG_FUNCPTR ((GstPadQueryFunction) pad_query_func));
1531 gst_pad_set_activatemode_function (pad,
1532 GST_DEBUG_FUNCPTR ((GstPadActivateModeFunction) pad_activate_mode_func));
1536 gst_aggregator_pad_finalize (GObject * object)
1538 GstAggregatorPad *pad = (GstAggregatorPad *) object;
1540 g_mutex_clear (&pad->priv->event_lock);
1541 g_cond_clear (&pad->priv->event_cond);
1542 g_mutex_clear (&pad->priv->stream_lock);
1544 G_OBJECT_CLASS (aggregator_pad_parent_class)->finalize (object);
1548 gst_aggregator_pad_dispose (GObject * object)
1550 GstAggregatorPad *pad = (GstAggregatorPad *) object;
1553 buf = gst_aggregator_pad_steal_buffer (pad);
1555 gst_buffer_unref (buf);
1557 G_OBJECT_CLASS (aggregator_pad_parent_class)->dispose (object);
1561 gst_aggregator_pad_class_init (GstAggregatorPadClass * klass)
1563 GObjectClass *gobject_class = (GObjectClass *) klass;
1565 aggregator_pad_parent_class = g_type_class_peek_parent (klass);
1566 g_type_class_add_private (klass, sizeof (GstAggregatorPadPrivate));
1568 gobject_class->constructed = GST_DEBUG_FUNCPTR (_pad_constructed);
1569 gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_aggregator_pad_finalize);
1570 gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_aggregator_pad_dispose);
1574 gst_aggregator_pad_init (GstAggregatorPad * pad)
1577 G_TYPE_INSTANCE_GET_PRIVATE (pad, GST_TYPE_AGGREGATOR_PAD,
1578 GstAggregatorPadPrivate);
1581 g_mutex_init (&pad->priv->event_lock);
1582 g_cond_init (&pad->priv->event_cond);
1584 g_mutex_init (&pad->priv->stream_lock);
1588 * gst_aggregator_pad_steal_buffer:
1589 * @pad: the pad to get buffer from
1591 * Steal the ref to the buffer currently queued in @pad.
1593 * Returns: (transfer full): The buffer in @pad or NULL if no buffer was
1594 * queued. You should unref the buffer after usage.
1597 gst_aggregator_pad_steal_buffer (GstAggregatorPad * pad)
1599 GstBuffer *buffer = NULL;
1601 PAD_LOCK_EVENT (pad);
1603 GST_TRACE_OBJECT (pad, "Consuming buffer");
1604 buffer = pad->buffer;
1606 if (pad->priv->pending_eos) {
1607 pad->priv->pending_eos = FALSE;
1610 PAD_BROADCAST_EVENT (pad);
1611 GST_DEBUG_OBJECT (pad, "Consummed: %" GST_PTR_FORMAT, buffer);
1613 PAD_UNLOCK_EVENT (pad);
1619 * gst_aggregator_pad_get_buffer:
1620 * @pad: the pad to get buffer from
1622 * Returns: (transfer full): A reference to the buffer in @pad or
1623 * NULL if no buffer was queued. You should unref the buffer after
1627 gst_aggregator_pad_get_buffer (GstAggregatorPad * pad)
1629 GstBuffer *buffer = NULL;
1631 PAD_LOCK_EVENT (pad);
1633 buffer = gst_buffer_ref (pad->buffer);
1634 PAD_UNLOCK_EVENT (pad);
1640 * gst_aggregator_merge_tags:
1641 * @self: a #GstAggregator
1642 * @tags: a #GstTagList to merge
1643 * @mode: the #GstTagMergeMode to use
1645 * Adds tags to so-called pending tags, which will be processed
1646 * before pushing out data downstream.
1648 * Note that this is provided for convenience, and the subclass is
1649 * not required to use this and can still do tag handling on its own.
1654 gst_aggregator_merge_tags (GstAggregator * self,
1655 const GstTagList * tags, GstTagMergeMode mode)
1659 g_return_if_fail (GST_IS_AGGREGATOR (self));
1660 g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
1662 /* FIXME Check if we can use OBJECT lock here! */
1663 GST_OBJECT_LOCK (self);
1665 GST_DEBUG_OBJECT (self, "merging tags %" GST_PTR_FORMAT, tags);
1666 otags = self->priv->tags;
1667 self->priv->tags = gst_tag_list_merge (self->priv->tags, tags, mode);
1669 gst_tag_list_unref (otags);
1670 self->priv->tags_changed = TRUE;
1671 GST_OBJECT_UNLOCK (self);