*/
/**
* SECTION: gstaggregator
+ * @title: GstAggregator
* @short_description: manages a set of pads with the purpose of
* aggregating their buffers.
* @see_also: gstcollectpads for historical reasons.
*
* Manages a set of pads with the purpose of aggregating their buffers.
* Control is given to the subclass when all pads have data.
- * <itemizedlist>
- * <listitem><para>
- * Base class for mixers and muxers. Subclasses should at least implement
+ *
+ * * Base class for mixers and muxers. Subclasses should at least implement
* the #GstAggregatorClass.aggregate() virtual method.
- * </para></listitem>
- * <listitem><para>
- * When data is queued on all pads, tha aggregate vmethod is called.
- * </para></listitem>
- * <listitem><para>
- * One can peek at the data on any given GstAggregatorPad with the
+ *
+ * * When data is queued on all pads, the aggregate vmethod is called.
+ *
+ * * One can peek at the data on any given GstAggregatorPad with the
* gst_aggregator_pad_get_buffer () method, and take ownership of it
* with the gst_aggregator_pad_steal_buffer () method. When a buffer
* has been taken with steal_buffer (), a new buffer can be queued
* on that pad.
- * </para></listitem>
- * <listitem><para>
- * If the subclass wishes to push a buffer downstream in its aggregate
+ *
+ * * If the subclass wishes to push a buffer downstream in its aggregate
* implementation, it should do so through the
* gst_aggregator_finish_buffer () method. This method will take care
* of sending and ordering mandatory events such as stream start, caps
* and segment.
- * </para></listitem>
- * <listitem><para>
- * Same goes for EOS events, which should not be pushed directly by the
+ *
+ * * Same goes for EOS events, which should not be pushed directly by the
* subclass, it should instead return GST_FLOW_EOS in its aggregate
* implementation.
- * </para></listitem>
- * <listitem><para>
- * Note that the aggregator logic regarding gap event handling is to turn
+ *
+ * * Note that the aggregator logic regarding gap event handling is to turn
* these into gap buffers with matching PTS and duration. It will also
* flag these buffers with GST_BUFFER_FLAG_GAP and GST_BUFFER_FLAG_DROPPABLE
* to ease their identification and subsequent processing.
- * </para></listitem>
- * </itemizedlist>
+ *
*/
#ifdef HAVE_CONFIG_H
gboolean pending_flush_stop;
gboolean pending_eos;
+ gboolean first_buffer;
+
GQueue buffers;
+ guint num_buffers;
GstClockTime head_position;
GstClockTime tail_position;
GstClockTime head_time;
GMutex flush_lock;
};
-static gboolean
-gst_aggregator_pad_flush (GstAggregatorPad * aggpad, GstAggregator * agg)
+/* Must be called with PAD_LOCK held */
+static void
+gst_aggregator_pad_reset_unlocked (GstAggregatorPad * aggpad)
{
- GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad);
-
- PAD_LOCK (aggpad);
aggpad->priv->pending_eos = FALSE;
aggpad->priv->eos = FALSE;
aggpad->priv->flow_return = GST_FLOW_OK;
aggpad->priv->head_time = GST_CLOCK_TIME_NONE;
aggpad->priv->tail_time = GST_CLOCK_TIME_NONE;
aggpad->priv->time_level = 0;
+ aggpad->priv->first_buffer = TRUE;
+}
+
+static gboolean
+gst_aggregator_pad_flush (GstAggregatorPad * aggpad, GstAggregator * agg)
+{
+ GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad);
+
+ PAD_LOCK (aggpad);
+ gst_aggregator_pad_reset_unlocked (aggpad);
PAD_UNLOCK (aggpad);
if (klass->flush)
struct _GstAggregatorPrivate
{
- gint padcount;
+ gint max_padserial;
/* Our state is >= PAUSED */
gboolean running; /* protected by src_lock */
gboolean peer_latency_live; /* protected by src_lock */
GstClockTime peer_latency_min; /* protected by src_lock */
GstClockTime peer_latency_max; /* protected by src_lock */
- gboolean has_peer_latency;
+ gboolean has_peer_latency; /* protected by src_lock */
GstClockTime sub_latency_min; /* protected by src_lock */
GstClockTime sub_latency_max; /* protected by src_lock */
GMutex src_lock;
GCond src_cond;
- gboolean first_buffer;
+ gboolean first_buffer; /* protected by object lock */
GstAggregatorStartTimeSelection start_time_selection;
GstClockTime start_time;
GstEvent *event;
gboolean result;
gboolean flush;
+ gboolean only_to_active_pads;
gboolean one_actually_seeked;
} EventData;
{
GstAggregatorPad *pad;
GList *l, *sinkpads;
+ gboolean have_buffer = TRUE;
+ gboolean have_event = FALSE;
GST_LOG_OBJECT (self, "checking pads");
PAD_LOCK (pad);
- /* In live mode, having a single pad with buffers is enough to
- * generate a start time from it. In non-live mode all pads need
- * to have a buffer
- */
- if (self->priv->peer_latency_live &&
- !gst_aggregator_pad_queue_is_empty (pad))
- self->priv->first_buffer = FALSE;
+ if (pad->priv->num_buffers == 0) {
+ if (!gst_aggregator_pad_queue_is_empty (pad))
+ have_event = TRUE;
+ if (!pad->priv->eos) {
+ have_buffer = FALSE;
- if (gst_aggregator_pad_queue_is_empty (pad) && !pad->priv->eos) {
- PAD_UNLOCK (pad);
- goto pad_not_ready;
+ /* If not live we need data on all pads, so leave the loop */
+ if (!self->priv->peer_latency_live) {
+ PAD_UNLOCK (pad);
+ goto pad_not_ready;
+ }
+ }
+ } else if (self->priv->peer_latency_live) {
+ /* In live mode, having a single pad with buffers is enough to
+ * generate a start time from it. In non-live mode all pads need
+ * to have a buffer
+ */
+ self->priv->first_buffer = FALSE;
}
- PAD_UNLOCK (pad);
+ PAD_UNLOCK (pad);
}
- self->priv->first_buffer = FALSE;
+ if (!have_buffer && !have_event)
+ goto pad_not_ready;
+
+ if (have_buffer)
+ self->priv->first_buffer = FALSE;
GST_OBJECT_UNLOCK (self);
GST_LOG_OBJECT (self, "pads are ready");
}
pad_not_ready:
{
- GST_LOG_OBJECT (pad, "pad not ready to be aggregated yet");
+ if (have_event)
+ GST_LOG_OBJECT (pad, "pad not ready to be aggregated yet,"
+ " but waking up for serialized event");
+ else
+ GST_LOG_OBJECT (pad, "pad not ready to be aggregated yet");
GST_OBJECT_UNLOCK (self);
- return FALSE;
+ return have_event;
}
}
* and if a pad does not have a buffer in time we ignore
* that pad.
*/
+ GST_OBJECT_LOCK (self);
if (!GST_CLOCK_TIME_IS_VALID (latency) ||
!GST_IS_CLOCK (GST_ELEMENT_CLOCK (self)) ||
!GST_CLOCK_TIME_IS_VALID (start) ||
* then check if we're ready now. If we return FALSE,
* we will be directly called again.
*/
+ GST_OBJECT_UNLOCK (self);
SRC_WAIT (self);
} else {
GstClockTime base_time, time;
GST_DEBUG_OBJECT (self, "got subclass start time: %" GST_TIME_FORMAT,
GST_TIME_ARGS (start));
- GST_OBJECT_LOCK (self);
base_time = GST_ELEMENT_CAST (self)->base_time;
- clock = GST_ELEMENT_CLOCK (self);
- if (clock)
- gst_object_ref (clock);
+ clock = gst_object_ref (GST_ELEMENT_CLOCK (self));
GST_OBJECT_UNLOCK (self);
time = base_time + start;
GST_TIME_FORMAT " (base %" GST_TIME_FORMAT " start %" GST_TIME_FORMAT
" latency %" GST_TIME_FORMAT " current %" GST_TIME_FORMAT ")",
GST_TIME_ARGS (time),
- GST_TIME_ARGS (GST_ELEMENT_CAST (self)->base_time),
+ GST_TIME_ARGS (base_time),
GST_TIME_ARGS (start), GST_TIME_ARGS (latency),
GST_TIME_ARGS (gst_clock_get_time (clock)));
}
GST_DEBUG_OBJECT (self,
- "clock returned %d (jitter: %s%" GST_TIME_FORMAT ")",
- status, (jitter < 0 ? "-" : " "),
- GST_TIME_ARGS ((jitter < 0 ? -jitter : jitter)));
+ "clock returned %d (jitter: %" GST_STIME_FORMAT ")",
+ status, GST_STIME_ARGS (jitter));
/* we timed out */
if (status == GST_CLOCK_OK || status == GST_CLOCK_EARLY) {
}
item = next;
}
+ aggpad->priv->num_buffers = 0;
PAD_BROADCAST_EVENT (aggpad);
PAD_UNLOCK (aggpad);
GstAggregatorClass *klass;
gboolean result;
- self->priv->running = TRUE;
self->priv->send_stream_start = TRUE;
self->priv->send_segment = TRUE;
self->priv->send_eos = TRUE;
GST_FORMAT_TIME, aggpad->priv->head_position);
else
aggpad->priv->head_time = GST_CLOCK_TIME_NONE;
+
+ if (!GST_CLOCK_TIME_IS_VALID (aggpad->priv->tail_time))
+ aggpad->priv->tail_time = aggpad->priv->head_time;
} else {
if (GST_CLOCK_TIME_IS_VALID (aggpad->priv->tail_position) &&
aggpad->segment.format == GST_FORMAT_TIME)
SRC_UNLOCK (self);
}
-static GstPad *
-gst_aggregator_request_new_pad (GstElement * element,
+static GstAggregatorPad *
+gst_aggregator_default_create_new_pad (GstAggregator * self,
GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
{
- GstAggregator *self;
GstAggregatorPad *agg_pad;
+ GstAggregatorPrivate *priv = self->priv;
+ gint serial = 0;
+ gchar *name = NULL;
- GstElementClass *klass = GST_ELEMENT_GET_CLASS (element);
- GstAggregatorPrivate *priv = GST_AGGREGATOR (element)->priv;
+ if (templ->direction != GST_PAD_SINK ||
+ g_strcmp0 (templ->name_template, "sink_%u") != 0)
+ goto not_sink;
- self = GST_AGGREGATOR (element);
+ GST_OBJECT_LOCK (self);
+ if (req_name == NULL || strlen (req_name) < 6
+ || !g_str_has_prefix (req_name, "sink_")) {
+ /* no name given when requesting the pad, use next available int */
+ serial = ++priv->max_padserial;
+ } else {
+ /* parse serial number from requested padname */
+ serial = g_ascii_strtoull (&req_name[5], NULL, 10);
+ if (serial > priv->max_padserial)
+ priv->max_padserial = serial;
+ }
- if (templ == gst_element_class_get_pad_template (klass, "sink_%u")) {
- gint serial = 0;
- gchar *name = NULL;
+ name = g_strdup_printf ("sink_%u", serial);
+ agg_pad = g_object_new (GST_AGGREGATOR_GET_CLASS (self)->sinkpads_type,
+ "name", name, "direction", GST_PAD_SINK, "template", templ, NULL);
+ g_free (name);
- GST_OBJECT_LOCK (element);
- if (req_name == NULL || strlen (req_name) < 6
- || !g_str_has_prefix (req_name, "sink_")) {
- /* no name given when requesting the pad, use next available int */
- priv->padcount++;
- } else {
- /* parse serial number from requested padname */
- serial = g_ascii_strtoull (&req_name[5], NULL, 10);
- if (serial >= priv->padcount)
- priv->padcount = serial;
- }
+ GST_OBJECT_UNLOCK (self);
- name = g_strdup_printf ("sink_%u", priv->padcount);
- agg_pad = g_object_new (GST_AGGREGATOR_GET_CLASS (self)->sinkpads_type,
- "name", name, "direction", GST_PAD_SINK, "template", templ, NULL);
- g_free (name);
+ return agg_pad;
- GST_OBJECT_UNLOCK (element);
+ /* errors */
+not_sink:
+ {
+ GST_WARNING_OBJECT (self, "request new pad that is not a SINK pad\n");
+ return NULL;
+ }
+}
- } else {
+static GstPad *
+gst_aggregator_request_new_pad (GstElement * element,
+ GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
+{
+ GstAggregator *self;
+ GstAggregatorPad *agg_pad;
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (element);
+ GstAggregatorPrivate *priv = GST_AGGREGATOR (element)->priv;
+
+ self = GST_AGGREGATOR (element);
+
+ agg_pad = klass->create_new_pad (self, templ, req_name, caps);
+ if (!agg_pad) {
+ GST_ERROR_OBJECT (element, "Couldn't create new pad");
return NULL;
}
else
max = GST_CLOCK_TIME_NONE;
- if (live && min > max) {
- GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
- ("%s", "Latency too big"),
- ("The requested latency value is too big for the current pipeline. "
- "Limiting to %" G_GINT64_FORMAT, max));
- min = max;
- /* FIXME: This could in theory become negative, but in
- * that case all is lost anyway */
- self->priv->latency -= min - max;
- /* FIXME: shouldn't we g_object_notify() the change here? */
- }
-
SRC_BROADCAST (self);
GST_DEBUG_OBJECT (self, "configured latency live:%s min:%" G_GINT64_FORMAT
GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
if (peer) {
- ret = gst_pad_send_event (peer, gst_event_ref (evdata->event));
- GST_DEBUG_OBJECT (pad, "return of event push is %d", ret);
- gst_object_unref (peer);
+ if (evdata->only_to_active_pads && aggpad->priv->first_buffer) {
+ GST_DEBUG_OBJECT (pad, "not sending event to inactive pad");
+ ret = TRUE;
+ } else {
+ ret = gst_pad_send_event (peer, gst_event_ref (evdata->event));
+ GST_DEBUG_OBJECT (pad, "return of event push is %d", ret);
+ gst_object_unref (peer);
+ }
}
if (ret == FALSE) {
- if (GST_EVENT_TYPE (evdata->event) == GST_EVENT_SEEK)
- GST_ERROR_OBJECT (pad, "Event %" GST_PTR_FORMAT " failed", evdata->event);
-
if (GST_EVENT_TYPE (evdata->event) == GST_EVENT_SEEK) {
GstQuery *seeking = gst_query_new_seeking (GST_FORMAT_TIME);
+ GST_DEBUG_OBJECT (pad, "Event %" GST_PTR_FORMAT " failed", evdata->event);
+
if (gst_pad_query (peer, seeking)) {
gboolean seekable;
static EventData
gst_aggregator_forward_event_to_all_sinkpads (GstAggregator * self,
- GstEvent * event, gboolean flush)
+ GstEvent * event, gboolean flush, gboolean only_to_active_pads)
{
EventData evdata;
evdata.result = TRUE;
evdata.flush = flush;
evdata.one_actually_seeked = FALSE;
+ evdata.only_to_active_pads = only_to_active_pads;
/* We first need to set all pads as flushing in a first pass
* as flush_start flush_stop is sometimes sent synchronously
GST_OBJECT_UNLOCK (self);
/* forward the seek upstream */
- evdata = gst_aggregator_forward_event_to_all_sinkpads (self, event, flush);
+ evdata =
+ gst_aggregator_forward_event_to_all_sinkpads (self, event, flush, FALSE);
event = NULL;
if (!evdata.result || !evdata.one_actually_seeked) {
}
}
- evdata = gst_aggregator_forward_event_to_all_sinkpads (self, event, FALSE);
+ /* Don't forward QOS events to pads that had no active buffer yet. Otherwise
+ * they will receive a QOS event that has earliest_time=0 (because we can't
+ * have negative timestamps), and consider their buffer as too late */
+ evdata =
+ gst_aggregator_forward_event_to_all_sinkpads (self, event, FALSE,
+ GST_EVENT_TYPE (event) == GST_EVENT_QOS);
res = evdata.result;
done:
* Gets the latency value. See gst_aggregator_set_latency for
* more details.
*
- * Returns: The time in nanoseconds to wait for data to arrive on a sink pad
+ * Returns: The time in nanoseconds to wait for data to arrive on a sink pad
* before a pad is deemed unresponsive. A value of -1 means an
* unlimited time.
*/
klass->src_event = gst_aggregator_default_src_event;
klass->src_query = gst_aggregator_default_src_query;
+ klass->create_new_pad = gst_aggregator_default_create_new_pad;
+
gstelement_class->request_new_pad =
GST_DEBUG_FUNCPTR (gst_aggregator_request_new_pad);
gstelement_class->send_event = GST_DEBUG_FUNCPTR (gst_aggregator_send_event);
gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
g_return_if_fail (pad_template != NULL);
- priv->padcount = -1;
+ priv->max_padserial = -1;
priv->tags_changed = FALSE;
self->priv->peer_latency_live = FALSE;
return type;
}
-/* Must be called with PAD lock held */
+/* Must be called with SRC lock and PAD lock held */
static gboolean
gst_aggregator_pad_has_space (GstAggregator * self, GstAggregatorPad * aggpad)
{
if (g_queue_get_length (&aggpad->priv->buffers) == 0)
return TRUE;
+ /* We also want at least two buffers, one is being processed and one is ready
+ * for the next iteration when we operate in live mode. */
+ if (self->priv->peer_latency_live && aggpad->priv->num_buffers < 2)
+ return TRUE;
+
/* zero latency, if there is a buffer, it's full */
if (self->priv->latency == 0)
return FALSE;
gst_aggregator_pad_chain_internal (GstAggregator * self,
GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head)
{
- GstBuffer *actual_buf = buffer;
GstAggregatorClass *aggclass = GST_AGGREGATOR_GET_CLASS (self);
GstFlowReturn flow_return;
GstClockTime buf_pts;
if (aggpad->priv->pending_eos == TRUE)
goto eos;
- flow_return = aggpad->priv->flow_return;
- if (flow_return != GST_FLOW_OK)
- goto flushing;
-
PAD_UNLOCK (aggpad);
if (aggclass->clip && head) {
- aggclass->clip (self, aggpad, buffer, &actual_buf);
+ buffer = aggclass->clip (self, aggpad, buffer);
}
- if (actual_buf == NULL) {
- GST_LOG_OBJECT (actual_buf, "Buffer dropped by clip function");
+ if (buffer == NULL) {
+ GST_LOG_OBJECT (aggpad, "Buffer dropped by clip function");
goto done;
}
- buf_pts = GST_BUFFER_PTS (actual_buf);
+ buf_pts = GST_BUFFER_PTS (buffer);
+
+ aggpad->priv->first_buffer = FALSE;
for (;;) {
SRC_LOCK (self);
+ GST_OBJECT_LOCK (self);
PAD_LOCK (aggpad);
if (gst_aggregator_pad_has_space (self, aggpad)
&& aggpad->priv->flow_return == GST_FLOW_OK) {
if (head)
- g_queue_push_head (&aggpad->priv->buffers, actual_buf);
+ g_queue_push_head (&aggpad->priv->buffers, buffer);
else
- g_queue_push_tail (&aggpad->priv->buffers, actual_buf);
- apply_buffer (aggpad, actual_buf, head);
- actual_buf = buffer = NULL;
+ g_queue_push_tail (&aggpad->priv->buffers, buffer);
+ apply_buffer (aggpad, buffer, head);
+ aggpad->priv->num_buffers++;
+ buffer = NULL;
SRC_BROADCAST (self);
break;
}
flow_return = aggpad->priv->flow_return;
if (flow_return != GST_FLOW_OK) {
+ GST_OBJECT_UNLOCK (self);
SRC_UNLOCK (self);
goto flushing;
}
GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
+ GST_OBJECT_UNLOCK (self);
SRC_UNLOCK (self);
PAD_WAIT_EVENT (aggpad);
start_time = 0;
break;
case GST_AGGREGATOR_START_TIME_SELECTION_FIRST:
+ GST_OBJECT_LOCK (aggpad);
if (aggpad->segment.format == GST_FORMAT_TIME) {
start_time = buf_pts;
if (start_time != -1) {
"as the segment is a %s segment instead of a time segment",
gst_format_get_name (aggpad->segment.format));
}
+ GST_OBJECT_UNLOCK (aggpad);
break;
case GST_AGGREGATOR_START_TIME_SELECTION_SET:
start_time = self->priv->start_time;
self->segment.position = start_time;
else
self->segment.position = MIN (start_time, self->segment.position);
- self->segment.start = MIN (start_time, self->segment.start);
- self->segment.time = MIN (start_time, self->segment.time);
GST_DEBUG_OBJECT (self, "Selecting start time %" GST_TIME_FORMAT,
GST_TIME_ARGS (start_time));
}
PAD_UNLOCK (aggpad);
+ GST_OBJECT_UNLOCK (self);
SRC_UNLOCK (self);
done:
return FALSE;
}
-static gboolean
+static GstFlowReturn
gst_aggregator_pad_event_func (GstPad * pad, GstObject * parent,
GstEvent * event)
{
+ GstFlowReturn ret = GST_FLOW_OK;
GstAggregator *self = GST_AGGREGATOR (parent);
GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
PAD_LOCK (aggpad);
if (aggpad->priv->flow_return != GST_FLOW_OK
- && GST_EVENT_TYPE (event) != GST_EVENT_FLUSH_STOP)
+ && GST_EVENT_TYPE (event) != GST_EVENT_FLUSH_STOP) {
+ ret = aggpad->priv->flow_return;
goto flushing;
+ }
if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
GST_OBJECT_LOCK (aggpad);
SRC_UNLOCK (self);
}
- if (event)
- return klass->sink_event (self, aggpad, event);
- else
- return TRUE;
+ if (event) {
+ gboolean is_caps = (GST_EVENT_TYPE (event) == GST_EVENT_CAPS);
+
+ if (!klass->sink_event (self, aggpad, event)) {
+ /* Copied from GstPad to convert boolean to a GstFlowReturn in
+ * the event handling func */
+ ret = is_caps ? GST_FLOW_NOT_NEGOTIATED : GST_FLOW_ERROR;
+ }
+ }
+
+ return ret;
flushing:
GST_DEBUG_OBJECT (aggpad, "Pad is %s, dropping event",
if (GST_EVENT_IS_STICKY (event))
gst_pad_store_sticky_event (pad, event);
gst_event_unref (event);
- return FALSE;
+
+ return ret;
}
static gboolean
gst_pad_set_chain_function (pad,
GST_DEBUG_FUNCPTR (gst_aggregator_pad_chain));
- gst_pad_set_event_function (pad,
- GST_DEBUG_FUNCPTR (gst_aggregator_pad_event_func));
+ gst_pad_set_event_full_function_full (pad,
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_event_func), NULL, NULL);
gst_pad_set_query_function (pad,
GST_DEBUG_FUNCPTR (gst_aggregator_pad_query_func));
gst_pad_set_activatemode_function (pad,
g_mutex_init (&pad->priv->flush_lock);
g_mutex_init (&pad->priv->lock);
+
+ gst_aggregator_pad_reset_unlocked (pad);
}
/**
if (buffer) {
apply_buffer (pad, buffer, FALSE);
+ pad->priv->num_buffers--;
GST_TRACE_OBJECT (pad, "Consuming buffer");
if (gst_aggregator_pad_queue_is_empty (pad) && pad->priv->pending_eos) {
pad->priv->pending_eos = FALSE;