* * When data is queued on all pads, the aggregate vmethod is called.
*
* * One can peek at the data on any given GstAggregatorPad with the
- * gst_aggregator_pad_get_buffer () method, and take ownership of it
- * with the gst_aggregator_pad_steal_buffer () method. When a buffer
- * has been taken with steal_buffer (), a new buffer can be queued
+ * gst_aggregator_pad_peek_buffer () method, and remove it from the pad
+ * with the gst_aggregator_pad_pop_buffer () method. When a buffer
+ * has been taken with pop_buffer (), a new buffer can be queued
* on that pad.
*
* * If the subclass wishes to push a buffer downstream in its aggregate
}
/* Might become API */
+#if 0
static void gst_aggregator_merge_tags (GstAggregator * aggregator,
const GstTagList * tags, GstTagMergeMode mode);
+#endif
static void gst_aggregator_set_latency_property (GstAggregator * agg,
GstClockTime latency);
static GstClockTime gst_aggregator_get_latency_property (GstAggregator * agg);
static GstClockTime gst_aggregator_get_latency_unlocked (GstAggregator * self);
+static void gst_aggregator_pad_buffer_consumed (GstAggregatorPad * pad);
+
GST_DEBUG_CATEGORY_STATIC (aggregator_debug);
#define GST_CAT_DEFAULT aggregator_debug
static gboolean
gst_aggregator_check_pads_ready (GstAggregator * self)
{
- GstAggregatorPad *pad;
+ GstAggregatorPad *pad = NULL;
GList *l, *sinkpads;
gboolean have_buffer = TRUE;
gboolean have_event_or_query = FALSE;
GST_PAD_STREAM_UNLOCK (self->srcpad);
}
-/**
- * gst_aggregator_finish_buffer:
- * @self: The #GstAggregator
- * @buffer: (transfer full): the #GstBuffer to push.
- *
- * This method will push the provided output buffer downstream. If needed,
- * mandatory events such as stream-start, caps, and segment events will be
- * sent before pushing the buffer.
- */
-GstFlowReturn
-gst_aggregator_finish_buffer (GstAggregator * self, GstBuffer * buffer)
+static GstFlowReturn
+gst_aggregator_default_finish_buffer (GstAggregator * self, GstBuffer * buffer)
{
gst_aggregator_push_mandatory_events (self);
}
}
+/**
+ * gst_aggregator_finish_buffer:
+ * @aggregator: The #GstAggregator
+ * @buffer: (transfer full): the #GstBuffer to push.
+ *
+ * This method will push the provided output buffer downstream. If needed,
+ * mandatory events such as stream-start, caps, and segment events will be
+ * sent before pushing the buffer.
+ */
+GstFlowReturn
+gst_aggregator_finish_buffer (GstAggregator * aggregator, GstBuffer * buffer)
+{
+  /* Delegate to the finish_buffer vfunc so subclasses can intercept or
+   * decorate outgoing buffers; the default implementation
+   * (gst_aggregator_default_finish_buffer, installed in class_init)
+   * pushes the mandatory events and then the buffer downstream. */
+  GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (aggregator);
+
+  /* The default is always set in class_init, so a NULL vfunc means a
+   * subclass explicitly cleared it -- treat that as a programming error. */
+  g_assert (klass->finish_buffer != NULL);
+
+  return klass->finish_buffer (aggregator, buffer);
+}
+
+
static void
gst_aggregator_push_eos (GstAggregator * self)
{
return TRUE;
}
+/* gst_element_foreach_sink_pad callback: while the pad subclass implements
+ * the skip_buffer vfunc, drop buffers from the head of the pad's queue for
+ * as long as skip_buffer returns TRUE, marking each dropped buffer as
+ * consumed first.  Scanning stops at the first non-buffer item (event/query)
+ * or the first buffer the subclass wants to keep.
+ *
+ * NOTE(review): returning FALSE when skip_buffer is not implemented stops
+ * iteration over the *remaining* sink pads as well -- presumably fine because
+ * all sink pads of an element share one pad class, but confirm that is the
+ * intended semantic. */
+static gboolean
+gst_aggregator_pad_skip_buffers (GstElement * self, GstPad * epad,
+    gpointer user_data)
+{
+  GList *item;
+  GstAggregatorPad *aggpad = (GstAggregatorPad *) epad;
+  GstAggregator *agg = (GstAggregator *) self;
+  GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad);
+
+  if (!klass->skip_buffer)
+    return FALSE;
+
+  PAD_LOCK (aggpad);
+
+  /* Walk the queue from the head; capture 'next' before the current link may
+   * be deleted below. */
+  item = g_queue_peek_head_link (&aggpad->priv->data);
+  while (item) {
+    GList *next = item->next;
+
+    if (GST_IS_BUFFER (item->data)
+        && klass->skip_buffer (aggpad, agg, item->data)) {
+      GST_LOG_OBJECT (aggpad, "Skipping %" GST_PTR_FORMAT, item->data);
+      /* Account for the dropped buffer (clock/level bookkeeping) before
+       * releasing our ref and unlinking it from the queue. */
+      gst_aggregator_pad_buffer_consumed (aggpad);
+      gst_buffer_unref (item->data);
+      g_queue_delete_link (&aggpad->priv->data, item);
+    } else {
+      /* First event/query or kept buffer ends the skip run. */
+      break;
+    }
+
+    item = next;
+  }
+
+  PAD_UNLOCK (aggpad);
+
+  return TRUE;
+}
+
+
static void
gst_aggregator_pad_set_flushing (GstAggregatorPad * aggpad,
GstFlowReturn flow_return, gboolean full)
gst_element_foreach_sink_pad (GST_ELEMENT_CAST (self),
gst_aggregator_do_events_and_queries, NULL);
+ if (self->priv->peer_latency_live)
+ gst_element_foreach_sink_pad (GST_ELEMENT_CAST (self),
+ gst_aggregator_pad_skip_buffers, NULL);
+
/* Ensure we have buffers ready (either in clipped_buffer or at the head of
* the queue */
if (!gst_aggregator_wait_and_check (self, &timeout))
goto eat;
}
case GST_EVENT_TAG:
- {
- GstTagList *tags;
-
- gst_event_parse_tag (event, &tags);
-
- if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
- gst_aggregator_merge_tags (self, tags, GST_TAG_MERGE_REPLACE);
- gst_event_unref (event);
- event = NULL;
- goto eat;
- }
- break;
- }
+ goto eat;
default:
{
break;
GST_DEBUG_CATEGORY_INIT (aggregator_debug, "aggregator",
GST_DEBUG_FG_MAGENTA, "GstAggregator");
+ klass->finish_buffer = gst_aggregator_default_finish_buffer;
+
klass->sink_event = gst_aggregator_default_sink_event;
klass->sink_query = gst_aggregator_default_sink_query;
"Start time to use if start-time-selection=set", 0,
G_MAXUINT64,
DEFAULT_START_TIME, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- GST_DEBUG_REGISTER_FUNCPTR (gst_aggregator_do_events_and_queries);
}
static void
GstFlowReturn flow_return;
GstClockTime buf_pts;
- GST_DEBUG_OBJECT (aggpad, "Start chaining a buffer %" GST_PTR_FORMAT, buffer);
-
PAD_LOCK (aggpad);
flow_return = aggpad->priv->flow_return;
if (flow_return != GST_FLOW_OK)
}
/**
- * gst_aggregator_pad_steal_buffer:
+ * gst_aggregator_pad_pop_buffer:
* @pad: the pad to get buffer from
*
* Steal the ref to the buffer currently queued in @pad.
* queued. You should unref the buffer after usage.
*/
GstBuffer *
-gst_aggregator_pad_steal_buffer (GstAggregatorPad * pad)
+gst_aggregator_pad_pop_buffer (GstAggregatorPad * pad)
{
GstBuffer *buffer;
{
GstBuffer *buf;
- buf = gst_aggregator_pad_steal_buffer (pad);
+ buf = gst_aggregator_pad_pop_buffer (pad);
if (buf == NULL)
return FALSE;
}
/**
- * gst_aggregator_pad_get_buffer:
+ * gst_aggregator_pad_peek_buffer:
* @pad: the pad to get buffer from
*
* Returns: (transfer full): A reference to the buffer in @pad or
* usage.
*/
GstBuffer *
-gst_aggregator_pad_get_buffer (GstAggregatorPad * pad)
+gst_aggregator_pad_peek_buffer (GstAggregatorPad * pad)
{
GstBuffer *buffer;
return is_eos;
}
-/**
+#if 0
+/*
* gst_aggregator_merge_tags:
* @self: a #GstAggregator
* @tags: a #GstTagList to merge
self->priv->tags_changed = TRUE;
GST_OBJECT_UNLOCK (self);
}
+#endif
/**
* gst_aggregator_set_latency: