-/* GStreamer
+/* GStreamer aggregator base class
* Copyright (C) 2014 Mathieu Duponchelle <mathieu.duponchelle@opencreed.com>
* Copyright (C) 2014 Thibault Saunier <tsaunier@gnome.org>
*
* Control is given to the subclass when all pads have data.
* <itemizedlist>
* <listitem><para>
- * Base class for mixers and muxers. Implementers should at least implement
- * the aggregate () vmethod.
+ * Base class for mixers and muxers. Subclasses should at least implement
+ * the #GstAggregatorClass.aggregate() virtual method.
* </para></listitem>
* <listitem><para>
 * When data is queued on all pads, the aggregate vmethod is called.
#define GST_CAT_DEFAULT aggregator_debug
/* GstAggregatorPad definitions */
-#define PAD_LOCK_EVENT(pad) G_STMT_START { \
- GST_LOG_OBJECT (pad, "Taking EVENT lock from thread %p", \
+#define PAD_LOCK(pad) G_STMT_START { \
+ GST_TRACE_OBJECT (pad, "Taking PAD lock from thread %p", \
g_thread_self()); \
- g_mutex_lock(&pad->priv->event_lock); \
- GST_LOG_OBJECT (pad, "Took EVENT lock from thread %p", \
+ g_mutex_lock(&pad->priv->lock); \
+ GST_TRACE_OBJECT (pad, "Took PAD lock from thread %p", \
g_thread_self()); \
} G_STMT_END
-#define PAD_UNLOCK_EVENT(pad) G_STMT_START { \
- GST_LOG_OBJECT (pad, "Releasing EVENT lock from thread %p", \
+#define PAD_UNLOCK(pad) G_STMT_START { \
+ GST_TRACE_OBJECT (pad, "Releasing PAD lock from thread %p", \
g_thread_self()); \
- g_mutex_unlock(&pad->priv->event_lock); \
- GST_LOG_OBJECT (pad, "Release EVENT lock from thread %p", \
+ g_mutex_unlock(&pad->priv->lock); \
+ GST_TRACE_OBJECT (pad, "Release PAD lock from thread %p", \
g_thread_self()); \
} G_STMT_END
#define PAD_WAIT_EVENT(pad) G_STMT_START { \
- GST_LOG_OBJECT (pad, "Waiting for EVENT on thread %p", \
+ GST_LOG_OBJECT (pad, "Waiting for EVENT on thread %p", \
g_thread_self()); \
- g_cond_wait(&(((GstAggregatorPad* )pad)->priv->event_cond), \
- &(pad->priv->event_lock)); \
- GST_LOG_OBJECT (pad, "DONE Waiting for EVENT on thread %p", \
+ g_cond_wait(&(((GstAggregatorPad* )pad)->priv->event_cond), \
+ (&((GstAggregatorPad*)pad)->priv->lock)); \
+ GST_LOG_OBJECT (pad, "DONE Waiting for EVENT on thread %p", \
g_thread_self()); \
} G_STMT_END
-#define PAD_BROADCAST_EVENT(pad) { \
+#define PAD_BROADCAST_EVENT(pad) G_STMT_START { \
GST_LOG_OBJECT (pad, "Signaling EVENT from thread %p", \
- g_thread_self()); \
- g_cond_broadcast(&(((GstAggregatorPad* )pad)->priv->event_cond)); \
- }
-
-#define GST_AGGREGATOR_SETCAPS_LOCK(self) G_STMT_START { \
- GST_LOG_OBJECT (self, "Taking SETCAPS lock from thread %p", \
- g_thread_self()); \
- g_mutex_lock(&self->priv->setcaps_lock); \
- GST_LOG_OBJECT (self, "Took SETCAPS lock from thread %p", \
- g_thread_self()); \
+ g_thread_self()); \
+ g_cond_broadcast(&(((GstAggregatorPad* )pad)->priv->event_cond)); \
} G_STMT_END
-#define GST_AGGREGATOR_SETCAPS_UNLOCK(self) G_STMT_START { \
- GST_LOG_OBJECT (self, "Releasing SETCAPS lock from thread %p", \
- g_thread_self()); \
- g_mutex_unlock(&self->priv->setcaps_lock); \
- GST_LOG_OBJECT (self, "Took SETCAPS lock from thread %p", \
- g_thread_self()); \
- } G_STMT_END
-#define PAD_STREAM_LOCK(pad) G_STMT_START { \
- GST_LOG_OBJECT (pad, "Taking lock from thread %p", \
+#define PAD_STREAM_LOCK(pad) G_STMT_START { \
+ GST_TRACE_OBJECT (pad, "Taking lock from thread %p", \
g_thread_self()); \
- g_mutex_lock(&pad->priv->stream_lock); \
- GST_LOG_OBJECT (pad, "Took lock from thread %p", \
+ g_mutex_lock(&pad->priv->stream_lock); \
+ GST_TRACE_OBJECT (pad, "Took lock from thread %p", \
g_thread_self()); \
} G_STMT_END
-#define PAD_STREAM_UNLOCK(pad) G_STMT_START { \
- GST_LOG_OBJECT (pad, "Releasing lock from thread %p", \
+#define PAD_STREAM_UNLOCK(pad) G_STMT_START { \
+ GST_TRACE_OBJECT (pad, "Releasing lock from thread %p", \
g_thread_self()); \
- g_mutex_unlock(&pad->priv->stream_lock); \
- GST_LOG_OBJECT (pad, "Release lock from thread %p", \
+ g_mutex_unlock(&pad->priv->stream_lock); \
+ GST_TRACE_OBJECT (pad, "Release lock from thread %p", \
g_thread_self()); \
} G_STMT_END
#define SRC_STREAM_LOCK(self) G_STMT_START { \
- GST_LOG_OBJECT (self, "Taking src STREAM lock from thread %p", \
+ GST_TRACE_OBJECT (self, "Taking src STREAM lock from thread %p", \
g_thread_self()); \
g_mutex_lock(&self->priv->src_lock); \
- GST_LOG_OBJECT (self, "Took src STREAM lock from thread %p", \
+ GST_TRACE_OBJECT (self, "Took src STREAM lock from thread %p", \
g_thread_self()); \
} G_STMT_END
#define SRC_STREAM_UNLOCK(self) G_STMT_START { \
- GST_LOG_OBJECT (self, "Releasing src STREAM lock from thread %p", \
+ GST_TRACE_OBJECT (self, "Releasing src STREAM lock from thread %p", \
g_thread_self()); \
g_mutex_unlock(&self->priv->src_lock); \
- GST_LOG_OBJECT (self, "Release src STREAM lock from thread %p", \
+ GST_TRACE_OBJECT (self, "Released src STREAM lock from thread %p", \
g_thread_self()); \
} G_STMT_END
g_thread_self()); \
} G_STMT_END
-#define SRC_STREAM_BROADCAST(self) G_STMT_START { \
- GST_LOG_OBJECT (self, "Signaling src STREAM from thread %p", \
+#define SRC_STREAM_BROADCAST(self) G_STMT_START { \
+ GST_LOG_OBJECT (self, "Signaling src STREAM from thread %p", \
g_thread_self()); \
- g_cond_broadcast(&(self->priv->src_cond)); \
- } G_STMT_END
-
-#define KICK_SRC_THREAD(self) G_STMT_START { \
- SRC_STREAM_LOCK (self); \
- GST_LOG_OBJECT (self, "kicking src STREAM from thread %p", \
- g_thread_self ()); \
if (self->priv->aggregate_id) \
gst_clock_id_unschedule (self->priv->aggregate_id); \
- self->priv->n_kicks++; \
- SRC_STREAM_BROADCAST (self); \
- SRC_STREAM_UNLOCK (self); \
+ g_cond_broadcast(&(self->priv->src_cond)); \
} G_STMT_END
struct _GstAggregatorPadPrivate
{
+ /* To always be used atomically */
+ gboolean flushing;
+
+ /* Following fields are protected by the PAD_LOCK */
gboolean pending_flush_start;
gboolean pending_flush_stop;
gboolean pending_eos;
- gboolean flushing;
- GMutex event_lock;
- GCond event_cond;
+ GstBuffer *buffer;
+ gboolean eos;
+ GMutex lock;
+ GCond event_cond;
GMutex stream_lock;
};
static gboolean
-_aggpad_flush (GstAggregatorPad * aggpad, GstAggregator * agg)
+gst_aggregator_pad_flush (GstAggregatorPad * aggpad, GstAggregator * agg)
{
GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad);
- aggpad->eos = FALSE;
+ PAD_LOCK (aggpad);
+ aggpad->priv->eos = FALSE;
aggpad->priv->flushing = FALSE;
+ PAD_UNLOCK (aggpad);
if (klass->flush)
return klass->flush (aggpad, agg);
*************************************/
static GstElementClass *aggregator_parent_class = NULL;
+/* All members are protected by the object lock unless otherwise noted */
+
struct _GstAggregatorPrivate
{
gint padcount;
/* Our state is >= PAUSED */
- gboolean running;
-
+ gboolean running; /* protected by SRC_STREAM_LOCK */
gint seqnum;
- gboolean send_stream_start;
+ gboolean send_stream_start; /* protected by srcpad stream lock */
gboolean send_segment;
gboolean flush_seeking;
gboolean pending_flush_start;
- gboolean send_eos;
+ gboolean send_eos; /* protected by srcpad stream lock */
GstFlowReturn flow_return;
- GstCaps *srccaps;
+ GstCaps *srccaps; /* protected by the srcpad stream lock */
GstTagList *tags;
gboolean tags_changed;
- /* Lock to prevent two src setcaps from happening at the same time */
- GMutex setcaps_lock;
-
gboolean latency_live;
GstClockTime latency_min;
GstClockTime latency_max;
+ GstClockTime sub_latency_min;
+ GstClockTime sub_latency_max;
+
/* aggregate */
- GstClockID aggregate_id;
- gint n_kicks;
+ GstClockID aggregate_id; /* protected by src_lock */
GMutex src_lock;
GCond src_cond;
+
+ /* properties */
+ gint64 latency;
};
typedef struct
gboolean one_actually_seeked;
} EventData;
-#define DEFAULT_LATENCY -1
+#define DEFAULT_LATENCY 0
enum
{
/**
* gst_aggregator_iterate_sinkpads:
* @self: The #GstAggregator
- * @func: The function to call.
- * @user_data: The data to pass to @func.
+ * @func: (scope call): The function to call.
+ * @user_data: (closure): The data to pass to @func.
*
 * Iterate the sinkpads of @self to call a function on them.
*
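 * A minimal usage sketch; the count_pad callback and its counter are
 * illustrative only, not part of this patch. Returning TRUE from the
 * callback keeps the iteration going:
 * |[
 * static gboolean
 * count_pad (GstAggregator * self, GstAggregatorPad * pad, gpointer user_data)
 * {
 *   guint *count = user_data;
 *
 *   (*count)++;
 *   return TRUE;
 * }
 *
 * guint count = 0;
 * gst_aggregator_iterate_sinkpads (self, count_pad, &count);
 * ]|
 *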
switch (gst_iterator_next (iter, &item)) {
case GST_ITERATOR_OK:
{
- GstPad *pad;
+ GstAggregatorPad *pad;
pad = g_value_get_object (&item);
break;
}
- GST_LOG_OBJECT (self, "calling function on pad %s:%s",
- GST_DEBUG_PAD_NAME (pad));
+ GST_LOG_OBJECT (pad, "calling function %s on pad",
+ GST_DEBUG_FUNCPTR_NAME (func));
+
result = func (self, pad, user_data);
done = !result;
return result;
}
-static inline gboolean
-_check_all_pads_with_data_or_eos (GstAggregator * self,
- GstAggregatorPad * aggpad, gpointer user_data)
+static gboolean
+gst_aggregator_check_pads_ready (GstAggregator * self)
{
- if (aggpad->buffer || aggpad->eos) {
- return TRUE;
+ GstAggregatorPad *pad;
+ GList *l, *sinkpads;
+
+ GST_LOG_OBJECT (self, "checking pads");
+
+ GST_OBJECT_LOCK (self);
+
+ sinkpads = GST_ELEMENT_CAST (self)->sinkpads;
+ if (sinkpads == NULL)
+ goto no_sinkpads;
+
+ for (l = sinkpads; l != NULL; l = l->next) {
+ pad = l->data;
+
+ PAD_LOCK (pad);
+ if (pad->priv->buffer == NULL && !pad->priv->eos) {
+ PAD_UNLOCK (pad);
+ goto pad_not_ready;
+ }
+ PAD_UNLOCK (pad);
+
}
- GST_LOG_OBJECT (aggpad, "Not ready to be aggregated");
+ GST_OBJECT_UNLOCK (self);
+ GST_LOG_OBJECT (self, "pads are ready");
+ return TRUE;
- return FALSE;
+no_sinkpads:
+ {
+ GST_LOG_OBJECT (self, "pads not ready: no sink pads");
+ GST_OBJECT_UNLOCK (self);
+ return FALSE;
+ }
+pad_not_ready:
+ {
+ GST_LOG_OBJECT (pad, "pad not ready to be aggregated yet");
+ GST_OBJECT_UNLOCK (self);
+ return FALSE;
+ }
}
static void
-_reset_flow_values (GstAggregator * self)
+gst_aggregator_reset_flow_values (GstAggregator * self)
{
+ GST_OBJECT_LOCK (self);
self->priv->flow_return = GST_FLOW_FLUSHING;
self->priv->send_stream_start = TRUE;
self->priv->send_segment = TRUE;
gst_segment_init (&self->segment, GST_FORMAT_TIME);
+ GST_OBJECT_UNLOCK (self);
}
static inline void
-_push_mandatory_events (GstAggregator * self)
+gst_aggregator_push_mandatory_events (GstAggregator * self)
{
GstAggregatorPrivate *priv = self->priv;
+ GstEvent *segment = NULL;
+ GstEvent *tags = NULL;
- if (g_atomic_int_get (&self->priv->send_stream_start)) {
+ if (self->priv->send_stream_start) {
gchar s_id[32];
GST_INFO_OBJECT (self, "pushing stream start");
if (!gst_pad_push_event (self->srcpad, gst_event_new_stream_start (s_id))) {
GST_WARNING_OBJECT (self->srcpad, "Sending stream start event failed");
}
- g_atomic_int_set (&self->priv->send_stream_start, FALSE);
+ self->priv->send_stream_start = FALSE;
}
if (self->priv->srccaps) {
self->priv->srccaps = NULL;
}
- if (g_atomic_int_get (&self->priv->send_segment)) {
- if (!g_atomic_int_get (&self->priv->flush_seeking)) {
- GstEvent *segev = gst_event_new_segment (&self->segment);
+ GST_OBJECT_LOCK (self);
+ if (self->priv->send_segment && !self->priv->flush_seeking) {
+ segment = gst_event_new_segment (&self->segment);
- if (!self->priv->seqnum)
- self->priv->seqnum = gst_event_get_seqnum (segev);
- else
- gst_event_set_seqnum (segev, self->priv->seqnum);
+ if (!self->priv->seqnum)
+ self->priv->seqnum = gst_event_get_seqnum (segment);
+ else
+ gst_event_set_seqnum (segment, self->priv->seqnum);
+ self->priv->send_segment = FALSE;
- GST_DEBUG_OBJECT (self, "pushing segment %" GST_PTR_FORMAT, segev);
- gst_pad_push_event (self->srcpad, segev);
- g_atomic_int_set (&self->priv->send_segment, FALSE);
- }
+ GST_DEBUG_OBJECT (self, "pushing segment %" GST_PTR_FORMAT, segment);
}
if (priv->tags && priv->tags_changed) {
- gst_pad_push_event (self->srcpad,
- gst_event_new_tag (gst_tag_list_ref (priv->tags)));
+ tags = gst_event_new_tag (gst_tag_list_ref (priv->tags));
priv->tags_changed = FALSE;
}
+ GST_OBJECT_UNLOCK (self);
+
+ if (segment)
+ gst_pad_push_event (self->srcpad, segment);
+ if (tags)
+ gst_pad_push_event (self->srcpad, tags);
+
}
/**
void
gst_aggregator_set_src_caps (GstAggregator * self, GstCaps * caps)
{
- GST_AGGREGATOR_SETCAPS_LOCK (self);
+ GST_PAD_STREAM_LOCK (self->srcpad);
gst_caps_replace (&self->priv->srccaps, caps);
- _push_mandatory_events (self);
- GST_AGGREGATOR_SETCAPS_UNLOCK (self);
+ gst_aggregator_push_mandatory_events (self);
+ GST_PAD_STREAM_UNLOCK (self->srcpad);
}
/**
* gst_aggregator_finish_buffer:
* @self: The #GstAggregator
- * @buffer: the #GstBuffer to push.
+ * @buffer: (transfer full): the #GstBuffer to push.
*
- * This method will take care of sending mandatory events before pushing
- * the provided buffer.
+ * This method will push the provided output buffer downstream. If needed,
+ * mandatory events such as stream-start, caps, and segment events will be
+ * sent before pushing the buffer.
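+ *
+ * A sketch of typical use from a subclass aggregate() implementation;
+ * my_agg_produce_output() is a hypothetical helper, not part of this patch:
+ * |[
+ * static GstFlowReturn
+ * my_agg_aggregate (GstAggregator * agg, gboolean timeout)
+ * {
+ *   GstBuffer *outbuf = my_agg_produce_output (agg);
+ *
+ *   if (outbuf == NULL)
+ *     return GST_FLOW_EOS;
+ *
+ *   return gst_aggregator_finish_buffer (agg, outbuf);
+ * }
+ * ]|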
*/
GstFlowReturn
gst_aggregator_finish_buffer (GstAggregator * self, GstBuffer * buffer)
{
- _push_mandatory_events (self);
+ gst_aggregator_push_mandatory_events (self);
- if (!g_atomic_int_get (&self->priv->flush_seeking) &&
- gst_pad_is_active (self->srcpad)) {
+ GST_OBJECT_LOCK (self);
+ if (!self->priv->flush_seeking && gst_pad_is_active (self->srcpad)) {
GST_TRACE_OBJECT (self, "pushing buffer %" GST_PTR_FORMAT, buffer);
+ GST_OBJECT_UNLOCK (self);
return gst_pad_push (self->srcpad, buffer);
} else {
GST_INFO_OBJECT (self, "Not pushing (active: %i, flushing: %i)",
- g_atomic_int_get (&self->priv->flush_seeking),
- gst_pad_is_active (self->srcpad));
+ self->priv->flush_seeking, gst_pad_is_active (self->srcpad));
+ GST_OBJECT_UNLOCK (self);
gst_buffer_unref (buffer);
return GST_FLOW_OK;
}
}
static void
-_push_eos (GstAggregator * self)
+gst_aggregator_push_eos (GstAggregator * self)
{
GstEvent *event;
- _push_mandatory_events (self);
+ gst_aggregator_push_mandatory_events (self);
- self->priv->send_eos = FALSE;
event = gst_event_new_eos ();
+
+ GST_OBJECT_LOCK (self);
+ self->priv->send_eos = FALSE;
gst_event_set_seqnum (event, self->priv->seqnum);
+ GST_OBJECT_UNLOCK (self);
+
gst_pad_push_event (self->srcpad, event);
}
/* called with the src STREAM lock */
static gboolean
-_wait_and_check (GstAggregator * self, gboolean * timeout)
+gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout)
{
GstClockTime latency_max, latency_min;
GstClockTime start;
- gboolean live;
+ gboolean live, res;
*timeout = FALSE;
- gst_aggregator_get_latency (self, &live, &latency_min, &latency_max);
+ SRC_STREAM_LOCK (self);
+
+ GST_OBJECT_LOCK (self);
+ gst_aggregator_get_latency_unlocked (self, &live, &latency_min, &latency_max);
+ GST_OBJECT_UNLOCK (self);
- if (gst_aggregator_iterate_sinkpads (self,
- (GstAggregatorPadForeachFunc) _check_all_pads_with_data_or_eos,
- NULL)) {
+ if (gst_aggregator_check_pads_ready (self)) {
GST_DEBUG_OBJECT (self, "all pads have data");
+ SRC_STREAM_UNLOCK (self);
+
return TRUE;
}
- SRC_STREAM_LOCK (self);
+ /* Before waiting, check if we're actually still running */
+ if (!self->priv->running || !self->priv->send_eos) {
+ SRC_STREAM_UNLOCK (self);
+
+ return FALSE;
+ }
+
start = gst_aggregator_get_next_time (self);
if (!live || !GST_IS_CLOCK (GST_ELEMENT_CLOCK (self))
|| !GST_CLOCK_TIME_IS_VALID (start)) {
- while (self->priv->n_kicks <= 0)
- SRC_STREAM_WAIT (self);
- self->priv->n_kicks--;
+ /* We wake up here when something happened, and below
+ * we then check whether we're ready now. If we return FALSE,
+ * we will be directly called again.
+ */
+ SRC_STREAM_WAIT (self);
} else {
GstClockTime base_time, time;
GstClock *clock;
GstClockReturn status;
+ GstClockTimeDiff jitter;
GST_DEBUG_OBJECT (self, "got subclass start time: %" GST_TIME_FORMAT,
GST_TIME_ARGS (start));
clock = GST_ELEMENT_CLOCK (self);
if (clock)
gst_object_ref (clock);
- GST_OBJECT_UNLOCK (self);
time = base_time + start;
-
- if (GST_CLOCK_TIME_IS_VALID (latency_max)) {
- time += latency_max;
- } else if (GST_CLOCK_TIME_IS_VALID (latency_min)) {
- time += latency_min;
- } else {
- time += self->latency;
- }
+ time += latency_min;
GST_DEBUG_OBJECT (self, "possibly waiting for clock to reach %"
GST_TIME_FORMAT " (base %" GST_TIME_FORMAT " start %" GST_TIME_FORMAT
GST_TIME_ARGS (latency_min),
GST_TIME_ARGS (gst_clock_get_time (clock)));
+ GST_OBJECT_UNLOCK (self);
+
self->priv->aggregate_id = gst_clock_new_single_shot_id (clock, time);
gst_object_unref (clock);
SRC_STREAM_UNLOCK (self);
- status = gst_clock_id_wait (self->priv->aggregate_id, NULL);
+ jitter = 0;
+ status = gst_clock_id_wait (self->priv->aggregate_id, &jitter);
SRC_STREAM_LOCK (self);
if (self->priv->aggregate_id) {
gst_clock_id_unref (self->priv->aggregate_id);
self->priv->aggregate_id = NULL;
}
- self->priv->n_kicks--;
- GST_DEBUG_OBJECT (self, "clock returned %d", status);
+ GST_DEBUG_OBJECT (self,
+ "clock returned %d (jitter: %s%" GST_TIME_FORMAT ")",
+ status, (jitter < 0 ? "-" : " "),
+ GST_TIME_ARGS ((jitter < 0 ? -jitter : jitter)));
/* we timed out */
if (status == GST_CLOCK_OK || status == GST_CLOCK_EARLY) {
return TRUE;
}
}
+
+ res = gst_aggregator_check_pads_ready (self);
SRC_STREAM_UNLOCK (self);
- return gst_aggregator_iterate_sinkpads (self,
- (GstAggregatorPadForeachFunc) _check_all_pads_with_data_or_eos, NULL);
+ return res;
}
static void
-aggregate_func (GstAggregator * self)
+gst_aggregator_aggregate_func (GstAggregator * self)
{
GstAggregatorPrivate *priv = self->priv;
GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
GST_LOG_OBJECT (self, "Checking aggregate");
while (priv->send_eos && priv->running) {
- if (!_wait_and_check (self, &timeout))
+ GstFlowReturn flow_return;
+
+ if (!gst_aggregator_wait_and_check (self, &timeout))
continue;
GST_TRACE_OBJECT (self, "Actually aggregating!");
- priv->flow_return = klass->aggregate (self, timeout);
+ flow_return = klass->aggregate (self, timeout);
- if (priv->flow_return == GST_FLOW_EOS) {
- _push_eos (self);
- }
-
- if (priv->flow_return == GST_FLOW_FLUSHING &&
- g_atomic_int_get (&priv->flush_seeking))
+ GST_OBJECT_LOCK (self);
+ if (flow_return == GST_FLOW_FLUSHING && priv->flush_seeking)
priv->flow_return = GST_FLOW_OK;
+ else
+ priv->flow_return = flow_return;
+ GST_OBJECT_UNLOCK (self);
+
+ if (flow_return == GST_FLOW_EOS) {
+ gst_aggregator_push_eos (self);
+ }
- GST_LOG_OBJECT (self, "flow return is %s",
- gst_flow_get_name (priv->flow_return));
+ GST_LOG_OBJECT (self, "flow return is %s", gst_flow_get_name (flow_return));
- if (priv->flow_return != GST_FLOW_OK)
+ if (flow_return != GST_FLOW_OK)
break;
}
+
+ /* Pause the task here, the only ways to get here are:
+ * 1) We're stopping, in which case the task is stopped anyway
+ * 2) We got a flow error above, in which case it might take
+ * some time to forward the flow return upstream and we
+ * would otherwise call the task function over and over
+ * again without doing anything
+ */
+ gst_pad_pause_task (self->srcpad);
}
static gboolean
-_start (GstAggregator * self)
+gst_aggregator_start (GstAggregator * self)
{
+ GstAggregatorClass *klass;
+ gboolean result;
+
self->priv->running = TRUE;
self->priv->send_stream_start = TRUE;
self->priv->send_segment = TRUE;
self->priv->srccaps = NULL;
self->priv->flow_return = GST_FLOW_OK;
- return TRUE;
+ klass = GST_AGGREGATOR_GET_CLASS (self);
+
+ if (klass->start)
+ result = klass->start (self);
+ else
+ result = TRUE;
+
+ return result;
}
static gboolean
_check_pending_flush_stop (GstAggregatorPad * pad)
{
- return (!pad->priv->pending_flush_stop && !pad->priv->pending_flush_start);
+ gboolean res;
+
+ PAD_LOCK (pad);
+ res = (!pad->priv->pending_flush_stop && !pad->priv->pending_flush_start);
+ PAD_UNLOCK (pad);
+
+ return res;
}
static gboolean
-_stop_srcpad_task (GstAggregator * self, GstEvent * flush_start)
+gst_aggregator_stop_srcpad_task (GstAggregator * self, GstEvent * flush_start)
{
gboolean res = TRUE;
GST_INFO_OBJECT (self, "%s srcpad task",
flush_start ? "Pausing" : "Stopping");
+ SRC_STREAM_LOCK (self);
self->priv->running = FALSE;
- KICK_SRC_THREAD (self);
+ SRC_STREAM_BROADCAST (self);
+ SRC_STREAM_UNLOCK (self);
if (flush_start) {
res = gst_pad_push_event (self->srcpad, flush_start);
}
gst_pad_stop_task (self->srcpad);
- KICK_SRC_THREAD (self);
return res;
}
static void
-_start_srcpad_task (GstAggregator * self)
+gst_aggregator_start_srcpad_task (GstAggregator * self)
{
GST_INFO_OBJECT (self, "Starting srcpad task");
self->priv->running = TRUE;
- self->priv->n_kicks = 0;
gst_pad_start_task (GST_PAD (self->srcpad),
- (GstTaskFunction) aggregate_func, self, NULL);
+ (GstTaskFunction) gst_aggregator_aggregate_func, self, NULL);
}
static GstFlowReturn
-_flush (GstAggregator * self)
+gst_aggregator_flush (GstAggregator * self)
{
GstFlowReturn ret = GST_FLOW_OK;
GstAggregatorPrivate *priv = self->priv;
GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
GST_DEBUG_OBJECT (self, "Flushing everything");
- g_atomic_int_set (&priv->send_segment, TRUE);
- g_atomic_int_set (&priv->flush_seeking, FALSE);
- g_atomic_int_set (&priv->tags_changed, FALSE);
+ GST_OBJECT_LOCK (self);
+ priv->send_segment = TRUE;
+ priv->flush_seeking = FALSE;
+ priv->tags_changed = FALSE;
+ GST_OBJECT_UNLOCK (self);
if (klass->flush)
ret = klass->flush (self);
return ret;
}
+
+/* Called with GstAggregator's object lock held */
+
static gboolean
-_all_flush_stop_received (GstAggregator * self)
+gst_aggregator_all_flush_stop_received_locked (GstAggregator * self)
{
GList *tmp;
GstAggregatorPad *tmppad;
- GST_OBJECT_LOCK (self);
for (tmp = GST_ELEMENT (self)->sinkpads; tmp; tmp = tmp->next) {
tmppad = (GstAggregatorPad *) tmp->data;
if (_check_pending_flush_stop (tmppad) == FALSE) {
GST_DEBUG_OBJECT (tmppad, "Is not last %i -- %i",
tmppad->priv->pending_flush_start, tmppad->priv->pending_flush_stop);
- GST_OBJECT_UNLOCK (self);
return FALSE;
}
}
- GST_OBJECT_UNLOCK (self);
return TRUE;
}
static void
-_flush_start (GstAggregator * self, GstAggregatorPad * aggpad, GstEvent * event)
+gst_aggregator_flush_start (GstAggregator * self, GstAggregatorPad * aggpad,
+ GstEvent * event)
{
GstBuffer *tmpbuf;
GstAggregatorPrivate *priv = self->priv;
/* Remove pad buffer and wake up the streaming thread */
tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
gst_buffer_replace (&tmpbuf, NULL);
+
PAD_STREAM_LOCK (aggpad);
- if (g_atomic_int_compare_and_exchange (&padpriv->pending_flush_start,
- TRUE, FALSE) == TRUE) {
+ PAD_LOCK (aggpad);
+ if (padpriv->pending_flush_start) {
GST_DEBUG_OBJECT (aggpad, "Expecting FLUSH_STOP now");
- g_atomic_int_set (&padpriv->pending_flush_stop, TRUE);
+
+ padpriv->pending_flush_start = FALSE;
+ padpriv->pending_flush_stop = TRUE;
}
+ PAD_UNLOCK (aggpad);
- if (g_atomic_int_get (&priv->flush_seeking)) {
+ GST_OBJECT_LOCK (self);
+ if (priv->flush_seeking) {
/* If flush_seeking we forward the first FLUSH_START */
- if (g_atomic_int_compare_and_exchange (&priv->pending_flush_start,
- TRUE, FALSE) == TRUE) {
+ if (priv->pending_flush_start) {
+ priv->pending_flush_start = FALSE;
+ GST_OBJECT_UNLOCK (self);
GST_INFO_OBJECT (self, "Flushing, pausing srcpad task");
- _stop_srcpad_task (self, event);
+ gst_aggregator_stop_srcpad_task (self, event);
priv->flow_return = GST_FLOW_OK;
GST_INFO_OBJECT (self, "Getting STREAM_LOCK while seeking");
GST_PAD_STREAM_LOCK (self->srcpad);
GST_LOG_OBJECT (self, "GOT STREAM_LOCK");
event = NULL;
+ } else {
+ GST_OBJECT_UNLOCK (self);
+ gst_event_unref (event);
}
} else {
+ GST_OBJECT_UNLOCK (self);
gst_event_unref (event);
}
PAD_STREAM_UNLOCK (aggpad);
/* GstAggregator vmethods default implementations */
static gboolean
-_sink_event (GstAggregator * self, GstAggregatorPad * aggpad, GstEvent * event)
+gst_aggregator_default_sink_event (GstAggregator * self,
+ GstAggregatorPad * aggpad, GstEvent * event)
{
gboolean res = TRUE;
GstPad *pad = GST_PAD (aggpad);
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_FLUSH_START:
{
- _flush_start (self, aggpad, event);
+ gst_aggregator_flush_start (self, aggpad, event);
/* We forward only in one case: right after flush_seeking */
event = NULL;
goto eat;
{
GST_DEBUG_OBJECT (aggpad, "Got FLUSH_STOP");
- _aggpad_flush (aggpad, self);
- if (g_atomic_int_get (&priv->flush_seeking)) {
+ gst_aggregator_pad_flush (aggpad, self);
+ GST_OBJECT_LOCK (self);
+ if (priv->flush_seeking) {
g_atomic_int_set (&aggpad->priv->pending_flush_stop, FALSE);
-
- if (g_atomic_int_get (&priv->flush_seeking)) {
- if (_all_flush_stop_received (self)) {
- /* That means we received FLUSH_STOP/FLUSH_STOP on
- * all sinkpads -- Seeking is Done... sending FLUSH_STOP */
- _flush (self);
- gst_pad_push_event (self->srcpad, event);
- priv->send_eos = TRUE;
- event = NULL;
- KICK_SRC_THREAD (self);
-
- GST_INFO_OBJECT (self, "Releasing source pad STREAM_LOCK");
- GST_PAD_STREAM_UNLOCK (self->srcpad);
- _start_srcpad_task (self);
- }
+ if (gst_aggregator_all_flush_stop_received_locked (self)) {
+ GST_OBJECT_UNLOCK (self);
+ /* That means we received FLUSH_STOP on all
+ * sinkpads -- Seeking is Done... sending FLUSH_STOP downstream */
+ gst_aggregator_flush (self);
+ gst_pad_push_event (self->srcpad, event);
+ event = NULL;
+ SRC_STREAM_LOCK (self);
+ priv->send_eos = TRUE;
+ SRC_STREAM_BROADCAST (self);
+ SRC_STREAM_UNLOCK (self);
+
+ GST_INFO_OBJECT (self, "Releasing source pad STREAM_LOCK");
+ GST_PAD_STREAM_UNLOCK (self->srcpad);
+ gst_aggregator_start_srcpad_task (self);
+ } else {
+ GST_OBJECT_UNLOCK (self);
}
+ } else {
+ GST_OBJECT_UNLOCK (self);
}
/* We never forward the event */
* check for it. Mark pending_eos, eos will be set when steal_buffer is
* called
*/
- PAD_LOCK_EVENT (aggpad);
- if (!aggpad->buffer) {
- aggpad->eos = TRUE;
+ SRC_STREAM_LOCK (self);
+ PAD_LOCK (aggpad);
+ if (!aggpad->priv->buffer) {
+ aggpad->priv->eos = TRUE;
} else {
aggpad->priv->pending_eos = TRUE;
}
- PAD_UNLOCK_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
- KICK_SRC_THREAD (self);
+ SRC_STREAM_BROADCAST (self);
+ SRC_STREAM_UNLOCK (self);
goto eat;
}
case GST_EVENT_SEGMENT:
{
- PAD_LOCK_EVENT (aggpad);
+ GST_OBJECT_LOCK (aggpad);
gst_event_copy_segment (event, &aggpad->segment);
+ GST_OBJECT_UNLOCK (aggpad);
+
+ GST_OBJECT_LOCK (self);
self->priv->seqnum = gst_event_get_seqnum (event);
- PAD_UNLOCK_EVENT (aggpad);
+ GST_OBJECT_UNLOCK (self);
goto eat;
}
case GST_EVENT_STREAM_START:
return res;
}
-static gboolean
-_stop_pad (GstAggregator * self, GstAggregatorPad * pad, gpointer unused_udata)
+static inline gboolean
+gst_aggregator_stop_pad (GstAggregator * self, GstAggregatorPad * pad,
+ gpointer unused_udata)
{
- _aggpad_flush (pad, self);
+ gst_aggregator_pad_flush (pad, self);
return TRUE;
}
static gboolean
-_stop (GstAggregator * agg)
+gst_aggregator_stop (GstAggregator * agg)
{
- _reset_flow_values (agg);
+ GstAggregatorClass *klass;
+ gboolean result;
+
+ gst_aggregator_reset_flow_values (agg);
+
+ gst_aggregator_iterate_sinkpads (agg, gst_aggregator_stop_pad, NULL);
- gst_aggregator_iterate_sinkpads (agg,
- (GstAggregatorPadForeachFunc) _stop_pad, NULL);
+ klass = GST_AGGREGATOR_GET_CLASS (agg);
+
+ if (klass->stop)
+ result = klass->stop (agg);
+ else
+ result = TRUE;
if (agg->priv->tags)
gst_tag_list_unref (agg->priv->tags);
agg->priv->tags = NULL;
- return TRUE;
+ return result;
}
/* GstElement vmethods implementations */
static GstStateChangeReturn
-_change_state (GstElement * element, GstStateChange transition)
+gst_aggregator_change_state (GstElement * element, GstStateChange transition)
{
GstStateChangeReturn ret;
GstAggregator *self = GST_AGGREGATOR (element);
- GstAggregatorClass *agg_class = GST_AGGREGATOR_GET_CLASS (self);
-
switch (transition) {
case GST_STATE_CHANGE_READY_TO_PAUSED:
- agg_class->start (self);
+ if (!gst_aggregator_start (self))
+ goto error_start;
break;
default:
break;
switch (transition) {
case GST_STATE_CHANGE_PAUSED_TO_READY:
- agg_class->stop (self);
+ if (!gst_aggregator_stop (self)) {
+ /* What to do in this case? Error out? */
+ GST_ERROR_OBJECT (self, "Subclass failed to stop.");
+ }
break;
default:
break;
return ret;
+/* ERRORS */
failure:
{
GST_ERROR_OBJECT (element, "parent failed state change");
return ret;
}
+error_start:
+ {
+ GST_ERROR_OBJECT (element, "Subclass failed to start");
+ return GST_STATE_CHANGE_FAILURE;
+ }
}
static void
-_release_pad (GstElement * element, GstPad * pad)
+gst_aggregator_release_pad (GstElement * element, GstPad * pad)
{
GstAggregator *self = GST_AGGREGATOR (element);
GstBuffer *tmpbuf;
GST_INFO_OBJECT (pad, "Removing pad");
+ SRC_STREAM_LOCK (self);
g_atomic_int_set (&aggpad->priv->flushing, TRUE);
tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
gst_buffer_replace (&tmpbuf, NULL);
gst_element_remove_pad (element, pad);
- KICK_SRC_THREAD (self);
+ SRC_STREAM_BROADCAST (self);
+ SRC_STREAM_UNLOCK (self);
}
static GstPad *
-_request_new_pad (GstElement * element,
+gst_aggregator_request_new_pad (GstElement * element,
GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
{
GstAggregator *self;
} LatencyData;
static gboolean
-_latency_query (GstAggregator * self, GstPad * pad, gpointer user_data)
+gst_aggregator_query_sink_latency_foreach (GstAggregator * self,
+ GstAggregatorPad * pad, gpointer user_data)
{
LatencyData *data = user_data;
GstClockTime min, max;
gboolean live, res;
query = gst_query_new_latency ();
- res = gst_pad_peer_query (pad, query);
+ res = gst_pad_peer_query (GST_PAD_CAST (pad), query);
if (res) {
gst_query_parse_latency (query, &live, &min, &max);
- GST_LOG_OBJECT (self, "got latency live:%s min:%" G_GINT64_FORMAT
+ GST_LOG_OBJECT (pad, "got latency live:%s min:%" G_GINT64_FORMAT
" max:%" G_GINT64_FORMAT, live ? "true" : "false", min, max);
- if (min > data->min)
+ if (min != GST_CLOCK_TIME_NONE && min > data->min)
data->min = min;
if (max != GST_CLOCK_TIME_NONE &&
}
/**
- * gst_aggregator_get_latency:
+ * gst_aggregator_get_latency_unlocked:
* @self: a #GstAggregator
* @live: (out) (allow-none): whether @self is live
* @min_latency: (out) (allow-none): the configured minimum latency of @self
* query.
*
* Typically only called by subclasses.
+ *
+ * MUST be called with the object lock held.
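+ *
+ * A sketch of the expected call pattern from a subclass, where agg is the
+ * aggregator instance:
+ * |[
+ * gboolean live;
+ * GstClockTime min_latency, max_latency;
+ *
+ * GST_OBJECT_LOCK (agg);
+ * gst_aggregator_get_latency_unlocked (agg, &live, &min_latency, &max_latency);
+ * GST_OBJECT_UNLOCK (agg);
+ * ]|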
*/
void
-gst_aggregator_get_latency (GstAggregator * self, gboolean * live,
+gst_aggregator_get_latency_unlocked (GstAggregator * self, gboolean * live,
GstClockTime * min_latency, GstClockTime * max_latency)
{
GstClockTime min, max;
g_return_if_fail (GST_IS_AGGREGATOR (self));
+ /* latency_min is never GST_CLOCK_TIME_NONE by construction */
min = self->priv->latency_min;
max = self->priv->latency_max;
- if (GST_CLOCK_TIME_IS_VALID (self->latency)) {
- min += self->latency;
- if (GST_CLOCK_TIME_IS_VALID (max))
- max += self->latency;
- }
+ /* add our own */
+ min += self->priv->latency;
+ min += self->priv->sub_latency_min;
+ if (GST_CLOCK_TIME_IS_VALID (max)
+ && GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max))
+ max += self->priv->sub_latency_max;
+ else if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max))
+ max = self->priv->sub_latency_max;
if (live)
*live = self->priv->latency_live;
static gboolean
gst_aggregator_query_latency (GstAggregator * self, GstQuery * query)
{
+ GstClockTime our_latency;
LatencyData data;
data.min = 0;
data.live = FALSE;
/* query upstream's latency */
+ SRC_STREAM_LOCK (self);
gst_aggregator_iterate_sinkpads (self,
- (GstAggregatorPadForeachFunc) _latency_query, &data);
+ gst_aggregator_query_sink_latency_foreach, &data);
+ SRC_STREAM_UNLOCK (self);
- if (data.live && GST_CLOCK_TIME_IS_VALID (self->latency) &&
- self->latency > data.max) {
- GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
- ("%s", "Latency too big"),
- ("The requested latency value is too big for the current pipeline. "
- "Limiting to %" G_GINT64_FORMAT, data.max));
- self->latency = data.max;
- }
+ GST_OBJECT_LOCK (self);
+ our_latency = self->priv->latency;
if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (data.min))) {
GST_WARNING_OBJECT (self, "Invalid minimum latency, using 0");
data.min = 0;
}
+ if (G_UNLIKELY (data.min > data.max)) {
+ GST_WARNING_OBJECT (self, "Minimum latency is greater than maximum latency "
+ "(%" G_GINT64_FORMAT " > %" G_GINT64_FORMAT "). "
+ "Clamping it at the maximum latency", data.min, data.max);
+ data.min = data.max;
+ }
+
self->priv->latency_live = data.live;
self->priv->latency_min = data.min;
self->priv->latency_max = data.max;
/* add our own */
- if (GST_CLOCK_TIME_IS_VALID (self->latency)) {
- if (GST_CLOCK_TIME_IS_VALID (data.min))
- data.min += self->latency;
- if (GST_CLOCK_TIME_IS_VALID (data.max))
- data.max += self->latency;
+ data.min += our_latency;
+ data.min += self->priv->sub_latency_min;
+ if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max)
+ && GST_CLOCK_TIME_IS_VALID (data.max))
+ data.max += self->priv->sub_latency_max;
+ else if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max))
+ data.max = self->priv->sub_latency_max;
+
+ if (data.live && data.min > data.max) {
+ GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
+ ("%s", "Latency too big"),
+ ("The requested latency value is too big for the current pipeline. "
+ "Limiting to %" G_GINT64_FORMAT, data.max));
+ data.min = data.max;
+ /* FIXME: This could in theory become negative, but in
+ * that case all is lost anyway */
+ self->priv->latency -= data.min - data.max;
+ /* FIXME: shouldn't we g_object_notify() the change here? */
}
+ GST_OBJECT_UNLOCK (self);
+
GST_DEBUG_OBJECT (self, "configured latency live:%s min:%" G_GINT64_FORMAT
" max:%" G_GINT64_FORMAT, data.live ? "true" : "false", data.min,
data.max);
}
static gboolean
-_send_event (GstElement * element, GstEvent * event)
+gst_aggregator_send_event (GstElement * element, GstEvent * event)
{
GstAggregator *self = GST_AGGREGATOR (element);
gst_event_parse_seek (event, &rate, &fmt, &flags, &start_type,
&start, &stop_type, &stop);
+
+ GST_OBJECT_LOCK (self);
gst_segment_do_seek (&self->segment, rate, fmt, flags, start_type, start,
stop_type, stop, NULL);
-
self->priv->seqnum = gst_event_get_seqnum (event);
+ GST_OBJECT_UNLOCK (self);
+
GST_DEBUG_OBJECT (element, "Storing segment %" GST_PTR_FORMAT, event);
}
GST_STATE_UNLOCK (element);
}
static gboolean
-_src_query (GstAggregator * self, GstQuery * query)
+gst_aggregator_default_src_query (GstAggregator * self, GstQuery * query)
{
gboolean res = TRUE;
}
case GST_QUERY_LATENCY:
{
- return gst_aggregator_query_latency (self, query);
+ gboolean ret;
+
+ ret = gst_aggregator_query_latency (self, query);
+ /* Wake up the src thread again, due to changed latencies
+ * or changed live-ness we might have to adjust if we wait
+ * on a deadline at all and how long.
+ * This is only to unschedule the clock id, we don't really care
+ * about the GCond here.
+ */
+ SRC_STREAM_LOCK (self);
+ SRC_STREAM_BROADCAST (self);
+ SRC_STREAM_UNLOCK (self);
+ return ret;
}
default:
break;
}
static gboolean
-event_forward_func (GstPad * pad, EventData * evdata)
+gst_aggregator_event_forward_func (GstPad * pad, gpointer user_data)
{
+ EventData *evdata = user_data;
gboolean ret = TRUE;
GstPad *peer = gst_pad_get_peer (pad);
- GstAggregatorPadPrivate *padpriv = GST_AGGREGATOR_PAD (pad)->priv;
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
if (peer) {
ret = gst_pad_send_event (peer, gst_event_ref (evdata->event));
} else {
GST_ERROR_OBJECT (pad, "Query seeking FAILED");
}
+
+ gst_query_unref (seeking);
}
if (evdata->flush) {
- padpriv->pending_flush_start = FALSE;
- padpriv->pending_flush_stop = FALSE;
+ PAD_LOCK (aggpad);
+ aggpad->priv->pending_flush_start = FALSE;
+ aggpad->priv->pending_flush_stop = FALSE;
+ PAD_UNLOCK (aggpad);
}
} else {
evdata->one_actually_seeked = TRUE;
return FALSE;
}
-static gboolean
-_set_flush_pending (GstAggregator * self, GstAggregatorPad * pad,
- gpointer udata)
-{
- pad->priv->pending_flush_start = TRUE;
- pad->priv->pending_flush_stop = FALSE;
-
- return TRUE;
-}
-
static EventData
-_forward_event_to_all_sinkpads (GstAggregator * self, GstEvent * event,
- gboolean flush)
+gst_aggregator_forward_event_to_all_sinkpads (GstAggregator * self,
+ GstEvent * event, gboolean flush)
{
EventData evdata;
/* We first need to set all pads as flushing in a first pass
 * as flush_start/flush_stop are sometimes sent synchronously
* while we send the seek event */
- if (flush)
- gst_aggregator_iterate_sinkpads (self,
- (GstAggregatorPadForeachFunc) _set_flush_pending, NULL);
- gst_pad_forward (self->srcpad, (GstPadForwardFunction) event_forward_func,
- &evdata);
+ if (flush) {
+ GList *l;
+
+ GST_OBJECT_LOCK (self);
+ for (l = GST_ELEMENT_CAST (self)->sinkpads; l != NULL; l = l->next) {
+ GstAggregatorPad *pad = l->data;
+
+ PAD_LOCK (pad);
+ pad->priv->pending_flush_start = TRUE;
+ pad->priv->pending_flush_stop = FALSE;
+ PAD_UNLOCK (pad);
+ }
+ GST_OBJECT_UNLOCK (self);
+ }
+
+ gst_pad_forward (self->srcpad, gst_aggregator_event_forward_func, &evdata);
gst_event_unref (event);
}
static gboolean
-_do_seek (GstAggregator * self, GstEvent * event)
+gst_aggregator_do_seek (GstAggregator * self, GstEvent * event)
{
gdouble rate;
GstFormat fmt;
flush = flags & GST_SEEK_FLAG_FLUSH;
+ GST_OBJECT_LOCK (self);
if (flush) {
- g_atomic_int_set (&priv->pending_flush_start, TRUE);
- g_atomic_int_set (&priv->flush_seeking, TRUE);
+ priv->pending_flush_start = TRUE;
+ priv->flush_seeking = TRUE;
}
gst_segment_do_seek (&self->segment, rate, fmt, flags, start_type, start,
stop_type, stop, NULL);
+ GST_OBJECT_UNLOCK (self);
/* forward the seek upstream */
- evdata = _forward_event_to_all_sinkpads (self, event, flush);
+ evdata = gst_aggregator_forward_event_to_all_sinkpads (self, event, flush);
event = NULL;
if (!evdata.result || !evdata.one_actually_seeked) {
- g_atomic_int_set (&priv->flush_seeking, FALSE);
- g_atomic_int_set (&priv->pending_flush_start, FALSE);
+ GST_OBJECT_LOCK (self);
+ priv->flush_seeking = FALSE;
+ priv->pending_flush_start = FALSE;
+ GST_OBJECT_UNLOCK (self);
}
GST_INFO_OBJECT (self, "seek done, result: %d", evdata.result);
}
static gboolean
-_src_event (GstAggregator * self, GstEvent * event)
+gst_aggregator_default_src_event (GstAggregator * self, GstEvent * event)
{
EventData evdata;
gboolean res = TRUE;
case GST_EVENT_SEEK:
{
gst_event_ref (event);
- res = _do_seek (self, event);
+ res = gst_aggregator_do_seek (self, event);
gst_event_unref (event);
event = NULL;
goto done;
}
}
- evdata = _forward_event_to_all_sinkpads (self, event, FALSE);
+ evdata = gst_aggregator_forward_event_to_all_sinkpads (self, event, FALSE);
res = evdata.result;
done:
}
static gboolean
-src_event_func (GstPad * pad, GstObject * parent, GstEvent * event)
+gst_aggregator_src_pad_event_func (GstPad * pad, GstObject * parent,
+ GstEvent * event)
{
GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
}
static gboolean
-src_query_func (GstPad * pad, GstObject * parent, GstQuery * query)
+gst_aggregator_src_pad_query_func (GstPad * pad, GstObject * parent,
+ GstQuery * query)
{
GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
}
static gboolean
-src_activate_mode (GstPad * pad,
+gst_aggregator_src_pad_activate_mode_func (GstPad * pad,
GstObject * parent, GstPadMode mode, gboolean active)
{
GstAggregator *self = GST_AGGREGATOR (parent);
case GST_PAD_MODE_PUSH:
{
GST_INFO_OBJECT (pad, "Activating pad!");
- _start_srcpad_task (self);
+ gst_aggregator_start_srcpad_task (self);
return TRUE;
}
default:
/* deactivating */
GST_INFO_OBJECT (self, "Deactivating srcpad");
- _stop_srcpad_task (self, FALSE);
+ gst_aggregator_stop_srcpad_task (self, FALSE);
return TRUE;
}
static gboolean
-_sink_query (GstAggregator * self, GstAggregatorPad * aggpad, GstQuery * query)
+gst_aggregator_default_sink_query (GstAggregator * self,
+ GstAggregatorPad * aggpad, GstQuery * query)
{
GstPad *pad = GST_PAD (aggpad);
{
GstAggregator *self = (GstAggregator *) object;
- gst_object_unref (self->clock);
- g_mutex_clear (&self->priv->setcaps_lock);
g_mutex_clear (&self->priv->src_lock);
g_cond_clear (&self->priv->src_cond);
G_OBJECT_CLASS (aggregator_parent_class)->finalize (object);
}
-static void
-gst_aggregator_dispose (GObject * object)
-{
- G_OBJECT_CLASS (aggregator_parent_class)->dispose (object);
-}
-
/*
* gst_aggregator_set_latency_property:
* @agg: a #GstAggregator
static void
gst_aggregator_set_latency_property (GstAggregator * self, gint64 latency)
{
+ gboolean changed;
+ GstClockTime min, max;
+
g_return_if_fail (GST_IS_AGGREGATOR (self));
+ g_return_if_fail (GST_CLOCK_TIME_IS_VALID (latency));
GST_OBJECT_LOCK (self);
-
- if (self->priv->latency_live && self->priv->latency_max != 0 &&
- GST_CLOCK_TIME_IS_VALID (latency) && latency > self->priv->latency_max) {
- GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
- ("%s", "Latency too big"),
- ("The requested latency value is too big for the latency in the "
- "current pipeline. Limiting to %" G_GINT64_FORMAT,
- self->priv->latency_max));
- latency = self->priv->latency_max;
+ if (self->priv->latency_live) {
+ min = self->priv->latency_min;
+ max = self->priv->latency_max;
+ /* add our own */
+ min += latency;
+ min += self->priv->sub_latency_min;
+ if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max)
+ && GST_CLOCK_TIME_IS_VALID (max))
+ max += self->priv->sub_latency_max;
+ else if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max))
+ max = self->priv->sub_latency_max;
+
+ if (GST_CLOCK_TIME_IS_VALID (max) && min > max) {
+ GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
+ ("%s", "Latency too big"),
+ ("The requested latency value is too big for the latency in the "
+ "current pipeline. Limiting to %" G_GINT64_FORMAT, max));
+ /* FIXME: This could in theory become negative, but in
+ * that case all is lost anyway */
+ latency -= min - max;
+ /* FIXME: shouldn't we g_object_notify() the change here? */
+ }
}
- self->latency = latency;
+ changed = (self->priv->latency != latency);
+ self->priv->latency = latency;
GST_OBJECT_UNLOCK (self);
+
+ if (changed)
+ gst_element_post_message (GST_ELEMENT_CAST (self),
+ gst_message_new_latency (GST_OBJECT_CAST (self)));
}
/*
g_return_val_if_fail (GST_IS_AGGREGATOR (agg), -1);
GST_OBJECT_LOCK (agg);
- res = agg->latency;
+ res = agg->priv->latency;
GST_OBJECT_UNLOCK (agg);
return res;
GST_DEBUG_FG_MAGENTA, "GstAggregator");
klass->sinkpads_type = GST_TYPE_AGGREGATOR_PAD;
- klass->start = _start;
- klass->stop = _stop;
- klass->sink_event = _sink_event;
- klass->sink_query = _sink_query;
+ klass->sink_event = gst_aggregator_default_sink_event;
+ klass->sink_query = gst_aggregator_default_sink_query;
- klass->src_event = _src_event;
- klass->src_query = _src_query;
+ klass->src_event = gst_aggregator_default_src_event;
+ klass->src_query = gst_aggregator_default_src_query;
- gstelement_class->request_new_pad = GST_DEBUG_FUNCPTR (_request_new_pad);
- gstelement_class->send_event = GST_DEBUG_FUNCPTR (_send_event);
- gstelement_class->release_pad = GST_DEBUG_FUNCPTR (_release_pad);
- gstelement_class->change_state = GST_DEBUG_FUNCPTR (_change_state);
+ gstelement_class->request_new_pad =
+ GST_DEBUG_FUNCPTR (gst_aggregator_request_new_pad);
+ gstelement_class->send_event = GST_DEBUG_FUNCPTR (gst_aggregator_send_event);
+ gstelement_class->release_pad =
+ GST_DEBUG_FUNCPTR (gst_aggregator_release_pad);
+ gstelement_class->change_state =
+ GST_DEBUG_FUNCPTR (gst_aggregator_change_state);
gobject_class->set_property = gst_aggregator_set_property;
gobject_class->get_property = gst_aggregator_get_property;
gobject_class->finalize = gst_aggregator_finalize;
- gobject_class->dispose = gst_aggregator_dispose;
g_object_class_install_property (gobject_class, PROP_LATENCY,
g_param_spec_int64 ("latency", "Buffer latency",
- "Number of nanoseconds to wait for a buffer to arrive on a sink pad"
- "before the pad is deemed unresponsive (-1 unlimited)", -1,
+ "Additional latency in live mode to allow upstream "
+ "to take longer to produce buffers for the current "
+ "position", 0,
(G_MAXLONG == G_MAXINT64) ? G_MAXINT64 : (G_MAXLONG * GST_SECOND - 1),
DEFAULT_LATENCY, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
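  /* Note: applications would typically tune this from the outside, e.g.
   *   g_object_set (aggregator, "latency", 30 * GST_MSECOND, NULL);
   * where the 30ms value is purely illustrative. */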
+
+ GST_DEBUG_REGISTER_FUNCPTR (gst_aggregator_stop_pad);
+ GST_DEBUG_REGISTER_FUNCPTR (gst_aggregator_query_sink_latency_foreach);
}
static void
priv->tags_changed = FALSE;
self->priv->latency_live = FALSE;
- self->priv->latency_min = 0;
- self->priv->latency_max = GST_CLOCK_TIME_NONE;
- _reset_flow_values (self);
+ self->priv->latency_min = self->priv->sub_latency_min = 0;
+ self->priv->latency_max = self->priv->sub_latency_max = GST_CLOCK_TIME_NONE;
+ gst_aggregator_reset_flow_values (self);
self->srcpad = gst_pad_new_from_template (pad_template, "src");
gst_pad_set_event_function (self->srcpad,
- GST_DEBUG_FUNCPTR ((GstPadEventFunction) src_event_func));
+ GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_event_func));
gst_pad_set_query_function (self->srcpad,
- GST_DEBUG_FUNCPTR ((GstPadQueryFunction) src_query_func));
+ GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_query_func));
gst_pad_set_activatemode_function (self->srcpad,
- GST_DEBUG_FUNCPTR ((GstPadActivateModeFunction) src_activate_mode));
+ GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_activate_mode_func));
gst_element_add_pad (GST_ELEMENT (self), self->srcpad);
- self->clock = gst_system_clock_obtain ();
- self->latency = -1;
+ self->priv->latency = DEFAULT_LATENCY;
- g_mutex_init (&self->priv->setcaps_lock);
g_mutex_init (&self->priv->src_lock);
g_cond_init (&self->priv->src_cond);
}
}
static GstFlowReturn
-_chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
+gst_aggregator_pad_chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
{
GstBuffer *actual_buf = buffer;
GstAggregator *self = GST_AGGREGATOR (object);
GstAggregatorPrivate *priv = self->priv;
GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
GstAggregatorClass *aggclass = GST_AGGREGATOR_GET_CLASS (object);
+ GstFlowReturn flow_return;
GST_DEBUG_OBJECT (aggpad, "Start chaining a buffer %" GST_PTR_FORMAT, buffer);
if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE)
goto flushing;
- if (g_atomic_int_get (&aggpad->priv->pending_eos) == TRUE)
+ PAD_LOCK (aggpad);
+ if (aggpad->priv->pending_eos == TRUE)
goto eos;
- PAD_LOCK_EVENT (aggpad);
-
- while (aggpad->buffer && g_atomic_int_get (&aggpad->priv->flushing) == FALSE) {
+ while (aggpad->priv->buffer
+ && g_atomic_int_get (&aggpad->priv->flushing) == FALSE) {
GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
PAD_WAIT_EVENT (aggpad);
}
- PAD_UNLOCK_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE)
goto flushing;
aggclass->clip (self, aggpad, buffer, &actual_buf);
}
- PAD_LOCK_EVENT (aggpad);
- if (aggpad->buffer)
- gst_buffer_unref (aggpad->buffer);
- aggpad->buffer = actual_buf;
- PAD_UNLOCK_EVENT (aggpad);
+ SRC_STREAM_LOCK (self);
+ PAD_LOCK (aggpad);
+ if (aggpad->priv->buffer)
+ gst_buffer_unref (aggpad->priv->buffer);
+ aggpad->priv->buffer = actual_buf;
+ PAD_UNLOCK (aggpad);
PAD_STREAM_UNLOCK (aggpad);
- if (gst_aggregator_iterate_sinkpads (self,
- (GstAggregatorPadForeachFunc) _check_all_pads_with_data_or_eos, NULL))
- KICK_SRC_THREAD (self);
+ SRC_STREAM_BROADCAST (self);
+ SRC_STREAM_UNLOCK (self);
GST_DEBUG_OBJECT (aggpad, "Done chaining");
- return priv->flow_return;
+ GST_OBJECT_LOCK (self);
+ flow_return = priv->flow_return;
+ GST_OBJECT_UNLOCK (self);
+
+ return flow_return;
flushing:
PAD_STREAM_UNLOCK (aggpad);
return GST_FLOW_FLUSHING;
eos:
+ PAD_UNLOCK (aggpad);
PAD_STREAM_UNLOCK (aggpad);
gst_buffer_unref (buffer);
}
static gboolean
-pad_query_func (GstPad * pad, GstObject * parent, GstQuery * query)
+gst_aggregator_pad_query_func (GstPad * pad, GstObject * parent,
+ GstQuery * query)
{
GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
if (GST_QUERY_IS_SERIALIZED (query)) {
- PAD_LOCK_EVENT (aggpad);
+ PAD_LOCK (aggpad);
if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE) {
- PAD_UNLOCK_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
goto flushing;
}
- while (aggpad->buffer
+ while (aggpad->priv->buffer
&& g_atomic_int_get (&aggpad->priv->flushing) == FALSE) {
GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
PAD_WAIT_EVENT (aggpad);
}
- PAD_UNLOCK_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE)
goto flushing;
}
static gboolean
-pad_event_func (GstPad * pad, GstObject * parent, GstEvent * event)
+gst_aggregator_pad_event_func (GstPad * pad, GstObject * parent,
+ GstEvent * event)
{
GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
if (GST_EVENT_IS_SERIALIZED (event) && GST_EVENT_TYPE (event) != GST_EVENT_EOS
&& GST_EVENT_TYPE (event) != GST_EVENT_SEGMENT_DONE) {
- PAD_LOCK_EVENT (aggpad);
+ PAD_LOCK (aggpad);
if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE
&& GST_EVENT_TYPE (event) != GST_EVENT_FLUSH_STOP) {
- PAD_UNLOCK_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
goto flushing;
}
- while (aggpad->buffer
+ while (aggpad->priv->buffer
&& g_atomic_int_get (&aggpad->priv->flushing) == FALSE) {
GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
PAD_WAIT_EVENT (aggpad);
}
- PAD_UNLOCK_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE
&& GST_EVENT_TYPE (event) != GST_EVENT_FLUSH_STOP)
}
static gboolean
-pad_activate_mode_func (GstPad * pad,
+gst_aggregator_pad_activate_mode_func (GstPad * pad,
GstObject * parent, GstPadMode mode, gboolean active)
{
GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
if (active == FALSE) {
- PAD_LOCK_EVENT (aggpad);
+ PAD_LOCK (aggpad);
g_atomic_int_set (&aggpad->priv->flushing, TRUE);
- gst_buffer_replace (&aggpad->buffer, NULL);
+ gst_buffer_replace (&aggpad->priv->buffer, NULL);
PAD_BROADCAST_EVENT (aggpad);
- PAD_UNLOCK_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
} else {
+ PAD_LOCK (aggpad);
g_atomic_int_set (&aggpad->priv->flushing, FALSE);
- PAD_LOCK_EVENT (aggpad);
PAD_BROADCAST_EVENT (aggpad);
- PAD_UNLOCK_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
}
return TRUE;
/***********************************
* GstAggregatorPad implementation *
************************************/
-static GstPadClass *aggregator_pad_parent_class = NULL;
G_DEFINE_TYPE (GstAggregatorPad, gst_aggregator_pad, GST_TYPE_PAD);
static void
-_pad_constructed (GObject * object)
+gst_aggregator_pad_constructed (GObject * object)
{
GstPad *pad = GST_PAD (object);
gst_pad_set_chain_function (pad,
- GST_DEBUG_FUNCPTR ((GstPadChainFunction) _chain));
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_chain));
gst_pad_set_event_function (pad,
- GST_DEBUG_FUNCPTR ((GstPadEventFunction) pad_event_func));
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_event_func));
gst_pad_set_query_function (pad,
- GST_DEBUG_FUNCPTR ((GstPadQueryFunction) pad_query_func));
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_query_func));
gst_pad_set_activatemode_function (pad,
- GST_DEBUG_FUNCPTR ((GstPadActivateModeFunction) pad_activate_mode_func));
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_activate_mode_func));
}
static void
{
GstAggregatorPad *pad = (GstAggregatorPad *) object;
- g_mutex_clear (&pad->priv->event_lock);
g_cond_clear (&pad->priv->event_cond);
g_mutex_clear (&pad->priv->stream_lock);
+ g_mutex_clear (&pad->priv->lock);
- G_OBJECT_CLASS (aggregator_pad_parent_class)->finalize (object);
+ G_OBJECT_CLASS (gst_aggregator_pad_parent_class)->finalize (object);
}
static void
if (buf)
gst_buffer_unref (buf);
- G_OBJECT_CLASS (aggregator_pad_parent_class)->dispose (object);
+ G_OBJECT_CLASS (gst_aggregator_pad_parent_class)->dispose (object);
}
static void
{
GObjectClass *gobject_class = (GObjectClass *) klass;
- aggregator_pad_parent_class = g_type_class_peek_parent (klass);
g_type_class_add_private (klass, sizeof (GstAggregatorPadPrivate));
- gobject_class->constructed = GST_DEBUG_FUNCPTR (_pad_constructed);
- gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_aggregator_pad_finalize);
- gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_aggregator_pad_dispose);
+ gobject_class->constructed = gst_aggregator_pad_constructed;
+ gobject_class->finalize = gst_aggregator_pad_finalize;
+ gobject_class->dispose = gst_aggregator_pad_dispose;
}
static void
G_TYPE_INSTANCE_GET_PRIVATE (pad, GST_TYPE_AGGREGATOR_PAD,
GstAggregatorPadPrivate);
- pad->buffer = NULL;
- g_mutex_init (&pad->priv->event_lock);
+ pad->priv->buffer = NULL;
g_cond_init (&pad->priv->event_cond);
g_mutex_init (&pad->priv->stream_lock);
+ g_mutex_init (&pad->priv->lock);
}
/**
{
GstBuffer *buffer = NULL;
- PAD_LOCK_EVENT (pad);
- if (pad->buffer) {
+ PAD_LOCK (pad);
+ if (pad->priv->buffer) {
GST_TRACE_OBJECT (pad, "Consuming buffer");
- buffer = pad->buffer;
- pad->buffer = NULL;
+ buffer = pad->priv->buffer;
+ pad->priv->buffer = NULL;
if (pad->priv->pending_eos) {
pad->priv->pending_eos = FALSE;
- pad->eos = TRUE;
+ pad->priv->eos = TRUE;
}
PAD_BROADCAST_EVENT (pad);
GST_DEBUG_OBJECT (pad, "Consumed: %" GST_PTR_FORMAT, buffer);
}
- PAD_UNLOCK_EVENT (pad);
+ PAD_UNLOCK (pad);
return buffer;
}
{
GstBuffer *buffer = NULL;
- PAD_LOCK_EVENT (pad);
- if (pad->buffer)
- buffer = gst_buffer_ref (pad->buffer);
- PAD_UNLOCK_EVENT (pad);
+ PAD_LOCK (pad);
+ if (pad->priv->buffer)
+ buffer = gst_buffer_ref (pad->priv->buffer);
+ PAD_UNLOCK (pad);
return buffer;
}
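+/**
+ * gst_aggregator_pad_is_eos:
+ * @pad: a #GstAggregatorPad
+ *
+ * Returns: %TRUE if @pad has received an EOS event and all of its queued
+ * data has been consumed with gst_aggregator_pad_steal_buffer().
+ */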
+gboolean
+gst_aggregator_pad_is_eos (GstAggregatorPad * pad)
+{
+ gboolean is_eos;
+
+ PAD_LOCK (pad);
+ is_eos = pad->priv->eos;
+ PAD_UNLOCK (pad);
+
+ return is_eos;
+}
+
/**
* gst_aggregator_merge_tags:
* @self: a #GstAggregator
self->priv->tags_changed = TRUE;
GST_OBJECT_UNLOCK (self);
}
+
+/**
+ * gst_aggregator_set_latency:
+ * @self: a #GstAggregator
+ * @min_latency: minimum latency
+ * @max_latency: maximum latency
+ *
+ * Lets #GstAggregator subclasses tell the base class what their internal
+ * latency is. Will also post a LATENCY message on the bus so the pipeline
+ * can reconfigure its global latency.
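+ *
+ * A short usage sketch from a subclass; the 10ms figure is purely
+ * illustrative:
+ * |[
+ * gst_aggregator_set_latency (agg, 10 * GST_MSECOND, 10 * GST_MSECOND);
+ * ]|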
+ */
+void
+gst_aggregator_set_latency (GstAggregator * self,
+ GstClockTime min_latency, GstClockTime max_latency)
+{
+ g_return_if_fail (GST_IS_AGGREGATOR (self));
+ g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
+ g_return_if_fail (max_latency >= min_latency);
+
+ GST_OBJECT_LOCK (self);
+ self->priv->sub_latency_min = min_latency;
+ self->priv->sub_latency_max = max_latency;
+ GST_OBJECT_UNLOCK (self);
+
+ gst_element_post_message (GST_ELEMENT_CAST (self),
+ gst_message_new_latency (GST_OBJECT_CAST (self)));
+}