/* Take the aggregator pad's private mutex (pad->priv->lock), tracing the
 * acquiring thread before and after.  The patch switches this from the
 * GstObject lock to a dedicated per-pad mutex. */
#define PAD_LOCK(pad) G_STMT_START { \
GST_TRACE_OBJECT (pad, "Taking PAD lock from thread %p", \
g_thread_self()); \
- GST_OBJECT_LOCK (pad); \
+ g_mutex_lock(&pad->priv->lock); \
GST_TRACE_OBJECT (pad, "Took PAD lock from thread %p", \
g_thread_self()); \
} G_STMT_END
/* Release the aggregator pad's private mutex (pad->priv->lock), the
 * counterpart of PAD_LOCK, with matching trace output.
 * NOTE(review): the second trace message reads "Release" where
 * "Released" was probably intended -- it is a runtime string, so it is
 * left untouched here. */
#define PAD_UNLOCK(pad) G_STMT_START { \
GST_TRACE_OBJECT (pad, "Releasing PAD lock from thread %p", \
g_thread_self()); \
- GST_OBJECT_UNLOCK (pad); \
+ g_mutex_unlock(&pad->priv->lock); \
GST_TRACE_OBJECT (pad, "Release PAD lock from thread %p", \
g_thread_self()); \
} G_STMT_END
GST_LOG_OBJECT (pad, "Waiting for EVENT on thread %p", \
g_thread_self()); \
g_cond_wait(&(((GstAggregatorPad* )pad)->priv->event_cond), \
- GST_OBJECT_GET_LOCK (pad)); \
+ (&((GstAggregatorPad*)pad)->priv->lock)); \
GST_LOG_OBJECT (pad, "DONE Waiting for EVENT on thread %p", \
g_thread_self()); \
} G_STMT_END
/* Private, per-pad state of a GstAggregatorPad. */
struct _GstAggregatorPadPrivate
{
+ /* To always be used atomically */
+ gboolean flushing;
+
+ /* Following fields are protected by the PAD_LOCK */
gboolean pending_flush_start;
gboolean pending_flush_stop;
gboolean pending_eos;
- gboolean flushing;
- GCond event_cond;
+ GstBuffer *buffer; /* the single queued buffer, consumed by _steal_buffer() */
+ gboolean eos;
+ GMutex lock; /* the mutex taken/released by PAD_LOCK / PAD_UNLOCK */
+ GCond event_cond; /* waited on together with priv->lock (PAD_WAIT_EVENT) */
GMutex stream_lock; /* NOTE(review): presumably serialises the pad's streaming thread (PAD_STREAM_LOCK) -- confirm */
};
GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad);
PAD_LOCK (aggpad);
- aggpad->eos = FALSE;
+ aggpad->priv->eos = FALSE;
aggpad->priv->flushing = FALSE;
PAD_UNLOCK (aggpad);
*************************************/
/* NOTE(review): parent-class pointer; presumably initialised in
 * class_init via g_type_class_peek_parent() -- confirm against the
 * full file. */
static GstElementClass *aggregator_parent_class = NULL;
+/* All members are protected by the object lock unless otherwise noted */
+
struct _GstAggregatorPrivate
{
gint padcount;
gboolean running; /* protected by SRC_STREAM_LOCK */
gint seqnum;
- gboolean send_stream_start;
+ gboolean send_stream_start; /* protected by srcpad stream lock */
gboolean send_segment;
gboolean flush_seeking;
gboolean pending_flush_start;
- gboolean send_eos;
- GstFlowReturn flow_return;
-
- gboolean send_stream_start; /* protected by srcpad stream lock */
- gboolean send_segment; /* protected by object lock */
- gboolean flush_seeking; /* protected by object lock */
- gboolean pending_flush_start; /* protected by object lock */
gboolean send_eos; /* protected by srcpad stream lock */
- GstFlowReturn flow_return; /* protected by object lock */
+ GstFlowReturn flow_return;
GstCaps *srccaps; /* protected by the srcpad stream lock */
- /* protected by object lock */
GstTagList *tags;
gboolean tags_changed;
GstClockTime sub_latency_max;
/* aggregate */
- GstClockID aggregate_id;
+ GstClockID aggregate_id; /* protected by src_lock */
GMutex src_lock;
GCond src_cond;
pad = l->data;
PAD_LOCK (pad);
- if (pad->buffer == NULL && !pad->eos) {
- GST_OBJECT_UNLOCK (pad);
+ if (pad->priv->buffer == NULL && !pad->priv->eos) {
+ PAD_UNLOCK (pad);
goto pad_not_ready;
}
PAD_UNLOCK (pad);
GstEvent *event;
gst_aggregator_push_mandatory_events (self);
- self->priv->send_eos = FALSE;
event = gst_event_new_eos ();
+
+ GST_OBJECT_LOCK (self);
+ self->priv->send_eos = FALSE;
gst_event_set_seqnum (event, self->priv->seqnum);
+ GST_OBJECT_UNLOCK (self);
+
gst_pad_push_event (self->srcpad, event);
}
clock = GST_ELEMENT_CLOCK (self);
if (clock)
gst_object_ref (clock);
- GST_OBJECT_UNLOCK (self);
time = base_time + start;
-
- if (GST_CLOCK_TIME_IS_VALID (latency_min)) {
- time += latency_min;
- } else {
- time += self->priv->latency;
- }
+ time += latency_min;
GST_DEBUG_OBJECT (self, "possibly waiting for clock to reach %"
GST_TIME_FORMAT " (base %" GST_TIME_FORMAT " start %" GST_TIME_FORMAT
GST_TIME_ARGS (latency_min),
GST_TIME_ARGS (gst_clock_get_time (clock)));
+ GST_OBJECT_UNLOCK (self);
+
self->priv->aggregate_id = gst_clock_new_single_shot_id (clock, time);
gst_object_unref (clock);
SRC_STREAM_UNLOCK (self);
if (flow_return != GST_FLOW_OK)
break;
}
+
+ /* Pause the task here, the only ways to get here are:
+ * 1) We're stopping, in which case the task is stopped anyway
+ * 2) We got a flow error above, in which case it might take
+ * some time to forward the flow return upstream and we
+ * would otherwise call the task function over and over
+ * again without doing anything
+ */
+ gst_pad_pause_task (self->srcpad);
}
static gboolean
/* Returns TRUE iff neither a flush-stop nor a flush-start is pending on
 * @pad.  The patch makes it take the PAD_LOCK so both flags are read
 * consistently together instead of unlocked. */
static gboolean
_check_pending_flush_stop (GstAggregatorPad * pad)
{
- return (!pad->priv->pending_flush_stop && !pad->priv->pending_flush_start);
+ gboolean res;
+
+ PAD_LOCK (pad);
+ res = (!pad->priv->pending_flush_stop && !pad->priv->pending_flush_start);
+ PAD_UNLOCK (pad);
+
+ return res;
}
static gboolean
/* Remove pad buffer and wake up the streaming thread */
tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
gst_buffer_replace (&tmpbuf, NULL);
+
PAD_STREAM_LOCK (aggpad);
- if (g_atomic_int_compare_and_exchange (&padpriv->pending_flush_start,
- TRUE, FALSE) == TRUE) {
+ PAD_LOCK (aggpad);
+ if (padpriv->pending_flush_start) {
GST_DEBUG_OBJECT (aggpad, "Expecting FLUSH_STOP now");
- g_atomic_int_set (&padpriv->pending_flush_stop, TRUE);
+
+ padpriv->pending_flush_start = FALSE;
+ padpriv->pending_flush_stop = TRUE;
}
+ PAD_UNLOCK (aggpad);
GST_OBJECT_LOCK (self);
if (priv->flush_seeking) {
*/
SRC_STREAM_LOCK (self);
PAD_LOCK (aggpad);
- if (!aggpad->buffer) {
- aggpad->eos = TRUE;
+ if (!aggpad->priv->buffer) {
+ aggpad->priv->eos = TRUE;
} else {
aggpad->priv->pending_eos = TRUE;
}
}
case GST_EVENT_SEGMENT:
{
- PAD_LOCK (aggpad);
+ GST_OBJECT_LOCK (aggpad);
gst_event_copy_segment (event, &aggpad->segment);
+ GST_OBJECT_UNLOCK (aggpad);
+
+ GST_OBJECT_LOCK (self);
self->priv->seqnum = gst_event_get_seqnum (event);
- PAD_UNLOCK (aggpad);
+ GST_OBJECT_UNLOCK (self);
goto eat;
}
case GST_EVENT_STREAM_START:
gst_aggregator_get_latency_unlocked (GstAggregator * self, gboolean * live,
GstClockTime * min_latency, GstClockTime * max_latency)
{
- GstClockTime our_latency;
GstClockTime min, max;
g_return_if_fail (GST_IS_AGGREGATOR (self));
min = self->priv->latency_min;
max = self->priv->latency_max;
+ /* add our own */
+ min += self->priv->latency;
min += self->priv->sub_latency_min;
if (GST_CLOCK_TIME_IS_VALID (max)
&& GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max))
max += self->priv->sub_latency_max;
-
- our_latency = self->priv->latency;
- if (GST_CLOCK_TIME_IS_VALID (our_latency)) {
- min += our_latency;
- if (GST_CLOCK_TIME_IS_VALID (max))
- max += our_latency;
- }
+ else if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max))
+ max = self->priv->sub_latency_max;
if (live)
*live = self->priv->latency_live;
gst_aggregator_query_sink_latency_foreach, &data);
SRC_STREAM_UNLOCK (self);
+ GST_OBJECT_LOCK (self);
our_latency = self->priv->latency;
- if (data.live && GST_CLOCK_TIME_IS_VALID (our_latency) &&
- our_latency > data.max) {
- GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
- ("%s", "Latency too big"),
- ("The requested latency value is too big for the current pipeline. "
- "Limiting to %" G_GINT64_FORMAT, data.max));
- self->priv->latency = data.max;
- /* FIXME: shouldn't we g_object_notify() the change here? */
- }
-
if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (data.min))) {
GST_WARNING_OBJECT (self, "Invalid minimum latency, using 0");
data.min = 0;
self->priv->latency_max = data.max;
/* add our own */
- if (GST_CLOCK_TIME_IS_VALID (our_latency)) {
- if (GST_CLOCK_TIME_IS_VALID (data.min))
- data.min += our_latency;
- if (GST_CLOCK_TIME_IS_VALID (data.max))
- data.max += our_latency;
- }
-
- if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_min)
- && GST_CLOCK_TIME_IS_VALID (data.min))
- data.min += self->priv->sub_latency_min;
+ data.min += our_latency;
+ data.min += self->priv->sub_latency_min;
if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max)
&& GST_CLOCK_TIME_IS_VALID (data.max))
data.max += self->priv->sub_latency_max;
+ else if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max))
+ data.max = self->priv->sub_latency_max;
+
+ if (data.live && data.min > data.max) {
+ GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
+ ("%s", "Latency too big"),
+ ("The requested latency value is too big for the current pipeline. "
+ "Limiting to %" G_GINT64_FORMAT, data.max));
+ data.min = data.max;
+ /* FIXME: This could in theory become negative, but in
+ * that case all is lost anyway */
+ self->priv->latency -= data.min - data.max;
+ /* FIXME: shouldn't we g_object_notify() the change here? */
+ }
+
+ GST_OBJECT_UNLOCK (self);
GST_DEBUG_OBJECT (self, "configured latency live:%s min:%" G_GINT64_FORMAT
" max:%" G_GINT64_FORMAT, data.live ? "true" : "false", data.min,
gst_event_parse_seek (event, &rate, &fmt, &flags, &start_type,
&start, &stop_type, &stop);
+
+ GST_OBJECT_LOCK (self);
gst_segment_do_seek (&self->segment, rate, fmt, flags, start_type, start,
stop_type, stop, NULL);
-
self->priv->seqnum = gst_event_get_seqnum (event);
+ GST_OBJECT_UNLOCK (self);
+
GST_DEBUG_OBJECT (element, "Storing segment %" GST_PTR_FORMAT, event);
}
GST_STATE_UNLOCK (element);
EventData *evdata = user_data;
gboolean ret = TRUE;
GstPad *peer = gst_pad_get_peer (pad);
- GstAggregatorPadPrivate *padpriv = GST_AGGREGATOR_PAD (pad)->priv;
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
if (peer) {
ret = gst_pad_send_event (peer, gst_event_ref (evdata->event));
}
if (evdata->flush) {
- padpriv->pending_flush_start = FALSE;
- padpriv->pending_flush_stop = FALSE;
+ PAD_LOCK (aggpad);
+ aggpad->priv->pending_flush_start = FALSE;
+ aggpad->priv->pending_flush_stop = FALSE;
+ PAD_UNLOCK (aggpad);
}
} else {
evdata->one_actually_seeked = TRUE;
for (l = GST_ELEMENT_CAST (self)->sinkpads; l != NULL; l = l->next) {
GstAggregatorPad *pad = l->data;
+ PAD_LOCK (pad);
pad->priv->pending_flush_start = TRUE;
pad->priv->pending_flush_stop = FALSE;
+ PAD_UNLOCK (pad);
}
GST_OBJECT_UNLOCK (self);
}
gst_aggregator_set_latency_property (GstAggregator * self, gint64 latency)
{
gboolean changed;
+ GstClockTime min, max;
g_return_if_fail (GST_IS_AGGREGATOR (self));
+ g_return_if_fail (GST_CLOCK_TIME_IS_VALID (latency));
GST_OBJECT_LOCK (self);
-
- if (self->priv->latency_live && self->priv->latency_max != 0 &&
- GST_CLOCK_TIME_IS_VALID (latency) && latency > self->priv->latency_max) {
- GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
- ("%s", "Latency too big"),
- ("The requested latency value is too big for the latency in the "
- "current pipeline. Limiting to %" G_GINT64_FORMAT,
- self->priv->latency_max));
- latency = self->priv->latency_max;
+ if (self->priv->latency_live) {
+ min = self->priv->latency_min;
+ max = self->priv->latency_max;
+ /* add our own */
+ min += latency;
+ min += self->priv->sub_latency_min;
+ if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max)
+ && GST_CLOCK_TIME_IS_VALID (max))
+ max += self->priv->sub_latency_max;
+ else if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max))
+ max = self->priv->sub_latency_max;
+
+ if (GST_CLOCK_TIME_IS_VALID (max) && min > max) {
+ GST_ELEMENT_WARNING (self, CORE, NEGOTIATION,
+ ("%s", "Latency too big"),
+ ("The requested latency value is too big for the latency in the "
+ "current pipeline. Limiting to %" G_GINT64_FORMAT, max));
+ /* FIXME: This could in theory become negative, but in
+ * that case all is lost anyway */
+ latency -= min - max;
+ /* FIXME: shouldn't we g_object_notify() the change here? */
+ }
}
changed = (self->priv->latency != latency);
if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE)
goto flushing;
- if (g_atomic_int_get (&aggpad->priv->pending_eos) == TRUE)
- goto eos;
-
PAD_LOCK (aggpad);
+ if (aggpad->priv->pending_eos == TRUE)
+ goto eos;
- while (aggpad->buffer && g_atomic_int_get (&aggpad->priv->flushing) == FALSE) {
+ while (aggpad->priv->buffer
+ && g_atomic_int_get (&aggpad->priv->flushing) == FALSE) {
GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
PAD_WAIT_EVENT (aggpad);
}
SRC_STREAM_LOCK (self);
PAD_LOCK (aggpad);
- if (aggpad->buffer)
- gst_buffer_unref (aggpad->buffer);
- aggpad->buffer = actual_buf;
+ if (aggpad->priv->buffer)
+ gst_buffer_unref (aggpad->priv->buffer);
+ aggpad->priv->buffer = actual_buf;
PAD_UNLOCK (aggpad);
PAD_STREAM_UNLOCK (aggpad);
- if (gst_aggregator_check_pads_ready (self))
- SRC_STREAM_BROADCAST (self);
+ SRC_STREAM_BROADCAST (self);
SRC_STREAM_UNLOCK (self);
GST_DEBUG_OBJECT (aggpad, "Done chaining");
return GST_FLOW_FLUSHING;
eos:
+ PAD_UNLOCK (aggpad);
PAD_STREAM_UNLOCK (aggpad);
gst_buffer_unref (buffer);
goto flushing;
}
- while (aggpad->buffer
+ while (aggpad->priv->buffer
&& g_atomic_int_get (&aggpad->priv->flushing) == FALSE) {
GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
PAD_WAIT_EVENT (aggpad);
goto flushing;
}
- while (aggpad->buffer
+ while (aggpad->priv->buffer
&& g_atomic_int_get (&aggpad->priv->flushing) == FALSE) {
GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
PAD_WAIT_EVENT (aggpad);
if (active == FALSE) {
PAD_LOCK (aggpad);
g_atomic_int_set (&aggpad->priv->flushing, TRUE);
- gst_buffer_replace (&aggpad->buffer, NULL);
+ gst_buffer_replace (&aggpad->priv->buffer, NULL);
PAD_BROADCAST_EVENT (aggpad);
PAD_UNLOCK (aggpad);
} else {
g_cond_clear (&pad->priv->event_cond);
g_mutex_clear (&pad->priv->stream_lock);
+ g_mutex_clear (&pad->priv->lock);
G_OBJECT_CLASS (gst_aggregator_pad_parent_class)->finalize (object);
}
G_TYPE_INSTANCE_GET_PRIVATE (pad, GST_TYPE_AGGREGATOR_PAD,
GstAggregatorPadPrivate);
- pad->buffer = NULL;
+ pad->priv->buffer = NULL;
g_cond_init (&pad->priv->event_cond);
g_mutex_init (&pad->priv->stream_lock);
+ g_mutex_init (&pad->priv->lock);
}
/**
- * gst_aggregator_pad_steal_buffer_unlocked:
+ * gst_aggregator_pad_steal_buffer:
 * @pad: the pad to get buffer from
 *
 * Steal the ref to the buffer currently queued in @pad.
 *
 * (Takes the PAD_LOCK internally; the separate _unlocked variant is
 * removed by this patch.)
 *
- * MUST be called with the pad's object lock held.
- *
 * Returns: (transfer full): The buffer in @pad or NULL if no buffer was
 * queued. You should unref the buffer after usage.
 */
GstBuffer *
-gst_aggregator_pad_steal_buffer_unlocked (GstAggregatorPad * pad)
+gst_aggregator_pad_steal_buffer (GstAggregatorPad * pad)
{
GstBuffer *buffer = NULL;
- if (pad->buffer) {
+ PAD_LOCK (pad);
+ if (pad->priv->buffer) {
GST_TRACE_OBJECT (pad, "Consuming buffer");
- buffer = pad->buffer;
- pad->buffer = NULL;
+ buffer = pad->priv->buffer;
+ pad->priv->buffer = NULL;
/* a deferred EOS becomes effective once the queued buffer is taken */
if (pad->priv->pending_eos) {
pad->priv->pending_eos = FALSE;
- pad->eos = TRUE;
+ pad->priv->eos = TRUE;
}
/* wake threads blocked in PAD_WAIT_EVENT waiting for consumption */
PAD_BROADCAST_EVENT (pad);
GST_DEBUG_OBJECT (pad, "Consumed: %" GST_PTR_FORMAT, buffer);
}
+ PAD_UNLOCK (pad);
return buffer;
}
/**
- * gst_aggregator_pad_steal_buffer:
+ * gst_aggregator_pad_get_buffer:
 * @pad: the pad to get buffer from
 *
- * Steal the ref to the buffer currently queued in @pad.
- *
- * Returns: (transfer full): The buffer in @pad or NULL if no buffer was
- * queued. You should unref the buffer after usage.
+ * Returns: (transfer full): A reference to the buffer in @pad or
+ * NULL if no buffer was queued. You should unref the buffer after
+ * usage.
 */
GstBuffer *
-gst_aggregator_pad_steal_buffer (GstAggregatorPad * pad)
+gst_aggregator_pad_get_buffer (GstAggregatorPad * pad)
{
GstBuffer *buffer = NULL;
PAD_LOCK (pad);
/* unlike _steal_buffer(), the queued buffer stays on the pad; we
 * only add a reference here */
- buffer = gst_aggregator_pad_steal_buffer_unlocked (pad);
+ if (pad->priv->buffer)
+ buffer = gst_buffer_ref (pad->priv->buffer);
PAD_UNLOCK (pad);
return buffer;
}
-/**
- * gst_aggregator_pad_get_buffer:
- * @pad: the pad to get buffer from
- *
- * Returns: (transfer full): A reference to the buffer in @pad or
- * NULL if no buffer was queued. You should unref the buffer after
- * usage.
- */
-GstBuffer *
-gst_aggregator_pad_get_buffer (GstAggregatorPad * pad)
/* Returns %TRUE if @pad has received EOS; reads priv->eos under the
 * PAD_LOCK. */
+gboolean
+gst_aggregator_pad_is_eos (GstAggregatorPad * pad)
{
- GstBuffer *buffer = NULL;
+ gboolean is_eos;
PAD_LOCK (pad);
- if (pad->buffer)
- buffer = gst_buffer_ref (pad->buffer);
+ is_eos = pad->priv->eos;
PAD_UNLOCK (pad);
- return buffer;
+ return is_eos;
}
/**