2004-07-09 Thomas Vander Stichele <thomas (at) apestaart (dot) org>
+ * gst/gstqueue.c: (gst_queue_class_init), (gst_queue_finalize),
+ (gst_queue_chain), (gst_queue_get), (gst_queue_handle_src_event),
+ (gst_queue_release_locks), (gst_queue_change_state),
+ (gst_queue_set_property):
+ add proper lock debugging. Change dispose to finalize, since
+ we're freeing mutexes and other stuff which should happen only once.
+
+2004-07-09 Thomas Vander Stichele <thomas (at) apestaart (dot) org>
+
* docs/gst/tmpl/gstelement.sgml:
* docs/gst/tmpl/gstplugin.sgml:
* docs/gst/tmpl/gsttypes.sgml:
/* FILL ME */
};
+#define GST_QUEUE_MUTEX_LOCK G_STMT_START { \
+ GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
+ "locking qlock from thread %p", \
+ g_thread_self ()); \
+ g_mutex_lock (queue->qlock); \
+ GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
+ "locked qlock from thread %p", \
+ g_thread_self ()); \
+} G_STMT_END
+
+#define GST_QUEUE_MUTEX_UNLOCK G_STMT_START { \
+ GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
+ "unlocking qlock from thread %p", \
+ g_thread_self ()); \
+ g_mutex_unlock (queue->qlock); \
+} G_STMT_END
+
+
typedef struct _GstQueueEventResponse
{
GstEvent *event;
static void gst_queue_base_init (GstQueueClass * klass);
static void gst_queue_class_init (GstQueueClass * klass);
static void gst_queue_init (GstQueue * queue);
-static void gst_queue_dispose (GObject * object);
+static void gst_queue_finalize (GObject * object);
static void gst_queue_set_property (GObject * object,
guint prop_id, const GValue * value, GParamSpec * pspec);
0, G_MAXUINT64, -1, G_PARAM_READWRITE));
/* set several parent class virtual functions */
- gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_queue_dispose);
+ gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_queue_finalize);
gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_queue_set_property);
gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_queue_get_property);
"initialized queue's not_empty & not_full conditions");
}
+/* called only once, as opposed to dispose */
static void
-gst_queue_dispose (GObject * object)
+gst_queue_finalize (GObject * object)
{
GstQueue *queue = GST_QUEUE (object);
- gst_element_set_state (GST_ELEMENT (queue), GST_STATE_NULL);
+ GST_DEBUG_OBJECT (queue, "finalizing queue");
while (!g_queue_is_empty (queue->queue)) {
GstData *data = g_queue_pop_head (queue->queue);
g_mutex_free (queue->event_lock);
g_queue_free (queue->events);
- if (G_OBJECT_CLASS (parent_class)->dispose)
- G_OBJECT_CLASS (parent_class)->dispose (object);
+ if (G_OBJECT_CLASS (parent_class)->finalize)
+ G_OBJECT_CLASS (parent_class)->finalize (object);
}
static GstCaps *
restart:
/* we have to lock the queue since we span threads */
- GST_CAT_LOG_OBJECT (queue_dataflow, queue, "locking t:%p", g_thread_self ());
- g_mutex_lock (queue->qlock);
- GST_CAT_LOG_OBJECT (queue_dataflow, queue, "locked t:%p", g_thread_self ());
+ GST_QUEUE_MUTEX_LOCK;
gst_queue_handle_pending_events (queue);
queue->cur_level.bytes >= queue->max_size.bytes) ||
(queue->max_size.time > 0 &&
queue->cur_level.time >= queue->max_size.time))) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_OVERRUN], 0);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
/* how are we going to make space for this buffer? */
switch (queue->leaky) {
GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
"queue is full, leaking buffer on upstream end");
/* now we can clean up and exit right away */
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
goto out_unref;
/* leak first buffer in the queue */
* half of state change executes */
if (queue->interrupt) {
GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "interrupted");
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->sinkpad),
GST_ELEMENT (queue))) {
goto out_unref;
/* this means the other end is shut down. Try to
* signal to resolve the error */
if (!queue->may_deadlock) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
gst_data_unref (data);
GST_ELEMENT_ERROR (queue, CORE, THREAD, (NULL),
("deadlock found, shutting down source pad elements"));
* that, we handle pending upstream events here, too. */
gst_queue_handle_pending_events (queue);
- STATUS (queue, "waiting for item_del signal");
+ STATUS (queue, "waiting for item_del signal from thread using qlock");
g_cond_wait (queue->item_del, queue->qlock);
- STATUS (queue, "received item_del signal");
+ STATUS (queue, "received item_del signal from thread using qlock");
}
STATUS (queue, "post-full wait");
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
break;
}
}
GST_CAT_LOG_OBJECT (queue_dataflow, queue, "signalling item_add");
g_cond_signal (queue->item_add);
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
return;
restart:
/* have to lock for thread-safety */
- GST_CAT_LOG_OBJECT (queue_dataflow, queue, "locking t:%p", g_thread_self ());
- g_mutex_lock (queue->qlock);
- GST_CAT_LOG_OBJECT (queue_dataflow, queue, "locked t:%p", g_thread_self ());
+ GST_QUEUE_MUTEX_LOCK;
if (queue->queue->length == 0 ||
(queue->min_threshold.buffers > 0 &&
queue->cur_level.bytes < queue->min_threshold.bytes) ||
(queue->min_threshold.time > 0 &&
queue->cur_level.time < queue->min_threshold.time)) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_UNDERRUN], 0);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
STATUS (queue, "pre-empty wait");
while (queue->queue->length == 0 ||
* change executes. */
if (queue->interrupt) {
GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "interrupted");
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->srcpad),
GST_ELEMENT (queue)))
return GST_DATA (gst_event_new (GST_EVENT_INTERRUPT));
if (GST_STATE (queue) != GST_STATE_PLAYING) {
/* this means the other end is shut down */
if (!queue->may_deadlock) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
GST_ELEMENT_ERROR (queue, CORE, THREAD, (NULL),
("deadlock found, shutting down sink pad elements"));
goto restart;
g_get_current_time (&timeout);
g_time_val_add (&timeout, queue->block_timeout / 1000);
+    GST_LOG_OBJECT (queue, "g_cond_timed_wait using qlock from thread %p",
+ g_thread_self ());
if (!g_cond_timed_wait (queue->item_add, queue->qlock, &timeout)) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
"Sending filler event");
return GST_DATA (gst_event_new_filler ());
}
} else {
+ GST_LOG_OBJECT (queue, "doing g_cond_wait using qlock from thread %p",
+ g_thread_self ());
g_cond_wait (queue->item_add, queue->qlock);
+ GST_LOG_OBJECT (queue, "done g_cond_wait using qlock from thread %p",
+ g_thread_self ());
}
STATUS (queue, "got item_add signal");
}
STATUS (queue, "post-empty wait");
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
}
/* There's something in the list now, whatever it is */
GST_CAT_LOG_OBJECT (queue_dataflow, queue, "signalling item_del");
g_cond_signal (queue->item_del);
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
/* FIXME: I suppose this needs to be locked, since the EOS
* bit affects the pipeline state. However, that bit is
GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "got event %p (%d)",
event, GST_EVENT_TYPE (event));
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
if (gst_element_get_state (GST_ELEMENT (queue)) == GST_STATE_PLAYING) {
GstQueueEventResponse er;
g_get_current_time (&timeout);
g_time_val_add (&timeout, 500 * 1000); /* half a second */
+    GST_LOG_OBJECT (queue, "doing g_cond_timed_wait using qlock from thread %p",
+ g_thread_self ());
if (!g_cond_timed_wait (queue->event_done, queue->qlock, &timeout) &&
!er.handled) {
GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
}
}
handled:
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
return res;
}
queue = GST_QUEUE (element);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
queue->interrupt = TRUE;
g_cond_signal (queue->item_add);
g_cond_signal (queue->item_del);
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
return TRUE;
}
/* lock the queue so another thread (not in sync with this thread's state)
* can't call this queue's _get (or whatever)
*/
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
switch (GST_STATE_TRANSITION (element)) {
case GST_STATE_NULL_TO_READY:
g_cond_signal (queue->item_add);
ret = GST_STATE_FAILURE;
- goto error;
+ goto unlock;
} else {
GstScheduler *src_sched, *sink_sched;
GST_ELEMENT_NAME (queue));
ret = GST_STATE_FAILURE;
- goto error;
+ goto unlock;
}
}
queue->interrupt = FALSE;
gst_pad_set_active (queue->sinkpad, TRUE);
gst_pad_set_active (queue->srcpad, TRUE);
-error:
- g_mutex_unlock (queue->qlock);
+unlock:
+ GST_QUEUE_MUTEX_UNLOCK;
GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "done with state change");
/* someone could change levels here, and since this
* affects the get/put funcs, we need to lock for safety. */
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
switch (prop_id) {
case ARG_MAX_SIZE_BYTES:
break;
}
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
}
static void
/* FILL ME */
};
+#define GST_QUEUE_MUTEX_LOCK G_STMT_START { \
+ GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
+ "locking qlock from thread %p", \
+ g_thread_self ()); \
+ g_mutex_lock (queue->qlock); \
+ GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
+ "locked qlock from thread %p", \
+ g_thread_self ()); \
+} G_STMT_END
+
+#define GST_QUEUE_MUTEX_UNLOCK G_STMT_START { \
+ GST_CAT_LOG_OBJECT (queue_dataflow, queue, \
+ "unlocking qlock from thread %p", \
+ g_thread_self ()); \
+ g_mutex_unlock (queue->qlock); \
+} G_STMT_END
+
+
typedef struct _GstQueueEventResponse
{
GstEvent *event;
static void gst_queue_base_init (GstQueueClass * klass);
static void gst_queue_class_init (GstQueueClass * klass);
static void gst_queue_init (GstQueue * queue);
-static void gst_queue_dispose (GObject * object);
+static void gst_queue_finalize (GObject * object);
static void gst_queue_set_property (GObject * object,
guint prop_id, const GValue * value, GParamSpec * pspec);
0, G_MAXUINT64, -1, G_PARAM_READWRITE));
/* set several parent class virtual functions */
- gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_queue_dispose);
+ gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_queue_finalize);
gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_queue_set_property);
gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_queue_get_property);
"initialized queue's not_empty & not_full conditions");
}
+/* called only once, as opposed to dispose */
static void
-gst_queue_dispose (GObject * object)
+gst_queue_finalize (GObject * object)
{
GstQueue *queue = GST_QUEUE (object);
- gst_element_set_state (GST_ELEMENT (queue), GST_STATE_NULL);
+ GST_DEBUG_OBJECT (queue, "finalizing queue");
while (!g_queue_is_empty (queue->queue)) {
GstData *data = g_queue_pop_head (queue->queue);
g_mutex_free (queue->event_lock);
g_queue_free (queue->events);
- if (G_OBJECT_CLASS (parent_class)->dispose)
- G_OBJECT_CLASS (parent_class)->dispose (object);
+ if (G_OBJECT_CLASS (parent_class)->finalize)
+ G_OBJECT_CLASS (parent_class)->finalize (object);
}
static GstCaps *
restart:
/* we have to lock the queue since we span threads */
- GST_CAT_LOG_OBJECT (queue_dataflow, queue, "locking t:%p", g_thread_self ());
- g_mutex_lock (queue->qlock);
- GST_CAT_LOG_OBJECT (queue_dataflow, queue, "locked t:%p", g_thread_self ());
+ GST_QUEUE_MUTEX_LOCK;
gst_queue_handle_pending_events (queue);
queue->cur_level.bytes >= queue->max_size.bytes) ||
(queue->max_size.time > 0 &&
queue->cur_level.time >= queue->max_size.time))) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_OVERRUN], 0);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
/* how are we going to make space for this buffer? */
switch (queue->leaky) {
GST_CAT_DEBUG_OBJECT (queue_dataflow, queue,
"queue is full, leaking buffer on upstream end");
/* now we can clean up and exit right away */
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
goto out_unref;
/* leak first buffer in the queue */
* half of state change executes */
if (queue->interrupt) {
GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "interrupted");
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->sinkpad),
GST_ELEMENT (queue))) {
goto out_unref;
/* this means the other end is shut down. Try to
* signal to resolve the error */
if (!queue->may_deadlock) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
gst_data_unref (data);
GST_ELEMENT_ERROR (queue, CORE, THREAD, (NULL),
("deadlock found, shutting down source pad elements"));
* that, we handle pending upstream events here, too. */
gst_queue_handle_pending_events (queue);
- STATUS (queue, "waiting for item_del signal");
+ STATUS (queue, "waiting for item_del signal from thread using qlock");
g_cond_wait (queue->item_del, queue->qlock);
- STATUS (queue, "received item_del signal");
+ STATUS (queue, "received item_del signal from thread using qlock");
}
STATUS (queue, "post-full wait");
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
break;
}
}
GST_CAT_LOG_OBJECT (queue_dataflow, queue, "signalling item_add");
g_cond_signal (queue->item_add);
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
return;
restart:
/* have to lock for thread-safety */
- GST_CAT_LOG_OBJECT (queue_dataflow, queue, "locking t:%p", g_thread_self ());
- g_mutex_lock (queue->qlock);
- GST_CAT_LOG_OBJECT (queue_dataflow, queue, "locked t:%p", g_thread_self ());
+ GST_QUEUE_MUTEX_LOCK;
if (queue->queue->length == 0 ||
(queue->min_threshold.buffers > 0 &&
queue->cur_level.bytes < queue->min_threshold.bytes) ||
(queue->min_threshold.time > 0 &&
queue->cur_level.time < queue->min_threshold.time)) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_UNDERRUN], 0);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
STATUS (queue, "pre-empty wait");
while (queue->queue->length == 0 ||
* change executes. */
if (queue->interrupt) {
GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "interrupted");
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->srcpad),
GST_ELEMENT (queue)))
return GST_DATA (gst_event_new (GST_EVENT_INTERRUPT));
if (GST_STATE (queue) != GST_STATE_PLAYING) {
/* this means the other end is shut down */
if (!queue->may_deadlock) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
GST_ELEMENT_ERROR (queue, CORE, THREAD, (NULL),
("deadlock found, shutting down sink pad elements"));
goto restart;
g_get_current_time (&timeout);
g_time_val_add (&timeout, queue->block_timeout / 1000);
+    GST_LOG_OBJECT (queue, "g_cond_timed_wait using qlock from thread %p",
+ g_thread_self ());
if (!g_cond_timed_wait (queue->item_add, queue->qlock, &timeout)) {
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
"Sending filler event");
return GST_DATA (gst_event_new_filler ());
}
} else {
+ GST_LOG_OBJECT (queue, "doing g_cond_wait using qlock from thread %p",
+ g_thread_self ());
g_cond_wait (queue->item_add, queue->qlock);
+ GST_LOG_OBJECT (queue, "done g_cond_wait using qlock from thread %p",
+ g_thread_self ());
}
STATUS (queue, "got item_add signal");
}
STATUS (queue, "post-empty wait");
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
}
/* There's something in the list now, whatever it is */
GST_CAT_LOG_OBJECT (queue_dataflow, queue, "signalling item_del");
g_cond_signal (queue->item_del);
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
/* FIXME: I suppose this needs to be locked, since the EOS
* bit affects the pipeline state. However, that bit is
GST_CAT_DEBUG_OBJECT (queue_dataflow, queue, "got event %p (%d)",
event, GST_EVENT_TYPE (event));
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
if (gst_element_get_state (GST_ELEMENT (queue)) == GST_STATE_PLAYING) {
GstQueueEventResponse er;
g_get_current_time (&timeout);
g_time_val_add (&timeout, 500 * 1000); /* half a second */
+    GST_LOG_OBJECT (queue, "doing g_cond_timed_wait using qlock from thread %p",
+ g_thread_self ());
if (!g_cond_timed_wait (queue->event_done, queue->qlock, &timeout) &&
!er.handled) {
GST_CAT_WARNING_OBJECT (queue_dataflow, queue,
}
}
handled:
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
return res;
}
queue = GST_QUEUE (element);
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
queue->interrupt = TRUE;
g_cond_signal (queue->item_add);
g_cond_signal (queue->item_del);
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
return TRUE;
}
/* lock the queue so another thread (not in sync with this thread's state)
* can't call this queue's _get (or whatever)
*/
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
switch (GST_STATE_TRANSITION (element)) {
case GST_STATE_NULL_TO_READY:
g_cond_signal (queue->item_add);
ret = GST_STATE_FAILURE;
- goto error;
+ goto unlock;
} else {
GstScheduler *src_sched, *sink_sched;
GST_ELEMENT_NAME (queue));
ret = GST_STATE_FAILURE;
- goto error;
+ goto unlock;
}
}
queue->interrupt = FALSE;
gst_pad_set_active (queue->sinkpad, TRUE);
gst_pad_set_active (queue->srcpad, TRUE);
-error:
- g_mutex_unlock (queue->qlock);
+unlock:
+ GST_QUEUE_MUTEX_UNLOCK;
GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "done with state change");
/* someone could change levels here, and since this
* affects the get/put funcs, we need to lock for safety. */
- g_mutex_lock (queue->qlock);
+ GST_QUEUE_MUTEX_LOCK;
switch (prop_id) {
case ARG_MAX_SIZE_BYTES:
break;
}
- g_mutex_unlock (queue->qlock);
+ GST_QUEUE_MUTEX_UNLOCK;
}
static void