}
static GstFlowReturn
-gst_v4l2_buffer_pool_poll (GstV4l2BufferPool * pool)
+gst_v4l2_buffer_pool_poll (GstV4l2BufferPool * pool, gboolean wait)
{
gint ret;
+ GstClockTime timeout;
+
+ if (wait)
+ timeout = GST_CLOCK_TIME_NONE;
+ else
+ timeout = 0;
/* In RW mode there is no queue, hence no need to wait while the queue is
* empty */
if (pool->obj->mode != GST_V4L2_IO_RW) {
GST_OBJECT_LOCK (pool);
+
+ if (!wait && pool->empty) {
+ GST_OBJECT_UNLOCK (pool);
+ goto no_buffers;
+ }
+
while (pool->empty)
g_cond_wait (&pool->empty_cond, GST_OBJECT_GET_LOCK (pool));
+
GST_OBJECT_UNLOCK (pool);
}
- if (!pool->can_poll_device)
- goto done;
+ if (!pool->can_poll_device) {
+ if (wait)
+ goto done;
+ else
+ goto no_buffers;
+ }
GST_LOG_OBJECT (pool, "polling device");
again:
- ret = gst_poll_wait (pool->poll, GST_CLOCK_TIME_NONE);
+ ret = gst_poll_wait (pool->poll, timeout);
if (G_UNLIKELY (ret < 0)) {
switch (errno) {
case EBUSY:
if (gst_poll_fd_has_error (pool->poll, &pool->pollfd))
goto select_error;
+ if (ret == 0)
+ goto no_buffers;
+
done:
return GST_FLOW_OK;
("poll error %d: %s (%d)", ret, g_strerror (errno), errno));
return GST_FLOW_ERROR;
}
+no_buffers:
+ return GST_FLOW_CUSTOM_SUCCESS;
}
static GstFlowReturn
}
static GstFlowReturn
-gst_v4l2_buffer_pool_dqbuf (GstV4l2BufferPool * pool, GstBuffer ** buffer)
+gst_v4l2_buffer_pool_dqbuf (GstV4l2BufferPool * pool, GstBuffer ** buffer,
+ gboolean wait)
{
GstFlowReturn res;
- GstBuffer *outbuf;
+ GstBuffer *outbuf = NULL;
GstV4l2Object *obj = pool->obj;
GstClockTime timestamp;
GstV4l2MemoryGroup *group;
gsize size;
gint i;
- if ((res = gst_v4l2_buffer_pool_poll (pool)) != GST_FLOW_OK)
+ if ((res = gst_v4l2_buffer_pool_poll (pool, wait)) < GST_FLOW_OK)
goto poll_failed;
+ if (res == GST_FLOW_CUSTOM_SUCCESS) {
+ GST_LOG_OBJECT (pool, "nothing to dequeue");
+ goto done;
+ }
+
GST_LOG_OBJECT (pool, "dequeueing a buffer");
res = gst_v4l2_allocator_dqbuf (pool->vallocator, &group);
done:
*buffer = outbuf;
- return GST_FLOW_OK;
+ return res;
/* ERRORS */
poll_failed:
/* just dequeue a buffer, we basically use the queue of v4l2 as the
* storage for our buffers. This function does poll first so we can
* interrupt it fine. */
- ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer);
+ ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer, TRUE);
break;
}
default:
gst_buffer_map (buf, &map, GST_MAP_WRITE);
do {
- if ((res = gst_v4l2_buffer_pool_poll (pool)) != GST_FLOW_OK)
+ if ((res = gst_v4l2_buffer_pool_poll (pool, TRUE)) != GST_FLOW_OK)
goto poll_error;
amount = obj->read (obj->video_fd, map.data, toread);
}
/* buffer not from our pool, grab a frame and copy it into the target */
- if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp)) != GST_FLOW_OK)
+ if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp, TRUE))
+ != GST_FLOW_OK)
goto done;
/* An empty buffer on capture indicates the end of stream */
case GST_V4L2_IO_MMAP:
{
GstBuffer *to_queue = NULL;
+ GstBuffer *buffer;
GstV4l2MemoryGroup *group;
gint index;
* otherwise the pool will think it is outstanding and will refuse to stop. */
gst_buffer_unref (to_queue);
+ /* release as many buffers as possible */
+ while (gst_v4l2_buffer_pool_dqbuf (pool, &buffer, FALSE) ==
+ GST_FLOW_OK) {
+ if (buffer->pool == NULL)
+ gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
+ }
+
if (g_atomic_int_get (&pool->num_queued) >= pool->min_latency) {
- GstBuffer *out;
/* all buffers are queued, try to dequeue one and release it back
* into the pool so that _acquire can get to it again. */
- ret = gst_v4l2_buffer_pool_dqbuf (pool, &out);
- if (ret == GST_FLOW_OK && out->pool == NULL)
+ ret = gst_v4l2_buffer_pool_dqbuf (pool, &buffer, TRUE);
+ if (ret == GST_FLOW_OK && buffer->pool == NULL)
/* release the rendered buffer back into the pool. This wakes up any
* thread waiting for a buffer in _acquire(). */
- gst_v4l2_buffer_pool_release_buffer (bpool, out);
+ gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
}
break;
}