goto start_failed;
if (!V4L2_TYPE_IS_OUTPUT (obj->type)) {
+ if (g_atomic_int_get (&pool->num_queued) < min_buffers)
+ goto queue_failed;
+
pool->group_released_handler =
g_signal_connect_swapped (pool->vallocator, "group-released",
G_CALLBACK (gst_v4l2_buffer_pool_resurect_buffer), pool);
GST_PTR_FORMAT, pool->other_pool);
return FALSE;
}
+queue_failed:
+ {
+ GST_ERROR_OBJECT (pool, "failed to queue buffers into the capture queue");
+ return FALSE;
+ }
}
static gboolean
}
static GstFlowReturn
-gst_v4l2_buffer_pool_poll (GstV4l2BufferPool * pool)
+gst_v4l2_buffer_pool_poll (GstV4l2BufferPool * pool, gboolean wait)
{
gint ret;
+ GstClockTime timeout;
+
+ if (wait)
+ timeout = GST_CLOCK_TIME_NONE;
+ else
+ timeout = 0;
/* In RW mode there is no queue, hence no need to wait while the queue is
* empty */
if (pool->obj->mode != GST_V4L2_IO_RW) {
GST_OBJECT_LOCK (pool);
+
+ if (!wait && pool->empty) {
+ GST_OBJECT_UNLOCK (pool);
+ goto no_buffers;
+ }
+
while (pool->empty)
g_cond_wait (&pool->empty_cond, GST_OBJECT_GET_LOCK (pool));
+
GST_OBJECT_UNLOCK (pool);
}
- if (!pool->can_poll_device)
- goto done;
+ if (!pool->can_poll_device) {
+ if (wait)
+ goto done;
+ else
+ goto no_buffers;
+ }
GST_LOG_OBJECT (pool, "polling device");
again:
- ret = gst_poll_wait (pool->poll, GST_CLOCK_TIME_NONE);
+ ret = gst_poll_wait (pool->poll, timeout);
if (G_UNLIKELY (ret < 0)) {
switch (errno) {
case EBUSY:
if (gst_poll_fd_has_error (pool->poll, &pool->pollfd))
goto select_error;
+ if (ret == 0)
+ goto no_buffers;
+
done:
return GST_FLOW_OK;
("poll error %d: %s (%d)", ret, g_strerror (errno), errno));
return GST_FLOW_ERROR;
}
+no_buffers:
+ return GST_FLOW_CUSTOM_SUCCESS;
}
static GstFlowReturn
-gst_v4l2_buffer_pool_qbuf (GstV4l2BufferPool * pool, GstBuffer * buf)
+gst_v4l2_buffer_pool_qbuf (GstV4l2BufferPool * pool, GstBuffer * buf,
+ GstV4l2MemoryGroup * group)
{
- GstV4l2MemoryGroup *group = NULL;
const GstV4l2Object *obj = pool->obj;
GstClockTime timestamp;
gint index;
- if (!gst_v4l2_is_buffer_valid (buf, &group)) {
- GST_ERROR_OBJECT (pool, "invalid buffer %p", buf);
- return GST_FLOW_ERROR;
- }
-
index = group->buffer.index;
if (pool->buffers[index] != NULL)
}
static GstFlowReturn
-gst_v4l2_buffer_pool_dqbuf (GstV4l2BufferPool * pool, GstBuffer ** buffer)
+gst_v4l2_buffer_pool_dqbuf (GstV4l2BufferPool * pool, GstBuffer ** buffer,
+ gboolean wait)
{
GstFlowReturn res;
- GstBuffer *outbuf;
+ GstBuffer *outbuf = NULL;
GstV4l2Object *obj = pool->obj;
GstClockTime timestamp;
GstV4l2MemoryGroup *group;
+ GstVideoMeta *vmeta;
+ gsize size;
gint i;
- if ((res = gst_v4l2_buffer_pool_poll (pool)) != GST_FLOW_OK)
+ if ((res = gst_v4l2_buffer_pool_poll (pool, wait)) < GST_FLOW_OK)
goto poll_failed;
+ if (res == GST_FLOW_CUSTOM_SUCCESS) {
+ GST_LOG_OBJECT (pool, "nothing to dequeue");
+ goto done;
+ }
+
GST_LOG_OBJECT (pool, "dequeueing a buffer");
res = gst_v4l2_allocator_dqbuf (pool->vallocator, &group);
timestamp = GST_TIMEVAL_TO_TIME (group->buffer.timestamp);
-#ifndef GST_DISABLE_GST_DEBUG
+ size = 0;
+ vmeta = gst_buffer_get_video_meta (outbuf);
for (i = 0; i < group->n_mem; i++) {
GST_LOG_OBJECT (pool,
"dequeued buffer %p seq:%d (ix=%d), mem %p used %d, plane=%d, flags %08x, ts %"
group->buffer.sequence, group->buffer.index, group->mem[i],
group->planes[i].bytesused, i, group->buffer.flags,
GST_TIME_ARGS (timestamp), pool->num_queued, outbuf);
+
+ if (vmeta) {
+ vmeta->offset[i] = size;
+ size += gst_memory_get_sizes (group->mem[i], NULL, NULL);
+ }
}
-#endif
/* Ignore timestamp and field for OUTPUT device */
if (V4L2_TYPE_IS_OUTPUT (obj->type))
done:
*buffer = outbuf;
- return GST_FLOW_OK;
+ return res;
/* ERRORS */
poll_failed:
/* just dequeue a buffer, we basically use the queue of v4l2 as the
* storage for our buffers. This function does poll first so we can
* interrupt it fine. */
- ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer);
+ ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer, TRUE);
break;
}
default:
/* queue back in the device */
if (pool->other_pool)
gst_v4l2_buffer_pool_prepare_buffer (pool, buffer, NULL);
- if (gst_v4l2_buffer_pool_qbuf (pool, buffer) != GST_FLOW_OK)
+ if (gst_v4l2_buffer_pool_qbuf (pool, buffer, group) != GST_FLOW_OK)
pclass->release_buffer (bpool, buffer);
} else {
/* Simply release invalid/modified buffer, the allocator will
gst_buffer_map (buf, &map, GST_MAP_WRITE);
do {
- if ((res = gst_v4l2_buffer_pool_poll (pool)) != GST_FLOW_OK)
+ if ((res = gst_v4l2_buffer_pool_poll (pool, TRUE)) != GST_FLOW_OK)
goto poll_error;
amount = obj->read (obj->video_fd, map.data, toread);
guint num_queued;
gsize size = gst_buffer_get_size (*buf);
- if (size == 0) {
- if (GST_BUFFER_FLAG_IS_SET (*buf, GST_BUFFER_FLAG_CORRUPTED))
- goto buffer_corrupted;
- else
- goto eos;
- }
+ /* Legacy M2M devices return empty buffer when drained */
+ if (size == 0 && GST_V4L2_IS_M2M (obj->device_caps))
+ goto eos;
- /* verify that buffer contains a full frame for raw video */
- if (GST_VIDEO_INFO_FORMAT (&obj->info) != GST_VIDEO_FORMAT_ENCODED
- && size < GST_VIDEO_INFO_SIZE (&obj->info)) {
- GST_WARNING_OBJECT (pool, "Invalid buffer size, this is likely "
- "due to a bug in your driver, dropping");
- goto buffer_corrupted;
- }
+ if (GST_VIDEO_INFO_FORMAT (&pool->caps_info) !=
+ GST_VIDEO_FORMAT_ENCODED && size < pool->size)
+ goto buffer_truncated;
num_queued = g_atomic_int_get (&pool->num_queued);
GST_TRACE_OBJECT (pool, "Only %i buffer left in the capture queue.",
}
/* buffer not from our pool, grab a frame and copy it into the target */
- if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp)) != GST_FLOW_OK)
+ if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp, TRUE))
+ != GST_FLOW_OK)
goto done;
/* An empty buffer on capture indicates the end of stream */
if (gst_buffer_get_size (tmp) == 0) {
- gboolean corrupted = GST_BUFFER_FLAG_IS_SET (tmp,
- GST_BUFFER_FLAG_CORRUPTED);
-
gst_v4l2_buffer_pool_release_buffer (bpool, tmp);
- if (corrupted)
- goto buffer_corrupted;
- else
+ /* Legacy M2M devices return empty buffer when drained */
+ if (GST_V4L2_IS_M2M (obj->device_caps))
goto eos;
}
case GST_V4L2_IO_MMAP:
{
GstBuffer *to_queue = NULL;
+ GstBuffer *buffer;
GstV4l2MemoryGroup *group;
gint index;
gst_buffer_unref (to_queue);
goto prepare_failed;
}
+
+ /* retrieve the group */
+ gst_v4l2_is_buffer_valid (to_queue, &group);
}
- if ((ret = gst_v4l2_buffer_pool_qbuf (pool, to_queue)) != GST_FLOW_OK)
+ if ((ret = gst_v4l2_buffer_pool_qbuf (pool, to_queue, group))
+ != GST_FLOW_OK)
goto queue_failed;
/* if we are not streaming yet (this is the first buffer, start
* otherwise the pool will think it is outstanding and will refuse to stop. */
gst_buffer_unref (to_queue);
+ /* release as many buffers as possible */
+ while (gst_v4l2_buffer_pool_dqbuf (pool, &buffer, FALSE) ==
+ GST_FLOW_OK) {
+ if (buffer->pool == NULL)
+ gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
+ }
+
if (g_atomic_int_get (&pool->num_queued) >= pool->min_latency) {
- GstBuffer *out;
/* all buffers are queued, try to dequeue one and release it back
* into the pool so that _acquire can get to it again. */
- ret = gst_v4l2_buffer_pool_dqbuf (pool, &out);
- if (ret == GST_FLOW_OK && out->pool == NULL)
+ ret = gst_v4l2_buffer_pool_dqbuf (pool, &buffer, TRUE);
+ if (ret == GST_FLOW_OK && buffer->pool == NULL)
/* release the rendered buffer back into the pool. This wakes up any
* thread waiting for a buffer in _acquire(). */
- gst_v4l2_buffer_pool_release_buffer (bpool, out);
+ gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
}
break;
}
GST_ERROR_OBJECT (pool, "failed to copy buffer");
return ret;
}
-buffer_corrupted:
+buffer_truncated:
{
- GST_WARNING_OBJECT (pool, "Dropping corrupted buffer without payload");
+ GST_WARNING_OBJECT (pool,
+ "Dropping truncated buffer, this is likely a driver bug.");
gst_buffer_unref (*buf);
*buf = NULL;
return GST_V4L2_FLOW_CORRUPTED_BUFFER;