/* GStreamer
*
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
- * 2006 Edgard Lima <edgard.lima@indt.org.br>
+ * 2006 Edgard Lima <edgard.lima@gmail.com>
* 2009 Texas Instruments, Inc - http://www.ti.com/
*
* gstv4l2bufferpool.c V4L2 buffer pool class
#include <gstv4l2bufferpool.h>
-#include "v4l2_calls.h"
+#include "gstv4l2object.h"
#include "gst/gst-i18n-plugin.h"
#include <gst/glib-compat-private.h>
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+#include <gst/allocators/gsttizenmemory.h>
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
-GST_DEBUG_CATEGORY_EXTERN (v4l2_debug);
-GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
-#define GST_CAT_DEFAULT v4l2_debug
+GST_DEBUG_CATEGORY_STATIC (v4l2bufferpool_debug);
+GST_DEBUG_CATEGORY_STATIC (CAT_PERFORMANCE);
+#define GST_CAT_DEFAULT v4l2bufferpool_debug
#define GST_V4L2_IMPORT_QUARK gst_v4l2_buffer_pool_import_quark ()
-
/*
* GstV4l2BufferPool:
*/
enum _GstV4l2BufferPoolAcquireFlags
{
- GST_V4L2_POOL_ACQUIRE_FLAG_RESURECT = GST_BUFFER_POOL_ACQUIRE_FLAG_LAST,
- GST_V4L2_BUFFER_POOL_ACQUIRE_FAG_LAST
+ GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_RESURRECT =
+ GST_BUFFER_POOL_ACQUIRE_FLAG_LAST,
+ GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_LAST
};
static void gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool,
GstBuffer * buffer);
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+typedef struct _GstV4l2TizenBuffer GstV4l2TizenBuffer;
+struct _GstV4l2TizenBuffer {
+ int index;
+ GstBuffer *gst_buffer;
+ GstBuffer *v4l2_buffer;
+ GstV4l2BufferPool *v4l2_pool;
+};
+
+/* GDestroyNotify for the TBM (Tizen Buffer Manager) wrapper: returns the
+ * wrapped v4l2 buffer to the pool, decrements the live-buffer count and
+ * signals buffer_cond so a pending streamoff waiting for outstanding
+ * buffers can proceed, then drops the pool ref taken in _new(). */
+static void gst_v4l2_tizen_buffer_finalize (GstV4l2TizenBuffer *tizen_buffer)
+{
+  GstV4l2BufferPool *pool = NULL;
+
+  if (!tizen_buffer) {
+    GST_ERROR ("NULL buffer");
+    return;
+  }
+
+  pool = tizen_buffer->v4l2_pool;
+
+  /* hand the underlying v4l2 buffer back before updating the accounting */
+  gst_v4l2_buffer_pool_release_buffer (GST_BUFFER_POOL_CAST (pool), tizen_buffer->v4l2_buffer);
+
+  g_mutex_lock (&pool->buffer_lock);
+
+  pool->live_buffer_count--;
+
+  GST_DEBUG_OBJECT (pool, "release buffer[%d][tizen:%p,v4l2:%p,gst:%p], live[%d]",
+      tizen_buffer->index, tizen_buffer, tizen_buffer->v4l2_buffer,
+      tizen_buffer->gst_buffer, pool->live_buffer_count);
+
+  /* wake up gst_v4l2_buffer_pool_streamoff(), which may be blocked in
+   * g_cond_wait_until() until the live buffer count reaches zero */
+  g_cond_signal (&pool->buffer_cond);
+
+  g_mutex_unlock (&pool->buffer_lock);
+
+  gst_object_unref (pool);
+
+  g_free(tizen_buffer);
+}
+
+/* Wraps a dequeued v4l2 capture buffer into a fresh GstBuffer backed by a
+ * Tizen surface memory. The wrapper keeps a ref on the pool and registers
+ * gst_v4l2_tizen_buffer_finalize() as destroy-notify, so disposing the
+ * returned gst_buffer re-queues the v4l2 buffer into the pool.
+ * NOTE(review): assumes vallocator->groups[index] and its surface stay
+ * valid for the wrapper's lifetime -- confirm against the allocator. */
+static GstV4l2TizenBuffer *gst_v4l2_tizen_buffer_new (GstBuffer *v4l2_buffer, int index, GstV4l2BufferPool *v4l2_pool)
+{
+  GstV4l2TizenBuffer *tizen_buffer = NULL;
+  GstMemory *memory = NULL;
+
+  tizen_buffer = g_new0 (GstV4l2TizenBuffer, 1);
+  tizen_buffer->index = index;
+  tizen_buffer->v4l2_buffer = v4l2_buffer;
+  tizen_buffer->gst_buffer = gst_buffer_new ();
+  tizen_buffer->v4l2_pool = gst_object_ref (v4l2_pool);
+
+  /* the surface memory owns the wrapper: when the memory is disposed the
+   * finalize callback runs and returns the buffer to the pool */
+  memory = gst_tizen_allocator_alloc_surface (v4l2_pool->tallocator,
+      &v4l2_pool->obj->info, v4l2_pool->vallocator->groups[index]->surface, (gpointer)tizen_buffer,
+      (GDestroyNotify)gst_v4l2_tizen_buffer_finalize);
+
+  gst_buffer_append_memory (tizen_buffer->gst_buffer, memory);
+  gst_buffer_set_size (tizen_buffer->gst_buffer, v4l2_pool->vallocator->s_info.size);
+
+  g_mutex_lock (&v4l2_pool->buffer_lock);
+
+  v4l2_pool->live_buffer_count++;
+
+  GST_DEBUG_OBJECT (v4l2_pool, "new buffer[tizen:%p,v4l2:%p,gst:%p], size[%d], live[%d]",
+      tizen_buffer, v4l2_buffer, tizen_buffer->gst_buffer,
+      v4l2_pool->vallocator->s_info.size, v4l2_pool->live_buffer_count);
+
+  g_mutex_unlock (&v4l2_pool->buffer_lock);
+
+  return tizen_buffer;
+}
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
static gboolean
-gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** group)
+gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** out_group)
{
GstMemory *mem = gst_buffer_peek_memory (buffer, 0);
gboolean valid = FALSE;
if (mem && gst_is_v4l2_memory (mem)) {
GstV4l2Memory *vmem = (GstV4l2Memory *) mem;
+ GstV4l2MemoryGroup *group = vmem->group;
+ gint i;
+
+ if (group->n_mem != gst_buffer_n_memory (buffer))
+ goto done;
+
+ for (i = 0; i < group->n_mem; i++) {
+ if (group->mem[i] != gst_buffer_peek_memory (buffer, i))
+ goto done;
+
+ if (!gst_memory_is_writable (group->mem[i]))
+ goto done;
+ }
+
valid = TRUE;
- if (group)
- *group = vmem->group;
+ if (out_group)
+ *out_group = group;
}
done:
gst_buffer_resize (dest, 0, gst_buffer_get_size (src));
}
- GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, pool, "slow copy into buffer %p",
- dest);
+ gst_buffer_copy_into (dest, src,
+ GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
+ GST_CAT_LOG_OBJECT (CAT_PERFORMANCE, pool, "slow copy into buffer %p", dest);
return GST_FLOW_OK;
if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN &&
finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
+ gsize size[GST_VIDEO_MAX_PLANES] = { 0, };
+ gint i;
+
data->is_frame = TRUE;
if (!gst_video_frame_map (&data->frame, &pool->caps_info, src, flags))
goto invalid_buffer;
+ for (i = 0; i < GST_VIDEO_FORMAT_INFO_N_PLANES (finfo); i++) {
+ if (GST_VIDEO_FORMAT_INFO_IS_TILED (finfo)) {
+ gint tinfo = GST_VIDEO_FRAME_PLANE_STRIDE (&data->frame, i);
+ gint pstride;
+ guint pheight;
+
+ pstride = GST_VIDEO_TILE_X_TILES (tinfo) <<
+ GST_VIDEO_FORMAT_INFO_TILE_WS (finfo);
+
+ pheight = GST_VIDEO_TILE_Y_TILES (tinfo) <<
+ GST_VIDEO_FORMAT_INFO_TILE_HS (finfo);
+
+ size[i] = pstride * pheight;
+ } else {
+ size[i] = GST_VIDEO_FRAME_PLANE_STRIDE (&data->frame, i) *
+ GST_VIDEO_FRAME_COMP_HEIGHT (&data->frame, i);
+ }
+ }
+
+ /* In the single planar API, planes must be contiguous in memory and
+ * therefore they must have expected size. ie: no padding.
+ * To check these conditions, we check that plane 'i' start address
+ * + plane 'i' size equals to plane 'i+1' start address */
+ if (!V4L2_TYPE_IS_MULTIPLANAR (pool->obj->type)) {
+ for (i = 0; i < (GST_VIDEO_FORMAT_INFO_N_PLANES (finfo) - 1); i++) {
+ const struct v4l2_pix_format *pix_fmt = &pool->obj->format.fmt.pix;
+ gpointer tmp;
+ gint estride = gst_v4l2_object_extrapolate_stride (finfo, i,
+ pix_fmt->bytesperline);
+ guint eheight = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (finfo, i,
+ pix_fmt->height);
+
+ tmp = ((guint8 *) data->frame.data[i]) + estride * eheight;
+ if (tmp != data->frame.data[i + 1])
+ goto non_contiguous_mem;
+ }
+ }
+
if (!gst_v4l2_allocator_import_userptr (pool->vallocator, group,
- data->frame.info.size, finfo->n_planes, data->frame.data,
- data->frame.info.offset))
+ data->frame.info.size, finfo->n_planes, data->frame.data, size))
goto import_failed;
} else {
- gsize offset[1] = { 0 };
gpointer ptr[1];
+ gsize size[1];
data->is_frame = FALSE;
goto invalid_buffer;
ptr[0] = data->map.data;
+ size[0] = data->map.size;
if (!gst_v4l2_allocator_import_userptr (pool->vallocator, group,
- data->map.size, 1, ptr, offset))
+ data->map.size, 1, ptr, size))
goto import_failed;
}
gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
data, (GDestroyNotify) _unmap_userptr_frame);
+ gst_buffer_copy_into (dest, src,
+ GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
return ret;
not_our_buffer:
g_slice_free (struct UserPtrData, data);
return GST_FLOW_ERROR;
}
+non_contiguous_mem:
+ {
+ GST_ERROR_OBJECT (pool, "memory is not contiguous or plane size mismatch");
+ _unmap_userptr_frame (data);
+ return GST_FLOW_ERROR;
+ }
import_failed:
{
GST_ERROR_OBJECT (pool, "failed to import data");
gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
gst_buffer_ref (src), (GDestroyNotify) gst_buffer_unref);
+ gst_buffer_copy_into (dest, src,
+ GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
return GST_FLOW_OK;
not_our_buffer:
can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, DMABUF);
break;
case GST_V4L2_IO_RW:
- pool->allocator = g_object_ref (allocator);
+ if (allocator)
+ pool->allocator = g_object_ref (allocator);
pool->params = params;
/* No need to change the configuration */
goto done;
break;
}
+ /* libv4l2 conversion code does not handle CREATE_BUFS, and may lead to
+ * instability and crash, disable it for now */
+ if (can_allocate && obj->fmtdesc->flags & V4L2_FMT_FLAG_EMULATED) {
+ GST_WARNING_OBJECT (pool,
+ "libv4l2 converter detected, disabling CREATE_BUFS");
+ can_allocate = FALSE;
+ GST_OBJECT_FLAG_UNSET (pool->vallocator,
+ GST_V4L2_ALLOCATOR_FLAG_MMAP_CREATE_BUFS
+ | GST_V4L2_ALLOCATOR_FLAG_USERPTR_CREATE_BUFS
+ | GST_V4L2_ALLOCATOR_FLAG_DMABUF_CREATE_BUFS);
+ }
+
if (min_buffers < GST_V4L2_MIN_BUFFERS) {
updated = TRUE;
min_buffers = GST_V4L2_MIN_BUFFERS;
GST_INFO_OBJECT (pool, "increasing minimum buffers to %u", min_buffers);
}
+ /* respect driver requirements */
+ if (min_buffers < obj->min_buffers) {
+ updated = TRUE;
+ min_buffers = obj->min_buffers;
+ GST_INFO_OBJECT (pool, "increasing minimum buffers to %u", min_buffers);
+ }
+
if (max_buffers > VIDEO_MAX_FRAME || max_buffers == 0) {
updated = TRUE;
max_buffers = VIDEO_MAX_FRAME;
GST_BUFFER_POOL_OPTION_VIDEO_META);
}
- if (updated)
- gst_buffer_pool_config_set_params (config, caps, size, min_buffers,
- max_buffers);
+ /* Always update the config to ensure the configured size matches */
+ gst_buffer_pool_config_set_params (config, caps, obj->info.size, min_buffers,
+ max_buffers);
/* keep a GstVideoInfo with defaults for the when we need to copy */
gst_video_info_from_caps (&pool->caps_info, caps);
}
}
+/* Re-allocate a buffer that was lost to the pool (its memory group was
+ * released back to the allocator). Acquires a buffer with the RESURRECT
+ * flag so the base class allocates a new one, then immediately unrefs it,
+ * which queues it back into the v4l2 device via release_buffer.
+ * Connected to the allocator's "group-released" signal; the handler is
+ * blocked for the duration to avoid recursion. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_resurrect_buffer (GstV4l2BufferPool * pool)
+{
+  GstBufferPoolAcquireParams params = { 0 };
+  GstBuffer *buffer = NULL;
+  GstFlowReturn ret;
+
+  GST_DEBUG_OBJECT (pool, "A buffer was lost, reallocating it");
+
+  /* block recursive calls to this function */
+  g_signal_handler_block (pool->vallocator, pool->group_released_handler);
+
+  params.flags =
+      (GstBufferPoolAcquireFlags) GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_RESURRECT |
+      GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;
+  ret =
+      gst_buffer_pool_acquire_buffer (GST_BUFFER_POOL (pool), &buffer, &params);
+
+  if (ret == GST_FLOW_OK)
+    gst_buffer_unref (buffer);
+
+  g_signal_handler_unblock (pool->vallocator, pool->group_released_handler);
+
+  return ret;
+}
+
static gboolean
gst_v4l2_buffer_pool_streamon (GstV4l2BufferPool * pool)
{
GstV4l2Object *obj = pool->obj;
+ if (pool->streaming)
+ return TRUE;
+
switch (obj->mode) {
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_DMABUF_IMPORT:
- if (!pool->streaming) {
- if (v4l2_ioctl (pool->video_fd, VIDIOC_STREAMON, &obj->type) < 0)
- goto streamon_failed;
+ if (!V4L2_TYPE_IS_OUTPUT (pool->obj->type)) {
+ guint num_queued;
+ guint i, n = 0;
+
+ num_queued = g_atomic_int_get (&pool->num_queued);
+ if (num_queued < pool->num_allocated)
+ n = pool->num_allocated - num_queued;
+
+      /* For captures, we need to enqueue buffers before we start streaming,
+       * so the driver doesn't underflow immediately. As we have put them back
+       * into the base class queue, resurrect them, then releasing will queue
+       * them back. */
+ for (i = 0; i < n; i++)
+ gst_v4l2_buffer_pool_resurrect_buffer (pool);
+ }
- pool->streaming = TRUE;
+ if (obj->ioctl (pool->video_fd, VIDIOC_STREAMON, &obj->type) < 0)
+ goto streamon_failed;
- GST_DEBUG_OBJECT (pool, "Started streaming");
- }
+ pool->streaming = TRUE;
+
+ GST_DEBUG_OBJECT (pool, "Started streaming");
break;
default:
break;
}
}
-static gboolean
+/* Call with streamlock held, or when streaming threads are down */
+static void
gst_v4l2_buffer_pool_streamoff (GstV4l2BufferPool * pool)
{
+ GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
GstV4l2Object *obj = pool->obj;
+ gint i;
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ gint64 end_time = 0;
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
+
+ if (!pool->streaming)
+ return;
+
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ if (obj->tbm_output && !V4L2_TYPE_IS_OUTPUT(pool->obj->type)) {
+ g_mutex_lock (&pool->buffer_lock);
+
+ GST_INFO_OBJECT (pool, "live buffer[%d]", pool->live_buffer_count);
+ if (pool->live_buffer_count > 0) {
+ end_time = g_get_monotonic_time () + G_TIME_SPAN_SECOND;
+
+ do {
+ GST_WARNING_OBJECT (pool, "wait for live buffer[%d]", pool->live_buffer_count);
+
+ if (!g_cond_wait_until (&pool->buffer_cond, &pool->buffer_lock, end_time)) {
+ GST_ERROR_OBJECT (pool, "failed to wait live buffer[%d]", pool->live_buffer_count);
+ break;
+ }
+
+ GST_WARNING_OBJECT (pool, "signal received, check again : live count[%d]",
+ pool->live_buffer_count);
+ } while (pool->live_buffer_count > 0);
+ }
+
+ g_mutex_unlock (&pool->buffer_lock);
+ }
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
switch (obj->mode) {
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_DMABUF_IMPORT:
- if (pool->streaming) {
- if (v4l2_ioctl (pool->video_fd, VIDIOC_STREAMOFF, &obj->type) < 0)
- goto streamoff_failed;
- pool->streaming = FALSE;
+ if (obj->ioctl (pool->video_fd, VIDIOC_STREAMOFF, &obj->type) < 0)
+ GST_WARNING_OBJECT (pool, "STREAMOFF failed with errno %d (%s)",
+ errno, g_strerror (errno));
- GST_DEBUG_OBJECT (pool, "Stopped streaming");
- }
+ pool->streaming = FALSE;
+
+ GST_DEBUG_OBJECT (pool, "Stopped streaming");
+
+ if (pool->vallocator)
+ gst_v4l2_allocator_flush (pool->vallocator);
break;
default:
break;
}
- return TRUE;
-
-streamoff_failed:
- {
- GST_ERROR_OBJECT (pool, "error with STREAMOFF %d (%s)", errno,
- g_strerror (errno));
- return FALSE;
- }
-}
-
-static void
-gst_v4l2_buffer_pool_group_released (GstV4l2BufferPool * pool)
-{
- GstBufferPoolAcquireParams params = { 0 };
- GstBuffer *buffer = NULL;
- GstFlowReturn ret;
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (pool->buffers[i]) {
+ GstBuffer *buffer = pool->buffers[i];
+ GstBufferPool *bpool = GST_BUFFER_POOL (pool);
- GST_DEBUG_OBJECT (pool, "A buffer was lost, reallocating it");
+ pool->buffers[i] = NULL;
- params.flags =
- (GstBufferPoolAcquireFlags) GST_V4L2_POOL_ACQUIRE_FLAG_RESURECT;
- ret =
- gst_buffer_pool_acquire_buffer (GST_BUFFER_POOL (pool), &buffer, ¶ms);
+ if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
+ gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
+ else /* Don't re-enqueue capture buffer on stop */
+ pclass->release_buffer (bpool, buffer);
- if (ret == GST_FLOW_OK)
- gst_buffer_unref (buffer);
+ g_atomic_int_add (&pool->num_queued, -1);
+ }
+ }
}
static gboolean
GstCaps *caps;
guint size, min_buffers, max_buffers;
guint max_latency, min_latency, copy_threshold = 0;
- gboolean can_allocate = FALSE;
+ gboolean can_allocate = FALSE, ret = TRUE;
GST_DEBUG_OBJECT (pool, "activating pool");
+ if (pool->other_pool) {
+ GstBuffer *buffer;
+
+ if (!gst_buffer_pool_set_active (pool->other_pool, TRUE))
+ goto other_pool_failed;
+
+ if (gst_buffer_pool_acquire_buffer (pool->other_pool, &buffer, NULL) !=
+ GST_FLOW_OK)
+ goto other_pool_failed;
+
+ if (!gst_v4l2_object_try_import (obj, buffer)) {
+ gst_buffer_unref (buffer);
+ goto cannot_import;
+ }
+ gst_buffer_unref (buffer);
+ }
+
config = gst_buffer_pool_get_config (bpool);
if (!gst_buffer_pool_config_get_params (config, &caps, &size, &min_buffers,
&max_buffers))
goto wrong_config;
- /* TODO Also consider min_buffers_for_output when implemented */
- min_latency = MAX (GST_V4L2_MIN_BUFFERS, obj->min_buffers_for_capture);
+ min_latency = MAX (GST_V4L2_MIN_BUFFERS, obj->min_buffers);
switch (obj->mode) {
case GST_V4L2_IO_RW:
can_allocate = TRUE;
+#ifdef HAVE_LIBV4L2
+      /* This works around an unfixable bug in libv4l2 when RW is emulated on
+       * top of MMAP. In this case, the first read initializes the queues, but
+       * the poll before that will always fail. Doing an empty read forces the
+       * queue to be initialized now. We only do this if we have a streaming
+       * driver. */
+ if (obj->device_caps & V4L2_CAP_STREAMING)
+ obj->read (obj->video_fd, NULL, 0);
+#endif
break;
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_MMAP:
count = gst_v4l2_allocator_start (pool->vallocator, min_buffers,
V4L2_MEMORY_MMAP);
+ pool->num_allocated = count;
if (count < GST_V4L2_MIN_BUFFERS) {
min_buffers = count;
* falling back to copy if the pipeline needed more buffers. This also
   * prevents having to do REQBUFS(N)/REQBUFS(0) every time configure is
* called. */
- if (count != min_buffers) {
- GST_WARNING_OBJECT (pool, "using %u buffers instead of %u",
- count, min_buffers);
+ if (count != min_buffers || pool->enable_copy_threshold) {
+ GST_WARNING_OBJECT (pool,
+ "Uncertain or not enough buffers, enabling copy threshold");
min_buffers = count;
copy_threshold = min_latency;
-
- /* The initial minimum could be provide either by GstBufferPool or
- * driver needs. */
- min_buffers = count;
}
break;
else
max_latency = min_buffers;
- /* FIXME Encoder don't negotiate amount of buffers. If we can't grow the
- * pool, or the minimum is at V4L2 maximum, enabled copy on threshold
- * https://bugzilla.gnome.org/show_bug.cgi?id=732288 */
- if (!can_allocate || min_buffers == VIDEO_MAX_FRAME)
- copy_threshold = min_latency;
-
pool->size = size;
pool->copy_threshold = copy_threshold;
pool->max_latency = max_latency;
pool->min_latency = min_latency;
pool->num_queued = 0;
- if (max_buffers < min_buffers)
+ if (max_buffers != 0 && max_buffers < min_buffers)
max_buffers = min_buffers;
gst_buffer_pool_config_set_params (config, caps, size, min_buffers,
pclass->set_config (bpool, config);
gst_structure_free (config);
- if (pool->other_pool)
- if (!gst_buffer_pool_set_active (pool->other_pool, TRUE))
- goto other_pool_failed;
-
/* now, allocate the buffers: */
if (!pclass->start (bpool))
goto start_failed;
- if (!V4L2_TYPE_IS_OUTPUT (obj->type))
+ if (!V4L2_TYPE_IS_OUTPUT (obj->type)) {
+ if (g_atomic_int_get (&pool->num_queued) < min_buffers)
+ goto queue_failed;
+
pool->group_released_handler =
g_signal_connect_swapped (pool->vallocator, "group-released",
- G_CALLBACK (gst_v4l2_buffer_pool_group_released), pool);
+ G_CALLBACK (gst_v4l2_buffer_pool_resurrect_buffer), pool);
+ ret = gst_v4l2_buffer_pool_streamon (pool);
+ }
- return TRUE;
+ return ret;
/* ERRORS */
wrong_config:
}
start_failed:
{
- GST_ERROR_OBJECT (pool, "failed to start streaming");
+ GST_ERROR_OBJECT (pool, "allocate failed");
return FALSE;
}
other_pool_failed:
{
- GST_ERROR_OBJECT (pool, "failed to active the other pool %"
+ GST_ERROR_OBJECT (pool, "failed to activate the other pool %"
GST_PTR_FORMAT, pool->other_pool);
return FALSE;
}
+queue_failed:
+ {
+ GST_ERROR_OBJECT (pool, "failed to queue buffers into the capture queue");
+ return FALSE;
+ }
+cannot_import:
+ {
+ GST_ERROR_OBJECT (pool, "cannot import buffers from downstream pool");
+ return FALSE;
+ }
+}
+
+/* Stop the v4l2 allocator if the pool owns one. Returns TRUE when the
+ * allocator was fully stopped (or absent); FALSE when buffers are still
+ * outstanding and the allocator reported GST_V4L2_BUSY. */
+static gboolean
+gst_v4l2_buffer_pool_vallocator_stop (GstV4l2BufferPool * pool)
+{
+  GstV4l2Return vret;
+
+  if (!pool->vallocator)
+    return TRUE;
+
+  vret = gst_v4l2_allocator_stop (pool->vallocator);
+
+  if (vret == GST_V4L2_BUSY)
+    GST_WARNING_OBJECT (pool, "some buffers are still outstanding");
+
+  return (vret == GST_V4L2_OK);
+}
static gboolean
gst_v4l2_buffer_pool_stop (GstBufferPool * bpool)
{
GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
- GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
gboolean ret;
- gint i;
+
+ if (pool->orphaned)
+ return gst_v4l2_buffer_pool_vallocator_stop (pool);
GST_DEBUG_OBJECT (pool, "stopping pool");
}
if (pool->other_pool) {
+ gst_buffer_pool_set_active (pool->other_pool, FALSE);
gst_object_unref (pool->other_pool);
pool->other_pool = NULL;
}
- if (!gst_v4l2_buffer_pool_streamoff (pool))
- goto streamoff_failed;
+ gst_v4l2_buffer_pool_streamoff (pool);
- gst_v4l2_allocator_flush (pool->vallocator);
+ ret = GST_BUFFER_POOL_CLASS (parent_class)->stop (bpool);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (pool->buffers[i]) {
- GstBuffer *buffer = pool->buffers[i];
+ if (ret)
+ ret = gst_v4l2_buffer_pool_vallocator_stop (pool);
- pool->buffers[i] = NULL;
+ return ret;
+}
- if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
- gst_buffer_unref (buffer);
- else
- pclass->release_buffer (bpool, buffer);
+/* Orphan the pool: used when we must free the v4l2 queue while buffers
+ * may still be in flight downstream. Stops the pool if possible; when
+ * outstanding buffers make the stop fail, the allocator is orphaned
+ * instead so the REQBUFS queue is freed once the last buffer returns.
+ * On success the caller's reference is consumed and *bpool is NULLed;
+ * returns FALSE if orphaning is unsupported or disabled. */
+gboolean
+gst_v4l2_buffer_pool_orphan (GstBufferPool ** bpool)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (*bpool);
+  gboolean ret;
-    g_atomic_int_add (&pool->num_queued, -1);
-      }
-  }
+  if (!GST_V4L2_ALLOCATOR_CAN_ORPHAN_BUFS (pool->vallocator))
+    return FALSE;
-  ret = GST_BUFFER_POOL_CLASS (parent_class)->stop (bpool);
+  /* escape hatch: environment variable forces the old draining behaviour */
+  if (g_getenv ("GST_V4L2_FORCE_DRAIN"))
+    return FALSE;
-  if (ret) {
-    GstV4l2Return vret;
+  GST_DEBUG_OBJECT (pool, "orphaning pool");
-    vret = gst_v4l2_allocator_stop (pool->vallocator);
+  gst_buffer_pool_set_active (*bpool, FALSE);
+  /*
+   * If the buffer pool has outstanding buffers, it will not be stopped
+   * by the base class when set inactive. Stop it manually and mark it
+   * as orphaned
+   */
+  ret = gst_v4l2_buffer_pool_stop (*bpool);
+  if (!ret)
+    ret = gst_v4l2_allocator_orphan (pool->vallocator);
-    if (vret == GST_V4L2_BUSY)
-      GST_WARNING_OBJECT (pool, "some buffers are still outstanding");
+  if (!ret)
+    goto orphan_failed;
-    ret = (vret == GST_V4L2_OK);
-  }
+  pool->orphaned = TRUE;
+  gst_object_unref (*bpool);
+  *bpool = NULL;
+orphan_failed:
  return ret;
-
-  /* ERRORS */
-streamoff_failed:
-  GST_ERROR_OBJECT (pool, "device refused to stop streaming");
-  return FALSE;
}
static void
gst_v4l2_buffer_pool_flush_stop (GstBufferPool * bpool)
{
GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
- GstV4l2Object *obj = pool->obj;
- gint i;
GST_DEBUG_OBJECT (pool, "stop flushing");
- /* If we haven't started streaming yet, simply call streamon */
- if (!pool->streaming)
- goto streamon;
-
if (pool->other_pool)
gst_buffer_pool_set_flushing (pool->other_pool, FALSE);
- if (!gst_v4l2_buffer_pool_streamoff (pool))
- goto stop_failed;
-
- gst_v4l2_allocator_flush (pool->vallocator);
-
- /* Reset our state */
- switch (obj->mode) {
- case GST_V4L2_IO_RW:
- break;
- case GST_V4L2_IO_MMAP:
- case GST_V4L2_IO_USERPTR:
- case GST_V4L2_IO_DMABUF:
- case GST_V4L2_IO_DMABUF_IMPORT:
- {
- gsize num_allocated;
-
- num_allocated = gst_v4l2_allocator_num_allocated (pool->vallocator);
-
- for (i = 0; i < num_allocated; i++) {
- /* Re-enqueue buffers */
- if (pool->buffers[i]) {
- GstBufferPool *bpool = (GstBufferPool *) pool;
- GstBuffer *buffer = pool->buffers[i];
-
- pool->buffers[i] = NULL;
+ gst_poll_set_flushing (pool->poll, FALSE);
+}
- /* Remove qdata, this will unmap any map data in
- * userptr/dmabuf-import */
- gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer),
- GST_V4L2_IMPORT_QUARK, NULL, NULL);
+static GstFlowReturn
+gst_v4l2_buffer_pool_poll (GstV4l2BufferPool * pool, gboolean wait)
+{
+ gint ret;
+ GstClockTime timeout;
- if (V4L2_TYPE_IS_OUTPUT (obj->type))
- gst_buffer_unref (buffer);
- else
- gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
+ if (wait)
+ timeout = GST_CLOCK_TIME_NONE;
+ else
+ timeout = 0;
- g_atomic_int_add (&pool->num_queued, -1);
- }
- }
+ /* In RW mode there is no queue, hence no need to wait while the queue is
+ * empty */
+ if (pool->obj->mode != GST_V4L2_IO_RW) {
+ GST_OBJECT_LOCK (pool);
- break;
+ if (!wait && pool->empty) {
+ GST_OBJECT_UNLOCK (pool);
+ goto no_buffers;
}
- default:
- g_assert_not_reached ();
- break;
- }
-
-streamon:
- /* Start streaming on capture device only */
- if (!V4L2_TYPE_IS_OUTPUT (obj->type))
- gst_v4l2_buffer_pool_streamon (pool);
-
- gst_poll_set_flushing (pool->poll, FALSE);
- return;
+ while (pool->empty)
+ g_cond_wait (&pool->empty_cond, GST_OBJECT_GET_LOCK (pool));
- /* ERRORS */
-stop_failed:
- {
- GST_ERROR_OBJECT (pool, "device refused to flush");
+ GST_OBJECT_UNLOCK (pool);
}
-}
-
-static GstFlowReturn
-gst_v4l2_buffer_pool_poll (GstV4l2BufferPool * pool)
-{
- gint ret;
- GST_OBJECT_LOCK (pool);
- while (pool->empty)
- g_cond_wait (&pool->empty_cond, GST_OBJECT_GET_LOCK (pool));
- GST_OBJECT_UNLOCK (pool);
-
- if (!pool->can_poll_device)
- goto done;
+ if (!pool->can_poll_device) {
+ if (wait)
+ goto done;
+ else
+ goto no_buffers;
+ }
GST_LOG_OBJECT (pool, "polling device");
again:
- ret = gst_poll_wait (pool->poll, GST_CLOCK_TIME_NONE);
+ ret = gst_poll_wait (pool->poll, timeout);
if (G_UNLIKELY (ret < 0)) {
switch (errno) {
case EBUSY:
if (gst_poll_fd_has_error (pool->poll, &pool->pollfd))
goto select_error;
+ if (ret == 0)
+ goto no_buffers;
+
done:
return GST_FLOW_OK;
("poll error %d: %s (%d)", ret, g_strerror (errno), errno));
return GST_FLOW_ERROR;
}
+no_buffers:
+ return GST_FLOW_CUSTOM_SUCCESS;
}
static GstFlowReturn
-gst_v4l2_buffer_pool_qbuf (GstV4l2BufferPool * pool, GstBuffer * buf)
+gst_v4l2_buffer_pool_qbuf (GstV4l2BufferPool * pool, GstBuffer * buf,
+ GstV4l2MemoryGroup * group)
{
- GstV4l2MemoryGroup *group = NULL;
+ const GstV4l2Object *obj = pool->obj;
+ GstClockTime timestamp;
gint index;
- if (!gst_v4l2_is_buffer_valid (buf, &group)) {
- GST_LOG_OBJECT (pool, "unref copied/invalid buffer %p", buf);
- gst_buffer_unref (buf);
- return GST_FLOW_OK;
- }
-
index = group->buffer.index;
if (pool->buffers[index] != NULL)
GST_LOG_OBJECT (pool, "queuing buffer %i", index);
+ if (V4L2_TYPE_IS_OUTPUT (obj->type)) {
+ enum v4l2_field field;
+
+ /* Except when field is set to alternate, buffer field is the same as
+ * the one defined in format */
+ if (V4L2_TYPE_IS_MULTIPLANAR (obj->type))
+ field = obj->format.fmt.pix_mp.field;
+ else
+ field = obj->format.fmt.pix.field;
+
+    /* NB: At this moment, we can't have alternate mode because it is not
+     * handled yet */
+ if (field == V4L2_FIELD_ALTERNATE) {
+ if (GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_FRAME_FLAG_TFF))
+ field = V4L2_FIELD_TOP;
+ else
+ field = V4L2_FIELD_BOTTOM;
+ }
+
+ group->buffer.field = field;
+ }
+
+ if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
+ timestamp = GST_BUFFER_TIMESTAMP (buf);
+ GST_TIME_TO_TIMEVAL (timestamp, group->buffer.timestamp);
+ }
+
+ GST_OBJECT_LOCK (pool);
g_atomic_int_inc (&pool->num_queued);
pool->buffers[index] = buf;
if (!gst_v4l2_allocator_qbuf (pool->vallocator, group))
goto queue_failed;
- GST_OBJECT_LOCK (pool);
pool->empty = FALSE;
g_cond_signal (&pool->empty_cond);
GST_OBJECT_UNLOCK (pool);
GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_TAG_MEMORY);
g_atomic_int_add (&pool->num_queued, -1);
pool->buffers[index] = NULL;
+ GST_OBJECT_UNLOCK (pool);
return GST_FLOW_ERROR;
}
}
static GstFlowReturn
-gst_v4l2_buffer_pool_dqbuf (GstV4l2BufferPool * pool, GstBuffer ** buffer)
+gst_v4l2_buffer_pool_dqbuf (GstV4l2BufferPool * pool, GstBuffer ** buffer,
+ gboolean wait)
{
GstFlowReturn res;
- GstBuffer *outbuf;
+ GstBuffer *outbuf = NULL;
GstV4l2Object *obj = pool->obj;
GstClockTime timestamp;
GstV4l2MemoryGroup *group;
+ GstVideoMeta *vmeta;
+ gsize size;
gint i;
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ GstV4l2TizenBuffer *tizen_buffer = NULL;
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
- if ((res = gst_v4l2_buffer_pool_poll (pool)) != GST_FLOW_OK)
+ if ((res = gst_v4l2_buffer_pool_poll (pool, wait)) < GST_FLOW_OK)
goto poll_failed;
+ if (res == GST_FLOW_CUSTOM_SUCCESS) {
+ GST_LOG_OBJECT (pool, "nothing to dequeue");
+ goto done;
+ }
+
GST_LOG_OBJECT (pool, "dequeueing a buffer");
- group = gst_v4l2_allocator_dqbuf (pool->vallocator);
- if (group == NULL)
+ res = gst_v4l2_allocator_dqbuf (pool->vallocator, &group);
+ if (res == GST_FLOW_EOS)
+ goto eos;
+ if (res != GST_FLOW_OK)
goto dqbuf_failed;
/* get our GstBuffer with that index from the pool, if the buffer was
timestamp = GST_TIMEVAL_TO_TIME (group->buffer.timestamp);
-#ifndef GST_DISABLE_GST_DEBUG
+ size = 0;
+ vmeta = gst_buffer_get_video_meta (outbuf);
for (i = 0; i < group->n_mem; i++) {
GST_LOG_OBJECT (pool,
"dequeued buffer %p seq:%d (ix=%d), mem %p used %d, plane=%d, flags %08x, ts %"
group->buffer.sequence, group->buffer.index, group->mem[i],
group->planes[i].bytesused, i, group->buffer.flags,
GST_TIME_ARGS (timestamp), pool->num_queued, outbuf);
+
+ if (vmeta) {
+ vmeta->offset[i] = size;
+ size += gst_memory_get_sizes (group->mem[i], NULL, NULL);
+ }
}
+
+ /* Ignore timestamp and field for OUTPUT device */
+ if (V4L2_TYPE_IS_OUTPUT (obj->type))
+ goto done;
+
+  /* Check for driver bug in reporting field */
+ if (group->buffer.field == V4L2_FIELD_ANY) {
+ /* Only warn once to avoid the spamming */
+#ifndef GST_DISABLE_GST_DEBUG
+ if (!pool->has_warned_on_buggy_field) {
+ pool->has_warned_on_buggy_field = TRUE;
+ GST_WARNING_OBJECT (pool,
+ "Driver should never set v4l2_buffer.field to ANY");
+ }
+#endif
+
+ /* Use the value from the format (works for UVC bug) */
+ group->buffer.field = obj->format.fmt.pix.field;
+
+ /* If driver also has buggy S_FMT, assume progressive */
+ if (group->buffer.field == V4L2_FIELD_ANY) {
+#ifndef GST_DISABLE_GST_DEBUG
+ if (!pool->has_warned_on_buggy_field) {
+ pool->has_warned_on_buggy_field = TRUE;
+ GST_WARNING_OBJECT (pool,
+ "Driver should never set v4l2_format.pix.field to ANY");
+ }
#endif
+ group->buffer.field = V4L2_FIELD_NONE;
+ }
+ }
+
/* set top/bottom field first if v4l2_buffer has the information */
- if (group->buffer.field == V4L2_FIELD_INTERLACED_TB) {
- GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
- GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
- } else if (group->buffer.field == V4L2_FIELD_INTERLACED_BT) {
- GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
- GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
- } else {
- GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
- GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+ switch (group->buffer.field) {
+ case V4L2_FIELD_NONE:
+ GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+ GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+ GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+ GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+ break;
+ case V4L2_FIELD_INTERLACED:
+ GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+ if (obj->tv_norm == V4L2_STD_NTSC_M ||
+ obj->tv_norm == V4L2_STD_NTSC_M_JP ||
+ obj->tv_norm == V4L2_STD_NTSC_M_KR) {
+ GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+ } else {
+ GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+ }
+ break;
+ default:
+ GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+ GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+ GST_FIXME_OBJECT (pool,
+ "Unhandled enum v4l2_field %d - treating as progressive",
+ group->buffer.field);
+ break;
}
if (GST_VIDEO_INFO_FORMAT (&obj->info) == GST_VIDEO_FORMAT_ENCODED) {
- if (group->buffer.flags & V4L2_BUF_FLAG_KEYFRAME)
+ if ((group->buffer.flags & V4L2_BUF_FLAG_KEYFRAME) ||
+ GST_V4L2_PIXELFORMAT (obj) == V4L2_PIX_FMT_MJPEG ||
+ GST_V4L2_PIXELFORMAT (obj) == V4L2_PIX_FMT_JPEG ||
+ GST_V4L2_PIXELFORMAT (obj) == V4L2_PIX_FMT_PJPG)
GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
else
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
}
- GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
+ if (group->buffer.flags & V4L2_BUF_FLAG_ERROR)
+ GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_CORRUPTED);
+ GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
+ GST_BUFFER_OFFSET (outbuf) = group->buffer.sequence;
+ GST_BUFFER_OFFSET_END (outbuf) = group->buffer.sequence + 1;
+
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ if (group->surface) {
+ tizen_buffer = gst_v4l2_tizen_buffer_new (outbuf, group->buffer.index, pool);
+ if (!tizen_buffer) {
+ GST_ERROR_OBJECT (pool, "tizen buffer failed for index[%d]", group->buffer.index);
+ goto no_buffer;
+ }
+ outbuf = tizen_buffer->gst_buffer;
+ }
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
+done:
*buffer = outbuf;
- return GST_FLOW_OK;
+ return res;
/* ERRORS */
poll_failed:
GST_DEBUG_OBJECT (pool, "poll error %s", gst_flow_get_name (res));
return res;
}
+eos:
+ {
+ return GST_FLOW_EOS;
+ }
dqbuf_failed:
{
return GST_FLOW_ERROR;
GST_DEBUG_OBJECT (pool, "acquire");
- /* If this is being called to resurect a lost buffer */
- if (params && params->flags & GST_V4L2_POOL_ACQUIRE_FLAG_RESURECT) {
+ /* If this is being called to resurrect a lost buffer */
+ if (params && params->flags & GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_RESURRECT) {
ret = pclass->acquire_buffer (bpool, buffer, params);
goto done;
}
}
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_MMAP:
+ case GST_V4L2_IO_USERPTR:
+ case GST_V4L2_IO_DMABUF_IMPORT:
{
/* just dequeue a buffer, we basically use the queue of v4l2 as the
* storage for our buffers. This function does poll first so we can
* interrupt it fine. */
- ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer);
- if (G_UNLIKELY (ret != GST_FLOW_OK))
- goto done;
- break;
- }
- case GST_V4L2_IO_USERPTR:
- case GST_V4L2_IO_DMABUF_IMPORT:
- {
- /* dequeue filled buffer */
- ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer);
+ ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer, TRUE);
break;
}
default:
GST_DEBUG_OBJECT (pool, "release buffer %p", buffer);
+ /* If the buffer's pool has been orphaned, dispose of it so that
+ * the pool resources can be freed */
+ if (pool->orphaned) {
+ GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
+ pclass->release_buffer (bpool, buffer);
+ return;
+ }
+
switch (obj->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
{
- if (gst_v4l2_is_buffer_valid (buffer, NULL)) {
+ GstV4l2MemoryGroup *group;
+ if (gst_v4l2_is_buffer_valid (buffer, &group)) {
+ GstFlowReturn ret = GST_FLOW_OK;
+
+ gst_v4l2_allocator_reset_group (pool->vallocator, group);
/* queue back in the device */
if (pool->other_pool)
- gst_v4l2_buffer_pool_prepare_buffer (pool, buffer, NULL);
- if (gst_v4l2_buffer_pool_qbuf (pool, buffer) != GST_FLOW_OK)
+ ret = gst_v4l2_buffer_pool_prepare_buffer (pool, buffer, NULL);
+ if (ret != GST_FLOW_OK ||
+ gst_v4l2_buffer_pool_qbuf (pool, buffer, group) != GST_FLOW_OK)
pclass->release_buffer (bpool, buffer);
} else {
        /* Simply release invalid/modified buffer, the allocator will
/* playback, put the buffer back in the queue to refill later. */
pclass->release_buffer (bpool, buffer);
} else {
- /* We keep a ref on queued buffer, so this should never happen */
- g_assert_not_reached ();
+ /* the buffer is queued in the device but maybe not played yet. We just
+ * leave it there and not make it available for future calls to acquire
+ * for now. The buffer will be dequeued and reused later. */
+ GST_LOG_OBJECT (pool, "buffer %u is queued", index);
}
break;
}
}
static void
-gst_v4l2_buffer_pool_finalize (GObject * object)
+gst_v4l2_buffer_pool_dispose (GObject * object)
{
GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (object);
- gint i;
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (pool->buffers[i])
- gst_buffer_replace (&(pool->buffers[i]), NULL);
- }
-
- if (pool->video_fd >= 0)
- v4l2_close (pool->video_fd);
-
- gst_poll_free (pool->poll);
if (pool->vallocator)
gst_object_unref (pool->vallocator);
+ pool->vallocator = NULL;
if (pool->allocator)
gst_object_unref (pool->allocator);
+ pool->allocator = NULL;
+
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ g_cond_clear (&pool->buffer_cond);
+ g_mutex_clear (&pool->buffer_lock);
+ if (pool->tallocator)
+ gst_object_unref (pool->tallocator);
+ pool->tallocator = NULL;
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
if (pool->other_pool)
gst_object_unref (pool->other_pool);
+ pool->other_pool = NULL;
+
+ G_OBJECT_CLASS (parent_class)->dispose (object);
+}
+
+static void
+gst_v4l2_buffer_pool_finalize (GObject * object)
+{
+ GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (object);
+
+ if (pool->video_fd >= 0)
+ pool->obj->close (pool->video_fd);
- /* FIXME Is this required to keep around ? */
+ gst_poll_free (pool->poll);
+
+ /* This can't be done in dispose method because we must not set pointer
+ * to NULL as it is part of the v4l2object and dispose could be called
+ * multiple times */
gst_object_unref (pool->obj->element);
+ g_cond_clear (&pool->empty_cond);
+
/* FIXME have we done enough here ? */
G_OBJECT_CLASS (parent_class)->finalize (object);
pool->can_poll_device = TRUE;
g_cond_init (&pool->empty_cond);
pool->empty = TRUE;
+ pool->orphaned = FALSE;
}
static void
GObjectClass *object_class = G_OBJECT_CLASS (klass);
GstBufferPoolClass *bufferpool_class = GST_BUFFER_POOL_CLASS (klass);
+ object_class->dispose = gst_v4l2_buffer_pool_dispose;
object_class->finalize = gst_v4l2_buffer_pool_finalize;
bufferpool_class->start = gst_v4l2_buffer_pool_start;
bufferpool_class->release_buffer = gst_v4l2_buffer_pool_release_buffer;
bufferpool_class->flush_start = gst_v4l2_buffer_pool_flush_start;
bufferpool_class->flush_stop = gst_v4l2_buffer_pool_flush_stop;
+
+ GST_DEBUG_CATEGORY_INIT (v4l2bufferpool_debug, "v4l2bufferpool", 0,
+ "V4L2 Buffer Pool");
+ GST_DEBUG_CATEGORY_GET (CAT_PERFORMANCE, "GST_PERFORMANCE");
}
/**
gchar *name, *parent_name;
gint fd;
- fd = v4l2_dup (obj->video_fd);
+ fd = obj->dup (obj->video_fd);
if (fd < 0)
goto dup_failed;
pool = (GstV4l2BufferPool *) g_object_new (GST_TYPE_V4L2_BUFFER_POOL,
"name", name, NULL);
+ g_object_ref_sink (pool);
g_free (name);
gst_poll_fd_init (&pool->pollfd);
pool->obj = obj;
pool->can_poll_device = TRUE;
- pool->vallocator =
- gst_v4l2_allocator_new (GST_OBJECT (pool), obj->video_fd, &obj->format);
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ pool->tallocator = gst_tizen_allocator_new ();
+ if (pool->tallocator == NULL)
+ goto allocator_failed;
+
+ g_mutex_init (&pool->buffer_lock);
+ g_cond_init (&pool->buffer_cond);
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
+ pool->vallocator = gst_v4l2_allocator_new (GST_OBJECT (pool), obj);
if (pool->vallocator == NULL)
goto allocator_failed;
}
allocator_failed:
{
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ if (pool->tallocator) {
+ gst_object_unref (pool->tallocator);
+ pool->tallocator = NULL;
+ }
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
GST_ERROR_OBJECT (pool, "Failed to create V4L2 allocator");
+ gst_object_unref (pool);
return NULL;
}
}
gst_buffer_map (buf, &map, GST_MAP_WRITE);
do {
- if ((res = gst_v4l2_buffer_pool_poll (pool)) != GST_FLOW_OK)
+ if ((res = gst_v4l2_buffer_pool_poll (pool, TRUE)) != GST_FLOW_OK)
goto poll_error;
- amount = v4l2_read (obj->video_fd, map.data, toread);
+ amount = obj->read (obj->video_fd, map.data, toread);
if (amount == toread) {
break;
GST_DEBUG_OBJECT (pool, "process buffer %p", buf);
- g_return_val_if_fail (gst_buffer_pool_is_active (bpool), GST_FLOW_ERROR);
-
if (GST_BUFFER_POOL_IS_FLUSHING (pool))
return GST_FLOW_FLUSHING;
GstBuffer *tmp;
if ((*buf)->pool == bpool) {
- if (gst_buffer_get_size (*buf) == 0)
+ guint num_queued;
+ gsize size = gst_buffer_get_size (*buf);
+
+ /* Legacy M2M devices return empty buffer when drained */
+ if (size == 0 && GST_V4L2_IS_M2M (obj->device_caps))
goto eos;
+ if (GST_VIDEO_INFO_FORMAT (&pool->caps_info) !=
+ GST_VIDEO_FORMAT_ENCODED && size < pool->size)
+ goto buffer_truncated;
+
+ num_queued = g_atomic_int_get (&pool->num_queued);
+ GST_TRACE_OBJECT (pool, "Only %i buffer left in the capture queue.",
+ num_queued);
+
+ /* If we have no more buffers and can allocate, it is time to do so */
+ if (num_queued == 0) {
+ if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP)) {
+ ret = gst_v4l2_buffer_pool_resurrect_buffer (pool);
+ if (ret == GST_FLOW_OK)
+ goto done;
+ }
+ }
+
/* start copying buffers when we are running low on buffers */
- if (g_atomic_int_get (&pool->num_queued) < pool->copy_threshold) {
+ if (num_queued < pool->copy_threshold) {
GstBuffer *copy;
if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP)) {
-
- if (gst_buffer_pool_acquire_buffer (bpool, ©,
- NULL) == GST_FLOW_OK) {
- gst_v4l2_buffer_pool_release_buffer (bpool, copy);
+ ret = gst_v4l2_buffer_pool_resurrect_buffer (pool);
+ if (ret == GST_FLOW_OK)
goto done;
- }
}
/* copy the buffer */
*buf = copy;
}
+ ret = GST_FLOW_OK;
/* nothing, data was inside the buffer when we did _acquire() */
goto done;
}
/* buffer not from our pool, grab a frame and copy it into the target */
- if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp)) != GST_FLOW_OK)
+ if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp, TRUE))
+ != GST_FLOW_OK)
goto done;
/* An empty buffer on capture indicates the end of stream */
if (gst_buffer_get_size (tmp) == 0) {
gst_v4l2_buffer_pool_release_buffer (bpool, tmp);
- goto eos;
- }
+ /* Legacy M2M devices return empty buffer when drained */
+ if (GST_V4L2_IS_M2M (obj->device_caps))
+ goto eos;
+ }
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ if (pool->obj->tbm_output && pool->obj->mode == GST_V4L2_IO_DMABUF) {
+ gst_buffer_unref (*buf);
+ *buf = tmp;
+ } else {
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
ret = gst_v4l2_buffer_pool_copy_buffer (pool, *buf, tmp);
/* an queue the buffer again after the copy */
gst_v4l2_buffer_pool_release_buffer (bpool, tmp);
+#ifdef TIZEN_FEATURE_V4L2_TBM_SUPPORT
+ }
+#endif /* TIZEN_FEATURE_V4L2_TBM_SUPPORT */
if (ret != GST_FLOW_OK)
goto copy_failed;
case GST_V4L2_IO_USERPTR:
{
struct UserPtrData *data;
+ GstBuffer *tmp;
/* Replace our buffer with downstream allocated buffer */
data = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buf),
GST_V4L2_IMPORT_QUARK);
- gst_buffer_replace (buf, data->buffer);
+ tmp = gst_buffer_ref (data->buffer);
_unmap_userptr_frame (data);
+
+ /* Now tmp is writable, copy the flags and timestamp */
+ gst_buffer_copy_into (tmp, *buf,
+ GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
+ gst_buffer_replace (buf, tmp);
+ gst_buffer_unref (tmp);
break;
}
/* Replace our buffer with downstream allocated buffer */
tmp = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buf),
GST_V4L2_IMPORT_QUARK);
+
+ gst_buffer_copy_into (tmp, *buf,
+ GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
gst_buffer_replace (buf, tmp);
gst_buffer_unref (tmp);
break;
case GST_V4L2_IO_MMAP:
{
GstBuffer *to_queue = NULL;
+ GstBuffer *buffer;
GstV4l2MemoryGroup *group;
gint index;
GST_LOG_OBJECT (pool, "processing buffer %i from our pool", index);
- index = group->buffer.index;
if (pool->buffers[index] != NULL) {
GST_LOG_OBJECT (pool, "buffer %i already queued, copying", index);
goto copying;
gst_buffer_unref (to_queue);
goto prepare_failed;
}
+
+ /* retrieve the group */
+ gst_v4l2_is_buffer_valid (to_queue, &group);
}
- if ((ret = gst_v4l2_buffer_pool_qbuf (pool, to_queue)) != GST_FLOW_OK)
+ if ((ret = gst_v4l2_buffer_pool_qbuf (pool, to_queue, group))
+ != GST_FLOW_OK)
goto queue_failed;
/* if we are not streaming yet (this is the first buffer, start
* streaming now */
if (!gst_v4l2_buffer_pool_streamon (pool)) {
+ /* don't check return value because qbuf would have failed */
+ gst_v4l2_is_buffer_valid (to_queue, &group);
+
+ /* qbuf has stored to_queue buffer but we are not in
+ * streaming state, so the flush logic won't be performed.
+ * To avoid leaks, flush the allocator and restore the queued
+ * buffer as non-queued */
+ gst_v4l2_allocator_flush (pool->vallocator);
+
+ pool->buffers[group->buffer.index] = NULL;
+
+ gst_mini_object_set_qdata (GST_MINI_OBJECT (to_queue),
+ GST_V4L2_IMPORT_QUARK, NULL, NULL);
gst_buffer_unref (to_queue);
+ g_atomic_int_add (&pool->num_queued, -1);
goto start_failed;
}
+ /* Remove our ref, we will still hold this buffer in acquire as needed,
+ * otherwise the pool will think it is outstanding and will refuse to stop. */
+ gst_buffer_unref (to_queue);
+
+ /* release as many buffers as possible */
+ while (gst_v4l2_buffer_pool_dqbuf (pool, &buffer, FALSE) ==
+ GST_FLOW_OK) {
+ if (buffer->pool == NULL)
+ gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
+ }
+
if (g_atomic_int_get (&pool->num_queued) >= pool->min_latency) {
- GstBuffer *out;
/* all buffers are queued, try to dequeue one and release it back
* into the pool so that _acquire can get to it again. */
- ret = gst_v4l2_buffer_pool_dqbuf (pool, &out);
- if (ret == GST_FLOW_OK)
+ ret = gst_v4l2_buffer_pool_dqbuf (pool, &buffer, TRUE);
+ if (ret == GST_FLOW_OK && buffer->pool == NULL)
/* release the rendered buffer back into the pool. This wakes up any
* thread waiting for a buffer in _acquire(). */
- gst_buffer_unref (out);
+ gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
}
break;
}
GST_ERROR_OBJECT (pool, "failed to copy buffer");
return ret;
}
+buffer_truncated:
+ {
+ GST_WARNING_OBJECT (pool,
+ "Dropping truncated buffer, this is likely a driver bug.");
+ gst_buffer_unref (*buf);
+ *buf = NULL;
+ return GST_V4L2_FLOW_CORRUPTED_BUFFER;
+ }
eos:
{
GST_DEBUG_OBJECT (pool, "end of stream reached");
- return GST_FLOW_EOS;
+ gst_buffer_unref (*buf);
+ *buf = NULL;
+ return GST_V4L2_FLOW_LAST_BUFFER;
}
acquire_failed:
{
gst_object_unref (pool->other_pool);
pool->other_pool = gst_object_ref (other_pool);
}
+
+void
+gst_v4l2_buffer_pool_copy_at_threshold (GstV4l2BufferPool * pool, gboolean copy)
+{
+ GST_OBJECT_LOCK (pool);
+ pool->enable_copy_threshold = copy;
+ GST_OBJECT_UNLOCK (pool);
+}
+
+gboolean
+gst_v4l2_buffer_pool_flush (GstBufferPool * bpool)
+{
+ GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+ gboolean ret = TRUE;
+
+ gst_v4l2_buffer_pool_streamoff (pool);
+
+ if (!V4L2_TYPE_IS_OUTPUT (pool->obj->type))
+ ret = gst_v4l2_buffer_pool_streamon (pool);
+
+ return ret;
+}