#include "gstvideofilter.h"
#include <gst/video/video.h>
+#include <gst/video/gstvideometa.h>
+#include <gst/video/gstvideopool.h>
GST_DEBUG_CATEGORY_STATIC (gst_video_filter_debug);
#define GST_CAT_DEFAULT gst_video_filter_debug
-static void gst_video_filter_class_init (gpointer g_class, gpointer class_data);
-static void gst_video_filter_init (GTypeInstance * instance, gpointer g_class);
+#define gst_video_filter_parent_class parent_class
+G_DEFINE_ABSTRACT_TYPE (GstVideoFilter, gst_video_filter,
+ GST_TYPE_BASE_TRANSFORM);
-static GstBaseTransformClass *parent_class = NULL;
+/* Answer the allocation query downstream. This is only called for
+ * non-passthrough cases */
+static gboolean
+gst_video_filter_propose_allocation (GstBaseTransform * trans, GstQuery * query)
+{
+ GstVideoFilter *filter = GST_VIDEO_FILTER_CAST (trans);
+ GstBufferPool *pool;
+ GstCaps *caps;
+ gboolean need_pool;
+ guint size;
+
+ gst_query_parse_allocation (query, &caps, &need_pool);
+
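+  /* propose buffers large enough for one frame in the negotiated input format */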
+ size = GST_VIDEO_INFO_SIZE (&filter->in_info);
+
+ if (need_pool) {
+ GstStructure *structure;
+
+ pool = gst_video_buffer_pool_new ();
+
+ structure = gst_buffer_pool_get_config (pool);
+ gst_buffer_pool_config_set (structure, caps, size, 0, 0, 0, 15);
+ if (!gst_buffer_pool_set_config (pool, structure))
+ goto config_failed;
+ } else
+ pool = NULL;
+
+ gst_query_set_allocation_params (query, size, 0, 0, 0, 15, pool);
+  if (pool)
+    gst_object_unref (pool);
+
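+  /* signal upstream that we can handle GstVideoMeta on input buffers */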
+ gst_query_add_allocation_meta (query, GST_VIDEO_META_API);
-GType
-gst_video_filter_get_type (void)
+ return TRUE;
+
+ /* ERRORS */
+config_failed:
+ {
+ GST_ERROR_OBJECT (filter, "failed to set config");
+ gst_object_unref (pool);
+ return FALSE;
+ }
+}
+
+/* configure the allocation query that was answered downstream; we can set
+ * some extra properties on it. Only called when not in passthrough mode. */
+static gboolean
+gst_video_filter_decide_allocation (GstBaseTransform * trans, GstQuery * query)
{
- static GType video_filter_type = 0;
-
- if (!video_filter_type) {
- static const GTypeInfo video_filter_info = {
- sizeof (GstVideoFilterClass),
- NULL,
- NULL,
- gst_video_filter_class_init,
- NULL,
- NULL,
- sizeof (GstVideoFilter),
- 0,
- gst_video_filter_init,
- };
-
- video_filter_type = g_type_register_static (GST_TYPE_BASE_TRANSFORM,
- "GstVideoFilter", &video_filter_info, G_TYPE_FLAG_ABSTRACT);
+ GstBufferPool *pool = NULL;
+ guint size, min, max, prefix, alignment;
+
+ gst_query_parse_allocation_params (query, &size, &min, &max, &prefix,
+ &alignment, &pool);
+
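+  /* if downstream provided a pool, request that it adds GstVideoMeta to its buffers */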
+ if (pool) {
+ GstStructure *config;
+
+ config = gst_buffer_pool_get_config (pool);
+ gst_buffer_pool_config_add_option (config,
+ GST_BUFFER_POOL_OPTION_VIDEO_META);
+ gst_buffer_pool_set_config (pool, config);
}
- return video_filter_type;
+ return TRUE;
+}
+
+
+/* the buffer size on either side only depends on that side's caps,
+ * not on the buffer size of the other side */
+static gboolean
+gst_video_filter_transform_size (GstBaseTransform * btrans,
+ GstPadDirection direction, GstCaps * caps, gsize size,
+ GstCaps * othercaps, gsize * othersize)
+{
+ gboolean ret = TRUE;
+ GstVideoInfo info;
+
+ g_assert (size);
+
+ ret = gst_video_info_from_caps (&info, othercaps);
+ if (ret)
+ *othersize = info.size;
+
+ return ret;
}
static gboolean
return TRUE;
}
+static gboolean
+gst_video_filter_set_caps (GstBaseTransform * trans, GstCaps * incaps,
+ GstCaps * outcaps)
+{
+ GstVideoFilter *filter = GST_VIDEO_FILTER_CAST (trans);
+ GstVideoFilterClass *fclass;
+ GstVideoInfo in_info, out_info;
+ gboolean res;
+
+ /* input caps */
+ if (!gst_video_info_from_caps (&in_info, incaps))
+ goto invalid_caps;
+
+ /* output caps */
+ if (!gst_video_info_from_caps (&out_info, outcaps))
+ goto invalid_caps;
+
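+  /* give the subclass a chance to inspect or reject the new video info */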
+ fclass = GST_VIDEO_FILTER_GET_CLASS (filter);
+ if (fclass->set_info)
+ res = fclass->set_info (filter, incaps, &in_info, outcaps, &out_info);
+ else
+ res = TRUE;
+
+ if (res) {
+ filter->in_info = in_info;
+ filter->out_info = out_info;
+ }
+ filter->negotiated = res;
+
+ return res;
+
+ /* ERRORS */
+invalid_caps:
+ {
+ GST_ERROR_OBJECT (filter, "invalid caps");
+ filter->negotiated = FALSE;
+ return FALSE;
+ }
+}
+
+static GstFlowReturn
+gst_video_filter_transform (GstBaseTransform * trans, GstBuffer * inbuf,
+ GstBuffer * outbuf)
+{
+ GstFlowReturn res;
+ GstVideoFilter *filter = GST_VIDEO_FILTER_CAST (trans);
+ GstVideoFilterClass *fclass;
+
+ if (G_UNLIKELY (!filter->negotiated))
+ goto unknown_format;
+
+ fclass = GST_VIDEO_FILTER_GET_CLASS (filter);
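+  /* map both buffers as video frames and hand them to the subclass */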
+ if (fclass->transform_frame) {
+ GstVideoFrame in_frame, out_frame;
+
+ if (!gst_video_frame_map (&in_frame, &filter->in_info, inbuf, GST_MAP_READ))
+ goto invalid_buffer;
+
+    if (!gst_video_frame_map (&out_frame, &filter->out_info, outbuf,
+            GST_MAP_WRITE)) {
+      gst_video_frame_unmap (&in_frame);
+      goto invalid_buffer;
+    }
+
+ res = fclass->transform_frame (filter, &in_frame, &out_frame);
+
+ gst_video_frame_unmap (&out_frame);
+ gst_video_frame_unmap (&in_frame);
+ } else
+ res = GST_FLOW_OK;
+
+ return res;
+
+ /* ERRORS */
+unknown_format:
+ {
+ GST_ELEMENT_ERROR (filter, CORE, NOT_IMPLEMENTED, (NULL),
+ ("unknown format"));
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
+invalid_buffer:
+ {
+ GST_ELEMENT_WARNING (filter, CORE, NOT_IMPLEMENTED, (NULL),
+ ("invalid video buffer received"));
+ return GST_FLOW_OK;
+ }
+}
+
+static GstFlowReturn
+gst_video_filter_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
+{
+ GstFlowReturn res;
+ GstVideoFilter *filter = GST_VIDEO_FILTER_CAST (trans);
+ GstVideoFilterClass *fclass;
+
+ if (G_UNLIKELY (!filter->negotiated))
+ goto unknown_format;
+
+ fclass = GST_VIDEO_FILTER_GET_CLASS (filter);
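+  /* map the buffer for reading and writing and let the subclass transform it in place */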
+ if (fclass->transform_frame_ip) {
+ GstVideoFrame frame;
+
+ if (!gst_video_frame_map (&frame, &filter->in_info, buf, GST_MAP_READWRITE))
+ goto invalid_buffer;
+
+ res = fclass->transform_frame_ip (filter, &frame);
+
+ gst_video_frame_unmap (&frame);
+ } else
+ res = GST_FLOW_OK;
+
+ return res;
+
+ /* ERRORS */
+unknown_format:
+ {
+ GST_ELEMENT_ERROR (filter, CORE, NOT_IMPLEMENTED, (NULL),
+ ("unknown format"));
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
+invalid_buffer:
+ {
+ GST_ELEMENT_WARNING (filter, CORE, NOT_IMPLEMENTED, (NULL),
+ ("invalid video buffer received"));
+ return GST_FLOW_OK;
+ }
+}
+
static void
-gst_video_filter_class_init (gpointer g_class, gpointer class_data)
+gst_video_filter_class_init (GstVideoFilterClass * g_class)
{
GstBaseTransformClass *trans_class;
GstVideoFilterClass *klass;
klass = (GstVideoFilterClass *) g_class;
trans_class = (GstBaseTransformClass *) klass;
+ trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_video_filter_set_caps);
+ trans_class->propose_allocation =
+ GST_DEBUG_FUNCPTR (gst_video_filter_propose_allocation);
+ trans_class->decide_allocation =
+ GST_DEBUG_FUNCPTR (gst_video_filter_decide_allocation);
+ trans_class->transform_size =
+ GST_DEBUG_FUNCPTR (gst_video_filter_transform_size);
trans_class->get_unit_size =
GST_DEBUG_FUNCPTR (gst_video_filter_get_unit_size);
-
- parent_class = g_type_class_peek_parent (klass);
+ trans_class->transform = GST_DEBUG_FUNCPTR (gst_video_filter_transform);
+ trans_class->transform_ip = GST_DEBUG_FUNCPTR (gst_video_filter_transform_ip);
GST_DEBUG_CATEGORY_INIT (gst_video_filter_debug, "videofilter", 0,
"videofilter");
}
static void
-gst_video_filter_init (GTypeInstance * instance, gpointer g_class)
+gst_video_filter_init (GstVideoFilter * instance)
{
GstVideoFilter *videofilter = GST_VIDEO_FILTER (instance);
GST_DEBUG_OBJECT (videofilter, "gst_video_filter_init");
- videofilter->inited = FALSE;
+ videofilter->negotiated = FALSE;
/* enable QoS */
gst_base_transform_set_qos_enabled (GST_BASE_TRANSFORM (videofilter), TRUE);
}
#define __GST_VIDEO_FILTER_H__
#include <gst/base/gstbasetransform.h>
+#include <gst/video/video.h>
G_BEGIN_DECLS
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_VIDEO_FILTER))
#define GST_IS_VIDEO_FILTER_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_VIDEO_FILTER))
+#define GST_VIDEO_FILTER_CAST(obj) ((GstVideoFilter *)(obj))
struct _GstVideoFilter {
GstBaseTransform element;
- gboolean inited;
+ gboolean negotiated;
+ GstVideoInfo in_info;
+ GstVideoInfo out_info;
};
/**
*/
struct _GstVideoFilterClass {
GstBaseTransformClass parent_class;
+
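+  /* notified of the actual negotiated input and output video info */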
+ gboolean (*set_info) (GstVideoFilter *filter,
+ GstCaps *incaps, GstVideoInfo *in_info,
+ GstCaps *outcaps, GstVideoInfo *out_info);
+
+ /* transform */
+ GstFlowReturn (*transform_frame) (GstVideoFilter *filter,
+ GstVideoFrame *inframe, GstVideoFrame *outframe);
+ GstFlowReturn (*transform_frame_ip) (GstVideoFilter *trans, GstVideoFrame *frame);
};
GType gst_video_filter_get_type (void);
#endif
#include "gstvideoconvert.h"
+
#include <gst/video/video.h>
#include <gst/video/gstvideometa.h>
#include <gst/video/gstvideopool.h>
static void gst_video_convert_get_property (GObject * object,
guint property_id, GValue * value, GParamSpec * pspec);
-static gboolean gst_video_convert_set_caps (GstBaseTransform * btrans,
- GstCaps * incaps, GstCaps * outcaps);
-static gboolean gst_video_convert_transform_size (GstBaseTransform * btrans,
- GstPadDirection direction, GstCaps * caps, gsize size,
- GstCaps * othercaps, gsize * othersize);
-static GstFlowReturn gst_video_convert_transform (GstBaseTransform * btrans,
- GstBuffer * inbuf, GstBuffer * outbuf);
+static gboolean gst_video_convert_set_info (GstVideoFilter * filter,
+ GstCaps * incaps, GstVideoInfo * in_info, GstCaps * outcaps,
+ GstVideoInfo * out_info);
+static GstFlowReturn gst_video_convert_transform_frame (GstVideoFilter * filter,
+ GstVideoFrame * in_frame, GstVideoFrame * out_frame);
static GType
dither_method_get_type (void)
return result;
}
-/* Answer the allocation query downstream. This is only called for
- * non-passthrough cases */
static gboolean
-gst_video_convert_propose_allocation (GstBaseTransform * trans,
- GstQuery * query)
-{
- GstVideoConvert *space = GST_VIDEO_CONVERT_CAST (trans);
- GstBufferPool *pool;
- GstCaps *caps;
- gboolean need_pool;
- guint size;
-
- gst_query_parse_allocation (query, &caps, &need_pool);
-
- size = GST_VIDEO_INFO_SIZE (&space->from_info);
-
- if (need_pool) {
- GstStructure *structure;
-
- pool = gst_video_buffer_pool_new ();
-
- structure = gst_buffer_pool_get_config (pool);
- gst_buffer_pool_config_set (structure, caps, size, 0, 0, 0, 15);
- if (!gst_buffer_pool_set_config (pool, structure))
- goto config_failed;
- } else
- pool = NULL;
-
- gst_query_set_allocation_params (query, size, 0, 0, 0, 15, pool);
- gst_object_unref (pool);
-
- gst_query_add_allocation_meta (query, GST_VIDEO_META_API);
-
- return TRUE;
-
- /* ERRORS */
-config_failed:
- {
- GST_ERROR_OBJECT (space, "failed to set config.");
- gst_object_unref (pool);
- return FALSE;
- }
-}
-
-/* configure the allocation query that was answered downstream, we can configure
- * some properties on it. Only called in passthrough mode. */
-static gboolean
-gst_video_convert_decide_allocation (GstBaseTransform * trans, GstQuery * query)
-{
- GstBufferPool *pool = NULL;
- guint size, min, max, prefix, alignment;
-
- gst_query_parse_allocation_params (query, &size, &min, &max, &prefix,
- &alignment, &pool);
-
- if (pool) {
- GstStructure *config;
-
- config = gst_buffer_pool_get_config (pool);
- gst_buffer_pool_config_add_option (config,
- GST_BUFFER_POOL_OPTION_VIDEO_META);
- gst_buffer_pool_set_config (pool, config);
- }
- return TRUE;
-}
-
-static gboolean
-gst_video_convert_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
- GstCaps * outcaps)
+gst_video_convert_set_info (GstVideoFilter * filter,
+ GstCaps * incaps, GstVideoInfo * in_info, GstCaps * outcaps,
+ GstVideoInfo * out_info)
{
GstVideoConvert *space;
- GstVideoInfo in_info;
- GstVideoInfo out_info;
ColorSpaceColorSpec in_spec, out_spec;
gboolean interlaced;
- space = GST_VIDEO_CONVERT_CAST (btrans);
+ space = GST_VIDEO_CONVERT_CAST (filter);
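+  /* free any previously created converter before configuring a new one */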
if (space->convert) {
videoconvert_convert_free (space->convert);
}
/* input caps */
- if (!gst_video_info_from_caps (&in_info, incaps))
- goto invalid_caps;
-
- if (in_info.finfo->flags & GST_VIDEO_FORMAT_FLAG_RGB) {
+ if (GST_VIDEO_INFO_IS_RGB (in_info)) {
in_spec = COLOR_SPEC_RGB;
- } else if (in_info.finfo->flags & GST_VIDEO_FORMAT_FLAG_YUV) {
- if (in_info.colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT709)
+ } else if (GST_VIDEO_INFO_IS_YUV (in_info)) {
+ if (in_info->colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT709)
in_spec = COLOR_SPEC_YUV_BT709;
else
in_spec = COLOR_SPEC_YUV_BT470_6;
}
/* output caps */
- if (!gst_video_info_from_caps (&out_info, outcaps))
- goto invalid_caps;
-
- if (out_info.finfo->flags & GST_VIDEO_FORMAT_FLAG_RGB) {
+ if (GST_VIDEO_INFO_IS_RGB (out_info)) {
out_spec = COLOR_SPEC_RGB;
- } else if (out_info.finfo->flags & GST_VIDEO_FORMAT_FLAG_YUV) {
- if (out_info.colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT709)
+ } else if (GST_VIDEO_INFO_IS_YUV (out_info)) {
+ if (out_info->colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT709)
out_spec = COLOR_SPEC_YUV_BT709;
else
out_spec = COLOR_SPEC_YUV_BT470_6;
}
/* these must match */
- if (in_info.width != out_info.width || in_info.height != out_info.height ||
- in_info.fps_n != out_info.fps_n || in_info.fps_d != out_info.fps_d)
+ if (in_info->width != out_info->width || in_info->height != out_info->height
+ || in_info->fps_n != out_info->fps_n || in_info->fps_d != out_info->fps_d)
goto format_mismatch;
/* if present, these must match too */
- if (in_info.par_n != out_info.par_n || in_info.par_d != out_info.par_d)
+ if (in_info->par_n != out_info->par_n || in_info->par_d != out_info->par_d)
goto format_mismatch;
/* if present, these must match too */
- if ((in_info.flags & GST_VIDEO_FLAG_INTERLACED) !=
- (out_info.flags & GST_VIDEO_FLAG_INTERLACED))
+ if ((in_info->flags & GST_VIDEO_FLAG_INTERLACED) !=
+ (out_info->flags & GST_VIDEO_FLAG_INTERLACED))
goto format_mismatch;
- space->from_info = in_info;
space->from_spec = in_spec;
- space->to_info = out_info;
space->to_spec = out_spec;
- interlaced = (in_info.flags & GST_VIDEO_FLAG_INTERLACED) != 0;
+ interlaced = (in_info->flags & GST_VIDEO_FLAG_INTERLACED) != 0;
space->convert =
- videoconvert_convert_new (GST_VIDEO_INFO_FORMAT (&out_info), out_spec,
- GST_VIDEO_INFO_FORMAT (&in_info), in_spec, in_info.width, in_info.height);
+ videoconvert_convert_new (GST_VIDEO_INFO_FORMAT (out_info), out_spec,
+ GST_VIDEO_INFO_FORMAT (in_info), in_spec, in_info->width,
+ in_info->height);
if (space->convert == NULL)
goto no_convert;
videoconvert_convert_set_interlaced (space->convert, interlaced);
/* palette, only for from data */
- if (GST_VIDEO_INFO_FORMAT (&space->from_info) ==
+ if (GST_VIDEO_INFO_FORMAT (in_info) ==
GST_VIDEO_FORMAT_RGB8_PALETTED
- && GST_VIDEO_INFO_FORMAT (&space->to_info) ==
- GST_VIDEO_FORMAT_RGB8_PALETTED) {
+ && GST_VIDEO_INFO_FORMAT (out_info) == GST_VIDEO_FORMAT_RGB8_PALETTED) {
goto format_mismatch;
- } else if (GST_VIDEO_INFO_FORMAT (&space->from_info) ==
- GST_VIDEO_FORMAT_RGB8_PALETTED) {
+ } else if (GST_VIDEO_INFO_FORMAT (in_info) == GST_VIDEO_FORMAT_RGB8_PALETTED) {
GstBuffer *palette;
guint32 *data;
gst_buffer_unmap (palette, data, -1);
gst_buffer_unref (palette);
- } else if (GST_VIDEO_INFO_FORMAT (&space->to_info) ==
- GST_VIDEO_FORMAT_RGB8_PALETTED) {
+ } else if (GST_VIDEO_INFO_FORMAT (out_info) == GST_VIDEO_FORMAT_RGB8_PALETTED) {
const guint32 *palette;
GstBuffer *p_buf;
gst_buffer_unref (p_buf);
}
- GST_DEBUG ("reconfigured %d %d", GST_VIDEO_INFO_FORMAT (&space->from_info),
- GST_VIDEO_INFO_FORMAT (&space->to_info));
-
- space->negotiated = TRUE;
+ GST_DEBUG ("reconfigured %d %d", GST_VIDEO_INFO_FORMAT (in_info),
+ GST_VIDEO_INFO_FORMAT (out_info));
return TRUE;
/* ERRORS */
-invalid_caps:
- {
- GST_ERROR_OBJECT (space, "invalid caps");
- goto error_done;
- }
format_mismatch:
{
GST_ERROR_OBJECT (space, "input and output formats do not match");
- goto error_done;
+ return FALSE;
}
no_convert:
{
GST_ERROR_OBJECT (space, "could not create converter");
- goto error_done;
+ return FALSE;
}
invalid_palette:
{
GST_ERROR_OBJECT (space, "invalid palette");
- goto error_done;
- }
-error_done:
- {
- space->negotiated = FALSE;
return FALSE;
}
}
GstElementClass *gstelement_class = (GstElementClass *) klass;
GstBaseTransformClass *gstbasetransform_class =
(GstBaseTransformClass *) klass;
+ GstVideoFilterClass *gstvideofilter_class = (GstVideoFilterClass *) klass;
gobject_class->set_property = gst_video_convert_set_property;
gobject_class->get_property = gst_video_convert_get_property;
gstbasetransform_class->transform_caps =
GST_DEBUG_FUNCPTR (gst_video_convert_transform_caps);
- gstbasetransform_class->set_caps =
- GST_DEBUG_FUNCPTR (gst_video_convert_set_caps);
- gstbasetransform_class->transform_size =
- GST_DEBUG_FUNCPTR (gst_video_convert_transform_size);
- gstbasetransform_class->propose_allocation =
- GST_DEBUG_FUNCPTR (gst_video_convert_propose_allocation);
- gstbasetransform_class->decide_allocation =
- GST_DEBUG_FUNCPTR (gst_video_convert_decide_allocation);
- gstbasetransform_class->transform =
- GST_DEBUG_FUNCPTR (gst_video_convert_transform);
gstbasetransform_class->passthrough_on_same_caps = TRUE;
+ gstvideofilter_class->set_info =
+ GST_DEBUG_FUNCPTR (gst_video_convert_set_info);
+ gstvideofilter_class->transform_frame =
+ GST_DEBUG_FUNCPTR (gst_video_convert_transform_frame);
+
g_object_class_install_property (gobject_class, PROP_DITHER,
g_param_spec_enum ("dither", "Dither", "Apply dithering while converting",
dither_method_get_type (), DITHER_NONE,
static void
gst_video_convert_init (GstVideoConvert * space)
{
- space->negotiated = FALSE;
}
void
}
}
-static gboolean
-gst_video_convert_transform_size (GstBaseTransform * btrans,
- GstPadDirection direction, GstCaps * caps, gsize size,
- GstCaps * othercaps, gsize * othersize)
-{
- gboolean ret = TRUE;
- GstVideoInfo info;
-
- g_assert (size);
-
- ret = gst_video_info_from_caps (&info, othercaps);
- if (ret)
- *othersize = info.size;
-
- return ret;
-}
-
static GstFlowReturn
-gst_video_convert_transform (GstBaseTransform * btrans, GstBuffer * inbuf,
- GstBuffer * outbuf)
+gst_video_convert_transform_frame (GstVideoFilter * filter,
+ GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
GstVideoConvert *space;
- GstVideoFrame in_frame, out_frame;
-
- space = GST_VIDEO_CONVERT_CAST (btrans);
- GST_DEBUG ("from %s -> to %s", GST_VIDEO_INFO_NAME (&space->from_info),
- GST_VIDEO_INFO_NAME (&space->to_info));
+ space = GST_VIDEO_CONVERT_CAST (filter);
- if (G_UNLIKELY (!space->negotiated))
- goto unknown_format;
+ GST_DEBUG ("from %s -> to %s", GST_VIDEO_INFO_NAME (&filter->in_info),
+ GST_VIDEO_INFO_NAME (&filter->out_info));
videoconvert_convert_set_dither (space->convert, space->dither);
- if (!gst_video_frame_map (&in_frame, &space->from_info, inbuf, GST_MAP_READ))
- goto invalid_buffer;
-
- if (!gst_video_frame_map (&out_frame, &space->to_info, outbuf, GST_MAP_WRITE))
- goto invalid_buffer;
-
- videoconvert_convert_convert (space->convert, &out_frame, &in_frame);
-
- gst_video_frame_unmap (&out_frame);
- gst_video_frame_unmap (&in_frame);
+ videoconvert_convert_convert (space->convert, out_frame, in_frame);
/* baseclass copies timestamps */
- GST_DEBUG ("from %s -> to %s done", GST_VIDEO_INFO_NAME (&space->from_info),
- GST_VIDEO_INFO_NAME (&space->to_info));
+ GST_DEBUG ("from %s -> to %s done", GST_VIDEO_INFO_NAME (&filter->in_info),
+ GST_VIDEO_INFO_NAME (&filter->out_info));
return GST_FLOW_OK;
-
- /* ERRORS */
-unknown_format:
- {
- GST_ELEMENT_ERROR (space, CORE, NOT_IMPLEMENTED, (NULL),
- ("attempting to convert colorspaces between unknown formats"));
- return GST_FLOW_NOT_NEGOTIATED;
- }
-invalid_buffer:
- {
- GST_ELEMENT_WARNING (space, CORE, NOT_IMPLEMENTED, (NULL),
- ("invalid video buffer received"));
- return GST_FLOW_OK;
- }
-#if 0
-not_supported:
- {
- GST_ELEMENT_ERROR (space, CORE, NOT_IMPLEMENTED, (NULL),
- ("cannot convert between formats"));
- return GST_FLOW_NOT_SUPPORTED;
- }
-#endif
}
static gboolean
struct _GstVideoConvert {
GstVideoFilter element;
- GstVideoInfo from_info;
- GstVideoInfo to_info;
- gboolean negotiated;
-
ColorSpaceColorSpec from_spec;
ColorSpaceColorSpec to_spec;
/* base transform vmethods */
static GstCaps *gst_video_scale_transform_caps (GstBaseTransform * trans,
GstPadDirection direction, GstCaps * caps, GstCaps * filter);
-static gboolean gst_video_scale_set_caps (GstBaseTransform * trans,
- GstCaps * in, GstCaps * out);
-static gboolean gst_video_scale_get_unit_size (GstBaseTransform * trans,
- GstCaps * caps, gsize * size);
-static gboolean gst_video_scale_propose_allocation (GstBaseTransform * trans,
- GstQuery * query);
-static gboolean gst_video_scale_decide_allocation (GstBaseTransform * trans,
- GstQuery * query);
-static GstFlowReturn gst_video_scale_transform (GstBaseTransform * trans,
- GstBuffer * in, GstBuffer * out);
static void gst_video_scale_fixate_caps (GstBaseTransform * base,
GstPadDirection direction, GstCaps * caps, GstCaps * othercaps);
+static gboolean gst_video_scale_set_info (GstVideoFilter * filter,
+ GstCaps * in, GstVideoInfo * in_info, GstCaps * out,
+ GstVideoInfo * out_info);
+static GstFlowReturn gst_video_scale_transform_frame (GstVideoFilter * filter,
+ GstVideoFrame * in, GstVideoFrame * out);
+
static void gst_video_scale_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
static void gst_video_scale_get_property (GObject * object, guint prop_id,
GObjectClass *gobject_class = (GObjectClass *) klass;
GstElementClass *element_class = (GstElementClass *) klass;
GstBaseTransformClass *trans_class = (GstBaseTransformClass *) klass;
+ GstVideoFilterClass *filter_class = (GstVideoFilterClass *) klass;
gobject_class->finalize = (GObjectFinalizeFunc) gst_video_scale_finalize;
gobject_class->set_property = gst_video_scale_set_property;
trans_class->transform_caps =
GST_DEBUG_FUNCPTR (gst_video_scale_transform_caps);
- trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_video_scale_set_caps);
- trans_class->get_unit_size =
- GST_DEBUG_FUNCPTR (gst_video_scale_get_unit_size);
- trans_class->propose_allocation =
- GST_DEBUG_FUNCPTR (gst_video_scale_propose_allocation);
- trans_class->decide_allocation =
- GST_DEBUG_FUNCPTR (gst_video_scale_decide_allocation);
- trans_class->transform = GST_DEBUG_FUNCPTR (gst_video_scale_transform);
trans_class->fixate_caps = GST_DEBUG_FUNCPTR (gst_video_scale_fixate_caps);
trans_class->src_event = GST_DEBUG_FUNCPTR (gst_video_scale_src_event);
+
+ filter_class->set_info = GST_DEBUG_FUNCPTR (gst_video_scale_set_info);
+ filter_class->transform_frame =
+ GST_DEBUG_FUNCPTR (gst_video_scale_transform_frame);
}
static void
return ret;
}
-/* Answer the allocation query downstream. This is only called for
- * non-passthrough cases */
-static gboolean
-gst_video_scale_propose_allocation (GstBaseTransform * trans, GstQuery * query)
-{
- GstVideoScale *scale = GST_VIDEO_SCALE_CAST (trans);
- GstBufferPool *pool;
- GstCaps *caps;
- gboolean need_pool;
- guint size;
-
- gst_query_parse_allocation (query, &caps, &need_pool);
-
- size = GST_VIDEO_INFO_SIZE (&scale->from_info);
-
- if (need_pool) {
- GstStructure *structure;
-
- pool = gst_video_buffer_pool_new ();
-
- structure = gst_buffer_pool_get_config (pool);
- gst_buffer_pool_config_set (structure, caps, size, 0, 0, 0, 15);
- if (!gst_buffer_pool_set_config (pool, structure))
- goto config_failed;
- } else
- pool = NULL;
-
- gst_query_set_allocation_params (query, size, 0, 0, 0, 15, pool);
- gst_object_unref (pool);
-
- gst_query_add_allocation_meta (query, GST_VIDEO_META_API);
-
- return TRUE;
-
- /* ERRORS */
-config_failed:
- {
- GST_ERROR_OBJECT (scale, "failed to set config.");
- gst_object_unref (pool);
- return FALSE;
- }
-}
-
-/* configure the allocation query that was answered downstream, we can configure
- * some properties on it. Only called in passthrough mode. */
-static gboolean
-gst_video_scale_decide_allocation (GstBaseTransform * trans, GstQuery * query)
-{
- GstBufferPool *pool = NULL;
- guint size, min, max, prefix, alignment;
-
- gst_query_parse_allocation_params (query, &size, &min, &max, &prefix,
- &alignment, &pool);
-
- if (pool) {
- GstStructure *config;
-
- config = gst_buffer_pool_get_config (pool);
- gst_buffer_pool_config_add_option (config,
- GST_BUFFER_POOL_OPTION_VIDEO_META);
- gst_buffer_pool_set_config (pool, config);
- }
- return TRUE;
-}
-
static gboolean
-gst_video_scale_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
+gst_video_scale_set_info (GstVideoFilter * filter, GstCaps * in,
+ GstVideoInfo * in_info, GstCaps * out, GstVideoInfo * out_info)
{
- GstVideoScale *videoscale = GST_VIDEO_SCALE (trans);
- gboolean ret;
- GstVideoInfo in_info, out_info;
+ GstVideoScale *videoscale = GST_VIDEO_SCALE (filter);
gint from_dar_n, from_dar_d, to_dar_n, to_dar_d;
- ret = gst_video_info_from_caps (&in_info, in);
- ret &= gst_video_info_from_caps (&out_info, out);
- if (!ret)
- goto invalid_formats;
-
- if (!gst_util_fraction_multiply (in_info.width,
- in_info.height, out_info.par_n, out_info.par_d, &from_dar_n,
+ if (!gst_util_fraction_multiply (in_info->width,
+ in_info->height, out_info->par_n, out_info->par_d, &from_dar_n,
&from_dar_d)) {
from_dar_n = from_dar_d = -1;
}
- if (!gst_util_fraction_multiply (out_info.width,
- out_info.height, out_info.par_n, out_info.par_d, &to_dar_n,
+ if (!gst_util_fraction_multiply (out_info->width,
+ out_info->height, out_info->par_n, out_info->par_d, &to_dar_n,
&to_dar_d)) {
to_dar_n = to_dar_d = -1;
}
gint n, d, to_h, to_w;
if (from_dar_n != -1 && from_dar_d != -1
- && gst_util_fraction_multiply (from_dar_n, from_dar_d, out_info.par_n,
- out_info.par_d, &n, &d)) {
- to_h = gst_util_uint64_scale_int (out_info.width, d, n);
- if (to_h <= out_info.height) {
- videoscale->borders_h = out_info.height - to_h;
+ && gst_util_fraction_multiply (from_dar_n, from_dar_d,
+ out_info->par_n, out_info->par_d, &n, &d)) {
+ to_h = gst_util_uint64_scale_int (out_info->width, d, n);
+ if (to_h <= out_info->height) {
+ videoscale->borders_h = out_info->height - to_h;
videoscale->borders_w = 0;
} else {
- to_w = gst_util_uint64_scale_int (out_info.height, n, d);
- g_assert (to_w <= out_info.width);
+ to_w = gst_util_uint64_scale_int (out_info->height, n, d);
+ g_assert (to_w <= out_info->width);
videoscale->borders_h = 0;
- videoscale->borders_w = out_info.width - to_w;
+ videoscale->borders_w = out_info->width - to_w;
}
} else {
GST_WARNING_OBJECT (videoscale, "Can't calculate borders");
if (videoscale->tmp_buf)
g_free (videoscale->tmp_buf);
- videoscale->tmp_buf = g_malloc (out_info.width * 8 * 4);
+ videoscale->tmp_buf = g_malloc (out_info->width * 8 * 4);
- gst_base_transform_set_passthrough (trans,
- (in_info.width == out_info.width && in_info.height == out_info.height));
+ gst_base_transform_set_passthrough (GST_BASE_TRANSFORM (filter),
+ (in_info->width == out_info->width
+ && in_info->height == out_info->height));
GST_DEBUG_OBJECT (videoscale, "from=%dx%d (par=%d/%d dar=%d/%d), size %"
G_GSIZE_FORMAT " -> to=%dx%d (par=%d/%d dar=%d/%d borders=%d:%d), "
"size %" G_GSIZE_FORMAT,
- in_info.width, in_info.height, out_info.par_n, out_info.par_d,
- from_dar_n, from_dar_d, in_info.size, out_info.width,
- out_info.height, out_info.par_n, out_info.par_d, to_dar_n, to_dar_d,
- videoscale->borders_w, videoscale->borders_h, out_info.size);
-
- videoscale->from_info = in_info;
- videoscale->to_info = out_info;
-
- return TRUE;
-
- /* ERRORS */
-invalid_formats:
- {
- GST_DEBUG_OBJECT (videoscale, "could not parse formats");
- return FALSE;
- }
-}
-
-static gboolean
-gst_video_scale_get_unit_size (GstBaseTransform * trans, GstCaps * caps,
- gsize * size)
-{
- GstVideoInfo info;
-
- if (!gst_video_info_from_caps (&info, caps))
- return FALSE;
-
- *size = info.size;
+ in_info->width, in_info->height, out_info->par_n, out_info->par_d,
+ from_dar_n, from_dar_d, in_info->size, out_info->width,
+ out_info->height, out_info->par_n, out_info->par_d, to_dar_n, to_dar_d,
+ videoscale->borders_w, videoscale->borders_h, out_info->size);
return TRUE;
}
}
static GstFlowReturn
-gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
- GstBuffer * out)
+gst_video_scale_transform_frame (GstVideoFilter * filter,
+ GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
- GstVideoScale *videoscale = GST_VIDEO_SCALE (trans);
+ GstVideoScale *videoscale = GST_VIDEO_SCALE (filter);
GstFlowReturn ret = GST_FLOW_OK;
- GstVideoFrame in_frame, out_frame;
VSImage dest[4] = { {NULL,}, };
VSImage src[4] = { {NULL,}, };
gint method;
add_borders = videoscale->add_borders;
GST_OBJECT_UNLOCK (videoscale);
- format = GST_VIDEO_INFO_FORMAT (&videoscale->from_info);
+ format = GST_VIDEO_INFO_FORMAT (&filter->in_info);
black = _get_black_for_format (format);
- if (videoscale->from_info.width == 1) {
+ if (filter->in_info.width == 1) {
method = GST_VIDEO_SCALE_NEAREST;
}
if (method == GST_VIDEO_SCALE_4TAP &&
- (videoscale->from_info.width < 4 || videoscale->from_info.height < 4)) {
+ (filter->in_info.width < 4 || filter->in_info.height < 4)) {
method = GST_VIDEO_SCALE_BILINEAR;
}
- gst_video_frame_map (&in_frame, &videoscale->from_info, in, GST_MAP_READ);
- gst_video_frame_map (&out_frame, &videoscale->to_info, out, GST_MAP_WRITE);
-
- for (i = 0; i < GST_VIDEO_FRAME_N_PLANES (&in_frame); i++) {
- gst_video_scale_setup_vs_image (&src[i], &in_frame, i, 0, 0);
- gst_video_scale_setup_vs_image (&dest[i], &out_frame, i,
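+  /* describe each plane of the mapped frames for the scaling routines */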
+ for (i = 0; i < GST_VIDEO_FRAME_N_PLANES (in_frame); i++) {
+ gst_video_scale_setup_vs_image (&src[i], in_frame, i, 0, 0);
+ gst_video_scale_setup_vs_image (&dest[i], out_frame, i,
videoscale->borders_w, videoscale->borders_h);
}
goto unsupported;
}
- GST_LOG_OBJECT (videoscale, "pushing buffer of %" G_GSIZE_FORMAT " bytes",
- gst_buffer_get_size (out));
-
-done:
- gst_video_frame_unmap (&out_frame);
- gst_video_frame_unmap (&in_frame);
-
return ret;
/* ERRORS */
unsupported:
  {
GST_ELEMENT_ERROR (videoscale, STREAM, NOT_IMPLEMENTED, (NULL),
("Unsupported format %d for scaling method %d", format, method));
- ret = GST_FLOW_ERROR;
- goto done;
+ return GST_FLOW_ERROR;
}
unknown_mode:
{
GST_ELEMENT_ERROR (videoscale, STREAM, NOT_IMPLEMENTED, (NULL),
("Unknown scaling method %d", videoscale->method));
- ret = GST_FLOW_ERROR;
- goto done;
+ return GST_FLOW_ERROR;
}
}
static gboolean
gst_video_scale_src_event (GstBaseTransform * trans, GstEvent * event)
{
- GstVideoScale *videoscale = GST_VIDEO_SCALE (trans);
+ GstVideoScale *videoscale = GST_VIDEO_SCALE_CAST (trans);
+ GstVideoFilter *filter = GST_VIDEO_FILTER_CAST (trans);
gboolean ret;
gdouble a;
GstStructure *structure;
structure = (GstStructure *) gst_event_get_structure (event);
if (gst_structure_get_double (structure, "pointer_x", &a)) {
gst_structure_set (structure, "pointer_x", G_TYPE_DOUBLE,
- a * videoscale->from_info.width / videoscale->to_info.width, NULL);
+ a * filter->in_info.width / filter->out_info.width, NULL);
}
if (gst_structure_get_double (structure, "pointer_y", &a)) {
gst_structure_set (structure, "pointer_y", G_TYPE_DOUBLE,
- a * videoscale->from_info.height / videoscale->to_info.height,
- NULL);
+ a * filter->in_info.height / filter->out_info.height, NULL);
}
break;
default:
int submethod;
double envelope;
- /* negotiated stuff */
- GstVideoInfo from_info;
- GstVideoInfo to_info;
-
gint borders_h;
gint borders_w;