* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
- * @file convert2tensor.c
+ * @file tensor_converter.c
* @date 26 Mar 2018
* @brief GStreamer plugin to convert media types to tensors (as a filter for other general neural network filters)
* @see http://github.com/TO-BE-DETERMINED-SOON
*/
/**
- * SECTION:element-convert2tensor
+ * SECTION:element-tensor_converter
*
* A filter that converts media stream to tensor stream for NN frameworks.
* The output is always in the format of other/tensor
* <refsect2>
* <title>Example launch line</title>
* |[
- * gst-launch -v -m fakesrc ! convert2tensor ! fakesink silent=TRUE
+ * gst-launch -v -m fakesrc ! tensor_converter ! fakesink silent=TRUE
* ]|
* </refsect2>
*/
#include <glib.h>
#include <glib/gprintf.h>
-#include "convert2tensor.h"
+#include "tensor_converter.h"
-GST_DEBUG_CATEGORY_STATIC (gst_convert2tensor_debug);
-#define GST_CAT_DEFAULT gst_convert2tensor_debug
+GST_DEBUG_CATEGORY_STATIC (gst_tensor_converter_debug);
+#define GST_CAT_DEFAULT gst_tensor_converter_debug
/* Filter signals and args */
enum
"framerate = (fraction) [ 0/1, 2147483647/1 ]")
);
-#define gst_convert2tensor_parent_class parent_class
-G_DEFINE_TYPE (GstConvert2Tensor, gst_convert2tensor, GST_TYPE_BASE_TRANSFORM);
+#define gst_tensor_converter_parent_class parent_class
+G_DEFINE_TYPE (GstTensor_Converter, gst_tensor_converter, GST_TYPE_BASE_TRANSFORM);
-static void gst_convert2tensor_set_property (GObject * object, guint prop_id,
+static void gst_tensor_converter_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
-static void gst_convert2tensor_get_property (GObject * object, guint prop_id,
+static void gst_tensor_converter_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
/* GstBaseTransformer vmethod implementations */
-static GstFlowReturn gst_convert2tensor_transform(GstBaseTransform *trans,
+static GstFlowReturn gst_tensor_converter_transform(GstBaseTransform *trans,
GstBuffer *inbuf,
GstBuffer *outbuf);
-static GstFlowReturn gst_convert2tensor_transform_ip(GstBaseTransform *trans,
+static GstFlowReturn gst_tensor_converter_transform_ip(GstBaseTransform *trans,
GstBuffer *buf);
-static GstCaps* gst_convert2tensor_transform_caps(GstBaseTransform *trans,
+static GstCaps* gst_tensor_converter_transform_caps(GstBaseTransform *trans,
GstPadDirection direction,
GstCaps *caps,
GstCaps *filter);
-static GstCaps* gst_convert2tensor_fixate_caps(GstBaseTransform *trans,
+static GstCaps* gst_tensor_converter_fixate_caps(GstBaseTransform *trans,
GstPadDirection direction,
GstCaps *caps,
GstCaps *othercaps);
-static gboolean gst_convert2tensor_set_caps(GstBaseTransform *trans,
+static gboolean gst_tensor_converter_set_caps(GstBaseTransform *trans,
GstCaps *incaps,
GstCaps *outcaps);
/* GObject vmethod implementations */
-/* initialize the convert2tensor's class */
+/* initialize the tensor_converter's class */
static void
-gst_convert2tensor_class_init (GstConvert2TensorClass * g_class)
+gst_tensor_converter_class_init (GstTensor_ConverterClass * g_class)
{
GObjectClass *gobject_class;
GstElementClass *gstelement_class;
GstBaseTransformClass *trans_class;
- GstConvert2TensorClass *klass;
+ GstTensor_ConverterClass *klass;
- klass = (GstConvert2TensorClass *) g_class;
+ klass = (GstTensor_ConverterClass *) g_class;
trans_class = (GstBaseTransformClass *) klass;
gstelement_class = (GstElementClass *) trans_class;
gobject_class = (GObjectClass *) gstelement_class;
- gobject_class->set_property = gst_convert2tensor_set_property;
- gobject_class->get_property = gst_convert2tensor_get_property;
+ gobject_class->set_property = gst_tensor_converter_set_property;
+ gobject_class->get_property = gst_tensor_converter_get_property;
g_object_class_install_property (gobject_class, PROP_SILENT,
g_param_spec_boolean ("silent", "Silent", "Produce verbose output ?",
FALSE, G_PARAM_READWRITE));
gst_element_class_set_details_simple(gstelement_class,
- "Convert2Tensor",
+ "Tensor_Converter",
"Convert media stream to tensor stream",
"Converts audio or video stream to tensor stream for neural network framework filters",
"MyungJoo Ham <myungjoo.ham@samsung.com>");
trans_class->passthrough_on_same_caps = FALSE;
/* Processing units */
- trans_class->transform = GST_DEBUG_FUNCPTR(gst_convert2tensor_transform);
- trans_class->transform_ip = GST_DEBUG_FUNCPTR(gst_convert2tensor_transform_ip);
+ trans_class->transform = GST_DEBUG_FUNCPTR(gst_tensor_converter_transform);
+ trans_class->transform_ip = GST_DEBUG_FUNCPTR(gst_tensor_converter_transform_ip);
/* Negotiation units */
- trans_class->transform_caps = GST_DEBUG_FUNCPTR(gst_convert2tensor_transform_caps);
- trans_class->fixate_caps = GST_DEBUG_FUNCPTR(gst_convert2tensor_fixate_caps);
- trans_class->set_caps = GST_DEBUG_FUNCPTR(gst_convert2tensor_set_caps);
+ trans_class->transform_caps = GST_DEBUG_FUNCPTR(gst_tensor_converter_transform_caps);
+ trans_class->fixate_caps = GST_DEBUG_FUNCPTR(gst_tensor_converter_fixate_caps);
+ trans_class->set_caps = GST_DEBUG_FUNCPTR(gst_tensor_converter_set_caps);
/** Allocation units
* transform_size and get_unit_size are omitted because we do not change
* initialize instance structure
*/
/**
 * @brief Instance-init (GObject instance initializer) for the converter element.
 *
 * Runs once per element instance, before any caps negotiation. Only resets
 * the two flags the rest of the element keys off of; all tensor metadata
 * (rank/dimension/type/framerate) is filled in later by the configure path
 * when sink caps arrive.
 */
static void
-gst_convert2tensor_init (GstConvert2Tensor * filter)
+gst_tensor_converter_init (GstTensor_Converter * filter)
{
  filter->silent = FALSE; /* verbose logging by default; toggled via "silent" property */
  filter->tensorConfigured = FALSE; /* tensor metadata not yet derived from sink caps */
}
static void
-gst_convert2tensor_set_property (GObject * object, guint prop_id,
+gst_tensor_converter_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
- GstConvert2Tensor *filter = GST_CONVERT2TENSOR (object);
+ GstTensor_Converter *filter = GST_TENSOR_CONVERTER (object);
switch (prop_id) {
case PROP_SILENT:
}
static void
-gst_convert2tensor_get_property (GObject * object, guint prop_id,
+gst_tensor_converter_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
- GstConvert2Tensor *filter = GST_CONVERT2TENSOR (object);
+ GstTensor_Converter *filter = GST_TENSOR_CONVERTER (object);
switch (prop_id) {
case PROP_SILENT:
;
/* Configure tensor metadata from sink caps */
static gboolean
-gst_convert2tensor_configure_tensor(const GstCaps *caps, GstConvert2Tensor *filter) {
+gst_tensor_converter_configure_tensor(const GstCaps *caps, GstTensor_Converter *filter) {
GstStructure *structure;
gint rank;
- gint dimension[GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT];
+ gint dimension[GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT];
tensor_type type;
gint framerate_numerator;
gint framerate_denominator;
}
dimension[3] = 1; /* This is 3-D Tensor */
- tensorFrameSize = GstConvert2TensorDataSize[type] * dimension[0] * dimension[1] * dimension[2] * dimension[3];
+ tensorFrameSize = GstTensor_ConverterDataSize[type] * dimension[0] * dimension[1] * dimension[2] * dimension[3];
/* Refer: https://gstreamer.freedesktop.org/documentation/design/mediatype-video-raw.html */
if (filter->tensorConfigured == TRUE) {
framerate_numerator == filter->framerate_numerator &&
tensorFrameSize == filter->tensorFrameSize &&
framerate_denominator == filter->framerate_denominator) {
- for (i = 0; i < GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT; i++)
+ for (i = 0; i < GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT; i++)
if (dimension[i] != filter->dimension[i]) {
g_printerr(" Dimension %d Mismatch with cached: %d --> %d\n", i, dimension[i], filter->dimension[i]);
return FALSE;
}
filter->rank = rank;
- for (i = 0; i < GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT; i++)
+ for (i = 0; i < GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT; i++)
filter->dimension[i] = dimension[i];
filter->type = type;
filter->framerate_numerator = framerate_numerator;
* register the element factories and other features
*/
static gboolean
-convert2tensor_init (GstPlugin * convert2tensor)
+tensor_converter_init (GstPlugin * tensor_converter)
{
  /* debug category for filtering log messages
*
- * exchange the string 'Template convert2tensor' with your description
+ * exchange the string 'Template tensor_converter' with your description
*/
- GST_DEBUG_CATEGORY_INIT (gst_convert2tensor_debug, "convert2tensor",
- 0, "Template convert2tensor");
+ GST_DEBUG_CATEGORY_INIT (gst_tensor_converter_debug, "tensor_converter",
+ 0, "Template tensor_converter");
- return gst_element_register (convert2tensor, "convert2tensor", GST_RANK_NONE,
- GST_TYPE_CONVERT2TENSOR);
+ return gst_element_register (tensor_converter, "tensor_converter", GST_RANK_NONE,
+ GST_TYPE_TENSOR_CONVERTER);
}
/* PACKAGE: this is usually set by autotools depending on some _INIT macro
* compile this code. GST_PLUGIN_DEFINE needs PACKAGE to be defined.
*/
#ifndef PACKAGE
-#define PACKAGE "convert2tensor"
+#define PACKAGE "tensor_converter"
#endif
-/* gstreamer looks for this structure to register convert2tensors
+/* gstreamer looks for this structure to register tensor_converters
*
- * exchange the string 'Template convert2tensor' with your convert2tensor description
+ * exchange the string 'Template tensor_converter' with your tensor_converter description
*/
GST_PLUGIN_DEFINE (
GST_VERSION_MAJOR,
GST_VERSION_MINOR,
- convert2tensor,
- "convert2tensor",
- convert2tensor_init,
+ tensor_converter,
+ "tensor_converter",
+ tensor_converter_init,
VERSION,
"LGPL",
"GStreamer",
"http://gstreamer.net/"
)
-static GstFlowReturn gst_c2t_transformer_videoframe(GstConvert2Tensor *filter,
+static GstFlowReturn gst_c2t_transformer_videoframe(GstTensor_Converter *filter,
GstVideoFrame *inframe, GstBuffer *outbuf)
{
return gst_buffer_copy_into(outbuf, inframe->buffer,
GST_VIDEO_FRAME_SIZE(inframe));
}
-static GstFlowReturn gst_convert2tensor_transform(GstBaseTransform *trans,
+static GstFlowReturn gst_tensor_converter_transform(GstBaseTransform *trans,
GstBuffer *inbuf,
GstBuffer *outbuf)
{
GstVideoFrame in_frame;
GstFlowReturn res;
- GstConvert2Tensor *filter = GST_CONVERT2TENSOR_CAST(trans);
+ GstTensor_Converter *filter = GST_TENSOR_CONVERTER_CAST(trans);
if (G_UNLIKELY(!filter->negotiated))
goto unknown_format;
return GST_FLOW_ERROR;
}
-static GstFlowReturn gst_convert2tensor_transform_ip(GstBaseTransform *trans,
+static GstFlowReturn gst_tensor_converter_transform_ip(GstBaseTransform *trans,
GstBuffer *buf)
{
/* DO NOTHING. THIS WORKS AS A PASSTHROUGH. We just remove metadata from video */
}
/**
- * gst_convert2tensor_transform_caps() - configure tensor-srcpad cap from "proposed" cap.
+ * gst_tensor_converter_transform_caps() - configure tensor-srcpad cap from "proposed" cap.
*
* @trans ("this" pointer)
* @direction (why do we need this?)
* @caps sinkpad cap
* @filter this element's cap (don't know specifically.)
*/
-static GstCaps* gst_convert2tensor_transform_caps(GstBaseTransform *trans,
+static GstCaps* gst_tensor_converter_transform_caps(GstBaseTransform *trans,
GstPadDirection direction,
GstCaps *caps,
GstCaps *filter)
{
GstCaps *tmp;
gboolean ret;
- GstConvert2Tensor bogusFilter = {0};
+ GstTensor_Converter bogusFilter = {0};
bogusFilter.tensorConfigured = FALSE;
/* @TODO: Verify if direction == GST_PAD_SINK means caps is sink pad */
/* @TODO CRITICAL: Handle when caps is in range, not fixed */
/* Construct bogusFilter from caps (sinkpad) */
- ret = gst_convert2tensor_configure_tensor(caps, &bogusFilter);
+ ret = gst_tensor_converter_configure_tensor(caps, &bogusFilter);
if (ret == FALSE) {
GstStructure *structure = gst_caps_get_structure(caps, 0);
gchar *str = gst_structure_to_string(structure);
"dim2", G_TYPE_INT, bogusFilter.dimension[1],
"dim3", G_TYPE_INT, bogusFilter.dimension[2],
"dim4", G_TYPE_INT, bogusFilter.dimension[3],
- "type", G_TYPE_STRING, GstConvert2TensorDataTypeName[bogusFilter.type],
+ "type", G_TYPE_STRING, GstTensor_ConverterDataTypeName[bogusFilter.type],
"framerate", GST_TYPE_FRACTION, bogusFilter.framerate_numerator,
bogusFilter.framerate_denominator,
NULL);
return NULL;
}
-static GstCaps* gst_convert2tensor_fixate_caps(GstBaseTransform *trans,
+static GstCaps* gst_tensor_converter_fixate_caps(GstBaseTransform *trans,
GstPadDirection direction,
GstCaps *caps,
GstCaps *othercaps)
{
- GstCaps *supposed = gst_convert2tensor_transform_caps(trans, direction, caps, NULL);
+ GstCaps *supposed = gst_tensor_converter_transform_caps(trans, direction, caps, NULL);
GstCaps *result;
GST_DEBUG_OBJECT (trans, "trying to fixate othercaps %" GST_PTR_FORMAT
return result;
}
-static gboolean gst_convert2tensor_set_caps(GstBaseTransform *trans,
+static gboolean gst_tensor_converter_set_caps(GstBaseTransform *trans,
GstCaps *incaps,
GstCaps *outcaps)
{
/** This is notifier of cap changes for subclass.
* However, we do not have subclass (This is the concrete class)
*/
- GstConvert2Tensor *filter = GST_CONVERT2TENSOR_CAST(trans);
+ GstTensor_Converter *filter = GST_TENSOR_CONVERTER_CAST(trans);
GstVideoInfo in_info, out_info;
GST_DEBUG_OBJECT (trans, "converting from %" GST_PTR_FORMAT
filter->in_info.video = in_info;
gst_base_transform_set_in_place(trans, TRUE);
- filter->negotiated = gst_convert2tensor_configure_tensor(incaps, filter);
+ filter->negotiated = gst_tensor_converter_configure_tensor(incaps, filter);
  /* @TODO Verify if outcaps and filter conf are compatible */
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
- * @file convert2tensor.c
+ * @file tensor_converter.c
* @date 26 Mar 2018
* @brief GStreamer plugin to convert media types to tensors (as a filter for other general neural network filters)
*
*
*/
-#ifndef __GST_CONVERT2TENSOR_H__
-#define __GST_CONVERT2TENSOR_H__
+#ifndef __GST_TENSOR_CONVERTER_H__
+#define __GST_TENSOR_CONVERTER_H__
#include <gst/gst.h>
#include <gst/base/gstbasetransform.h>
G_BEGIN_DECLS
/* #defines don't like whitespacey bits */
-#define GST_TYPE_CONVERT2TENSOR \
- (gst_convert2tensor_get_type())
-#define GST_CONVERT2TENSOR(obj) \
- (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_CONVERT2TENSOR,GstConvert2Tensor))
-#define GST_CONVERT2TENSOR_CLASS(klass) \
- (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_CONVERT2TENSOR,GstConvert2TensorClass))
-#define GST_IS_CONVERT2TENSOR(obj) \
- (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_CONVERT2TENSOR))
-#define GST_IS_CONVERT2TENSOR_CLASS(klass) \
- (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_CONVERT2TENSOR))
-#define GST_CONVERT2TENSOR_CAST(obj) ((GstConvert2Tensor *)(obj))
-
-typedef struct _GstConvert2Tensor GstConvert2Tensor;
-
-typedef struct _GstConvert2TensorClass GstConvert2TensorClass;
-
-#define GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT (4)
+#define GST_TYPE_TENSOR_CONVERTER \
+ (gst_tensor_converter_get_type())
+#define GST_TENSOR_CONVERTER(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_TENSOR_CONVERTER,GstTensor_Converter))
+#define GST_TENSOR_CONVERTER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_TENSOR_CONVERTER,GstTensor_ConverterClass))
+#define GST_IS_TENSOR_CONVERTER(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_TENSOR_CONVERTER))
+#define GST_IS_TENSOR_CONVERTER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_TENSOR_CONVERTER))
+#define GST_TENSOR_CONVERTER_CAST(obj) ((GstTensor_Converter *)(obj))
+
+typedef struct _GstTensor_Converter GstTensor_Converter;
+
+typedef struct _GstTensor_ConverterClass GstTensor_ConverterClass;
+
+#define GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT (4)
/**
* @brief Possible data element types of other/tensor.
*
/**
* @brief Internal data structure for tensor_converter instances.
*/
-struct _GstConvert2Tensor
+struct _GstTensor_Converter
{
GstBaseTransform element; /**< This is the parent object */
gboolean silent; /**< True if logging is minimized */
gboolean tensorConfigured; /**< True if already successfully configured tensor metadata */
gint rank; /**< Tensor Rank (# dimensions) */
- gint dimension[GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT]; /**< Dimensions. We support up to 4th ranks. **/
+ gint dimension[GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT]; /**< Dimensions. We support up to 4th ranks. **/
tensor_type type; /**< Type of each element in the tensor. User must designate this. Otherwise, this is UINT8 for video/x-raw byte stream */
gint framerate_numerator; /**< framerate is in fraction, which is numerator/denominator */
gint framerate_denominator; /**< framerate is in fraction, which is numerator/denominator */
/**
* @brief Byte-per-element of each tensor element type.
*/
-const unsigned int GstConvert2TensorDataSize[] = {
+const unsigned int GstTensor_ConverterDataSize[] = {
[_C2T_INT32] = 4,
[_C2T_UINT32] = 4,
[_C2T_INT16] = 2,
/**
* @brief String representations for each tensor element type.
*/
-const gchar* GstConvert2TensorDataTypeName[] = {
+const gchar* GstTensor_ConverterDataTypeName[] = {
[_C2T_INT32] = "int32",
[_C2T_UINT32] = "uint32",
[_C2T_INT16] = "int16",
};
/*
- * @brief GstConvert2TensorClass inherits GstBaseTransformClass.
+ * @brief GstTensor_ConverterClass inherits GstBaseTransformClass.
*
 * Referring to another child (sibling), GstVideoFilter (abstract class) and
* its child (concrete class) GstVideoConverter.
- * Note that GstConvert2TensorClass is a concrete class; thus we need to look at both.
+ * Note that GstTensor_ConverterClass is a concrete class; thus we need to look at both.
*/
-struct _GstConvert2TensorClass
+struct _GstTensor_ConverterClass
{
GstBaseTransformClass parent_class; /**< Inherits GstBaseTransformClass */
};
/*
* @brief Get Type function required for gst elements
*/
-GType gst_convert2tensor_get_type (void);
+GType gst_tensor_converter_get_type (void);
G_END_DECLS
-#endif /* __GST_CONVERT2TENSOR_H__ */
+#endif /* __GST_TENSOR_CONVERTER_H__ */