From: MyungJoo Ham
Date: Wed, 28 Mar 2018 06:38:24 +0000 (+0900)
Subject: [Convert] Implement Processing part from the document
X-Git-Tag: v0.0.1~228
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c9230c0fdd71cf1366049e4e4c18bfbf7b3e46c3;p=platform%2Fupstream%2Fnnstreamer.git

[Convert] Implement Processing part from the document

From the design document,
https://gstreamer.freedesktop.org/documentation/design/element-transform.html
implement the "Processing" part.

Signed-off-by: MyungJoo Ham
---

diff --git a/convert2tensor/convert2tensor.c b/convert2tensor/convert2tensor.c
index 187abed..2b14d34 100644
--- a/convert2tensor/convert2tensor.c
+++ b/convert2tensor/convert2tensor.c
@@ -95,7 +95,7 @@ enum
 static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
     GST_PAD_SINK,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS ("video/x-raw, format = (string)RGB")
+    GST_STATIC_CAPS ("video/x-raw, format = (string)RGB, views = (int)1, interlace-mode = (string)progressive")
     );

 /* the capabilities of the outputs
@@ -261,6 +261,7 @@ gst_convert2tensor_configure_tensor(const GstCaps *caps, GstConvert2Tensor *filt
   tensor_type type;
   gint framerate_numerator;
   gint framerate_denominator;
+  gsize tensorFrameSize;
   gboolean ret;
   GstCaps *outcaps;
   int i;
@@ -274,6 +275,7 @@ gst_convert2tensor_configure_tensor(const GstCaps *caps, GstConvert2Tensor *filt
     type = _C2T_UINT8; /* Assume color depth per component is 8 bit */
     dimension[2] = 3; /* R G B */
     dimension[3] = 1; /* This is a 3-D tensor */
+    tensorFrameSize = GstConvert2TensorDataSize[type] * dimension[0] * dimension[1] * dimension[2] * dimension[3];
     /* Refer: https://gstreamer.freedesktop.org/documentation/design/mediatype-video-raw.html */

   if (filter->tensorConfigured == TRUE) {
@@ -281,6 +283,7 @@ gst_convert2tensor_configure_tensor(const GstCaps *caps, GstConvert2Tensor *filt
     if (rank == filter->rank &&
         type == filter->type &&
         framerate_numerator == filter->framerate_numerator &&
+        tensorFrameSize == filter->tensorFrameSize &&
         framerate_denominator == filter->framerate_denominator) {
       for (i = 0; i < GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT; i++)
         if (dimension[i] != filter->dimension[i])
@@ -297,6 +300,7 @@ gst_convert2tensor_configure_tensor(const GstCaps *caps, GstConvert2Tensor *filt
   filter->type = type;
   filter->framerate_numerator = framerate_numerator;
   filter->framerate_denominator = framerate_denominator;
+  filter->tensorFrameSize = tensorFrameSize;
   filter->tensorConfigured = TRUE;

   return TRUE;
@@ -346,16 +350,72 @@ GST_PLUGIN_DEFINE (
     "http://gstreamer.net/"
 )

+static gboolean gst_c2t_transformer_videoframe(GstConvert2Tensor *filter,
+    GstVideoFrame *inframe, GstBuffer *outbuf)
+{
+  /* @TODO RGB channels are interleaved. FIXME!
+   * @TODO Rows are padded to a stride of 4. FIXME! */
+  return gst_buffer_copy_into(outbuf, inframe->buffer,
+      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS | GST_BUFFER_COPY_MEMORY,
+      0, GST_VIDEO_FRAME_SIZE(inframe));
+}
+
 static GstFlowReturn gst_convert2tensor_transform(GstBaseTransform *trans,
                                                   GstBuffer *inbuf,
                                                   GstBuffer *outbuf)
 {
+  GstVideoFrame in_frame;
+  GstFlowReturn res;
+  GstConvert2Tensor *filter = GST_CONVERT2TENSOR_CAST(trans);
+
+  if (G_UNLIKELY(!filter->negotiated))
+    goto unknown_format;
+  if (G_UNLIKELY(!filter->tensorConfigured))
+    goto unknown_tensor;
+
+  switch (filter->input_media_type) {
+  case _C2T_VIDEO:
+    /* CAUTION: in_info.video must already be configured! */
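+    /* gst_video_frame_map() checks inbuf against the negotiated in_info.video
+     * and maps the frame for reading; GST_VIDEO_FRAME_MAP_FLAG_NO_REF skips
+     * taking an extra reference on the buffer, which is safe here because the
+     * mapping does not outlive this function. */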
+    if (!gst_video_frame_map(&in_frame, &filter->in_info.video, inbuf,
+        GST_MAP_READ | GST_VIDEO_FRAME_MAP_FLAG_NO_REF))
+      goto invalid_buffer;
+
+    if (gst_c2t_transformer_videoframe(filter, &in_frame, outbuf))
+      res = GST_FLOW_OK;
+    else
+      res = GST_FLOW_ERROR;
+
+    gst_video_frame_unmap(&in_frame);
+    break;
+
+  /* NOT SUPPORTED */
+  case _C2T_AUDIO:
+  case _C2T_STRING:
+  default:
+    g_printerr("  Unsupported Media Type (%d)\n", filter->input_media_type);
+    goto unknown_type;
+  }
+
+  return res;
+
+unknown_format:
+  GST_ELEMENT_ERROR(filter, CORE, NOT_IMPLEMENTED, (NULL), ("unknown format"));
+  return GST_FLOW_NOT_NEGOTIATED;
+unknown_tensor:
+  GST_ELEMENT_ERROR(filter, CORE, NOT_IMPLEMENTED, (NULL), ("unknown format for tensor"));
+  return GST_FLOW_NOT_NEGOTIATED;
+unknown_type:
+  GST_ELEMENT_ERROR(filter, CORE, NOT_IMPLEMENTED, (NULL), ("not implemented type of media"));
+  return GST_FLOW_NOT_SUPPORTED;
+invalid_buffer:
+  GST_ELEMENT_ERROR(filter, CORE, NOT_IMPLEMENTED, (NULL), ("invalid video buffer received from input"));
+  return GST_FLOW_ERROR;
 }

 static GstFlowReturn gst_convert2tensor_transform_ip(GstBaseTransform *trans,
                                                      GstBuffer *buf)
 {
+  /* DO NOTHING. THIS WORKS AS A PASSTHROUGH. We just remove metadata from video. */
+  return GST_FLOW_OK;
 }

 static GstCaps* gst_convert2tensor_transform_caps(GstBaseTransform *trans,
diff --git a/convert2tensor/convert2tensor.h b/convert2tensor/convert2tensor.h
index b158c2f..fa43b02 100644
--- a/convert2tensor/convert2tensor.h
+++ b/convert2tensor/convert2tensor.h
@@ -45,6 +45,12 @@
  * @file   convert2tensor.c
  * @date   26 Mar 2018
  * @brief  GStreamer plugin to convert media types to tensors (as a filter for other general neural network filters)
+ *
+ * Be careful: this filter assumes that the user has attached
+ * rawvideoparser as a preprocessor for this filter so that
+ * the incoming buffer is aligned as an array of
+ * uint8[RGB][height][width].
+ *
  * @see    http://github.com/TO-BE-DETERMINED-SOON
  * @author MyungJoo Ham
  *
@@ -55,6 +61,8 @@

 #include <gst/gst.h>
 #include <gst/base/gstbasetransform.h>
+#include <gst/video/video-info.h>
+#include <gst/video/video-frame.h>

 G_BEGIN_DECLS
@@ -77,28 +85,48 @@ typedef struct _GstConvert2TensorClass GstConvert2TensorClass;
 #define GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT (4)
 typedef enum _tensor_type {
-    _C2T_INT32 = 0,
-    _C2T_UINT32,
-    _C2T_INT16,
-    _C2T_UINT16,
-    _C2T_INT8,
-    _C2T_UINT8,
-    _C2T_FLOAT64,
-    _C2T_FLOAT32,
+  _C2T_INT32 = 0,
+  _C2T_UINT32,
+  _C2T_INT16,
+  _C2T_UINT16,
+  _C2T_INT8,
+  _C2T_UINT8,
+  _C2T_FLOAT64,
+  _C2T_FLOAT32,

-    _C2T_END,
+  _C2T_END,
 } tensor_type;

+typedef enum _media_type {
+  _C2T_VIDEO = 0,
+  _C2T_AUDIO, /* Not Supported Yet */
+  _C2T_STRING, /* Not Supported Yet */
+
+  _C2T_MEDIA_END,
+} media_type;
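+
+/* media_type selects which member of the in_info union in GstConvert2Tensor
+ * below is valid; the transform path of this commit handles only _C2T_VIDEO. */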
 struct _GstConvert2Tensor
 {
   GstBaseTransform element;  /**< This is the parent object */

+  /* For transformer */
+  gboolean negotiated; /* When this is %TRUE, tensor metadata must be set */
+  media_type input_media_type;
+  union {
+    GstVideoInfo video;
+    /* @TODO: Add other media types */
+  } in_info;
+
+  /* For Tensor */
   gboolean silent;  /**< True if logging is minimized */
   gboolean tensorConfigured;  /**< True if already successfully configured tensor metadata */
   gint rank;  /**< Tensor Rank (# dimensions) */
-  gint dimension[GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT]; /**< Dimensions. We support up to 4th ranks */
+  gint dimension[GST_CONVERT2TENSOR_TENSOR_RANK_LIMIT];
+      /**< Dimensions. We support up to rank 4.
+       * @caution The first dimension is always 4 x N.
+       **/
   tensor_type type;  /**< Type of each element in the tensor. User must designate this. Otherwise, this is UINT8 for a video/x-raw byte stream */
   gint framerate_numerator;    /**< framerate is in fraction, which is numerator/denominator */
   gint framerate_denominator;  /**< framerate is in fraction, which is numerator/denominator */
+  gsize tensorFrameSize;       /**< Size of a single tensor frame, in bytes */
 };

 const unsigned int GstConvert2TensorDataSize[] = {
         [_C2T_INT32] = 4,
@@ -121,7 +149,13 @@ const gchar* GstConvert2TensorDataTypeName[] = {
         [_C2T_FLOAT32] = "float32",
 };

-
+/*
+ * GstConvert2TensorClass inherits GstBaseTransformClass.
+ *
+ * For reference, look at another child (sibling) of GstBaseTransformClass,
+ * GstVideoFilter (an abstract class), and at its child (a concrete class),
+ * GstVideoConvert.
+ * Note that GstConvert2TensorClass is a concrete class; thus, we need to look at both.
+ */
 struct _GstConvert2TensorClass
 {
   GstBaseTransformClass parent_class;
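
The hunks above only track tensorFrameSize; the arithmetic itself is simple. The
standalone sketch below mirrors the formula that gst_convert2tensor_configure_tensor()
gains in this commit, for the only input the sink pad accepts (packed RGB video, one
uint8 per color component). The 640x480 resolution, the local data-size table, and the
placement of width/height in dimension[0]/dimension[1] are illustrative assumptions;
that part of configure_tensor() is outside the hunks shown here.

  #include <glib.h>

  /* Bytes per element, indexed like the GstConvert2TensorDataSize table. */
  static const gsize data_size[] = {
    4, 4,   /* _C2T_INT32, _C2T_UINT32 */
    2, 2,   /* _C2T_INT16, _C2T_UINT16 */
    1, 1,   /* _C2T_INT8,  _C2T_UINT8  */
    8, 4,   /* _C2T_FLOAT64, _C2T_FLOAT32 */
  };

  int main(void)
  {
    /* Hypothetical 640x480 RGB frame. As in the commit, dimension[2] = 3
     * (R, G, B) and dimension[3] = 1; putting width and height into
     * dimension[0] and dimension[1] is an assumption for this sketch. */
    gint dimension[4] = { 640, 480, 3, 1 };
    gsize tensorFrameSize = data_size[5] /* _C2T_UINT8 */
        * dimension[0] * dimension[1] * dimension[2] * dimension[3];

    g_print("tensorFrameSize = %" G_GSIZE_FORMAT " bytes\n", tensorFrameSize);
    /* Prints: tensorFrameSize = 921600 bytes */
    return 0;
  }

Note that this product can differ from what GST_VIDEO_FRAME_SIZE() reports for a
mapped frame: as the @TODOs in gst_c2t_transformer_videoframe() warn, raw video rows
may be padded to a 4-byte stride, so width * 3 bytes per row is only the packed size.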