From: MyungJoo Ham <myungjoo.ham@samsung.com> Date: Tue, 29 May 2018 02:35:49 +0000 (+0900) Subject: [Convert] Use common header. Remove redundant content X-Git-Tag: v0.0.1~201 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=27bee89a804c5d550b6e2a66f7a1e5fb9cc050c3;p=platform%2Fupstream%2Fnnstreamer.git [Convert] Use common header. Remove redundant content Use <tensor_common.h> for common data types. Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com> --- diff --git a/CMakeLists.txt b/CMakeLists.txt index c60095c..0576337 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -42,5 +42,10 @@ FOREACH(flag ${pkgs_CFALGS}) ENDFOREACH(flag) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CFLAGS} -Wall -Werror ") +# Provide common data +ADD_LIBRARY(common STATIC common/tensor_common.c) +TARGET_INCLUDE_DIRECTORIES(common PUBLIC ${pkgs_INCLUDE_DIRS}) +SET(pkgs_LIBRARIES ${pkgs_LIBRARIES} common) + ADD_SUBDIRECTORY(tensor_converter) ADD_SUBDIRECTORY(tensor_filter) diff --git a/common/tensor_common.c b/common/tensor_common.c new file mode 100644 index 0000000..7ac48a7 --- /dev/null +++ b/common/tensor_common.c @@ -0,0 +1,66 @@ +/* + * NNStreamer Common Header's Contents + * Copyright (C) 2018 MyungJoo Ham <myungjoo.ham@samsung.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Alternatively, the contents of this file may be used under the + * GNU Lesser General Public License Version 2.1 (the "LGPL"), in + * which case the following provisions apply instead of the ones + * mentioned above: + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * @file tensor_common.c + * @date 29 May 2018 + * @brief Common data for NNStreamer, the GStreamer plugin for neural networks + * @see http://github.com/TO-BE-DETERMINED-SOON + * @see https://github.sec.samsung.net/STAR/nnstreamer + * @author MyungJoo Ham <myungjoo.ham@samsung.com> + * + */ + +#include <tensor_common.h> + +/** + * @brief String representations for each tensor element type. 
+ */ +const gchar* tensor_element_typename[] = { + [_NNS_INT32] = "int32", + [_NNS_UINT32] = "uint32", + [_NNS_INT16] = "int16", + [_NNS_UINT16] = "uint16", + [_NNS_INT8] = "int8", + [_NNS_UINT8] = "uint8", + [_NNS_FLOAT64] = "float64", + [_NNS_FLOAT32] = "float32", +}; diff --git a/include/tensor_common.h b/include/tensor_common.h index a9e903f..199b967 100644 --- a/include/tensor_common.h +++ b/include/tensor_common.h @@ -74,7 +74,7 @@ typedef enum _nns_tensor_type { _NNS_FLOAT32, _NNS_END, -} nns_tensor_type; +} tensor_type; /** * @brief Possible input stream types for other/tensor. @@ -88,12 +88,12 @@ typedef enum _nns_media_type { _NNS_STRING, /* Not Supported Yet */ _NNS_MEDIA_END, -} nns_media_type; +} media_type; /** * @brief Byte-per-element of each tensor element type. */ -static const unsigned int nns_tensor_element_size[] = { +static const unsigned int tensor_element_size[] = { [_NNS_INT32] = 4, [_NNS_UINT32] = 4, [_NNS_INT16] = 2, @@ -107,16 +107,7 @@ static const unsigned int nns_tensor_element_size[] = { /** * @brief String representations for each tensor element type. 
*/ -static const gchar* nns_tensor_element_typename[] = { - [_NNS_INT32] = "int32", - [_NNS_UINT32] = "uint32", - [_NNS_INT16] = "int16", - [_NNS_UINT16] = "uint16", - [_NNS_INT8] = "int8", - [_NNS_UINT8] = "uint8", - [_NNS_FLOAT64] = "float64", - [_NNS_FLOAT32] = "float32", -}; +extern const gchar* tensor_element_typename[]; G_END_DECLS diff --git a/tensor_converter/tensor_converter.c b/tensor_converter/tensor_converter.c index 5ddd3a2..764b057 100644 --- a/tensor_converter/tensor_converter.c +++ b/tensor_converter/tensor_converter.c @@ -257,7 +257,7 @@ static gboolean gst_tensor_converter_configure_tensor(const GstCaps *caps, GstTensor_Converter *filter) { GstStructure *structure; gint rank; - gint dimension[GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT]; + gint dimension[NNS_TENSOR_RANK_LIMIT]; tensor_type type; gint framerate_numerator; gint framerate_denominator; @@ -273,7 +273,7 @@ gst_tensor_converter_configure_tensor(const GstCaps *caps, GstTensor_Converter * return_false_if_fail(gst_structure_get_int(structure, "width", &dimension[1])); return_false_if_fail(gst_structure_get_int(structure, "height", &dimension[2])); return_false_if_fail(gst_structure_get_fraction(structure, "framerate", &framerate_numerator, &framerate_denominator)); - type = _C2T_UINT8; /* Assume color depth per component is 8 bit */ + type = _NNS_UINT8; /* Assume color depth per component is 8 bit */ if (dimension[1] % 4) { g_print(" Width(dim2) is not divisible with 4. 
Width is adjusted %d -> %d\n", dimension[1], (dimension[1] + 3) / 4 * 4); @@ -292,7 +292,7 @@ gst_tensor_converter_configure_tensor(const GstCaps *caps, GstTensor_Converter * } dimension[3] = 1; /* This is 3-D Tensor */ - tensorFrameSize = GstTensor_ConverterDataSize[type] * dimension[0] * dimension[1] * dimension[2] * dimension[3]; + tensorFrameSize = tensor_element_size[type] * dimension[0] * dimension[1] * dimension[2] * dimension[3]; /* Refer: https://gstreamer.freedesktop.org/documentation/design/mediatype-video-raw.html */ if (filter->tensorConfigured == TRUE) { @@ -302,7 +302,7 @@ gst_tensor_converter_configure_tensor(const GstCaps *caps, GstTensor_Converter * framerate_numerator == filter->framerate_numerator && tensorFrameSize == filter->tensorFrameSize && framerate_denominator == filter->framerate_denominator) { - for (i = 0; i < GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT; i++) + for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) if (dimension[i] != filter->dimension[i]) { g_printerr(" Dimension %d Mismatch with cached: %d --> %d\n", i, dimension[i], filter->dimension[i]); return FALSE; @@ -314,7 +314,7 @@ gst_tensor_converter_configure_tensor(const GstCaps *caps, GstTensor_Converter * } filter->rank = rank; - for (i = 0; i < GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT; i++) + for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) filter->dimension[i] = dimension[i]; filter->type = type; filter->framerate_numerator = framerate_numerator; @@ -324,7 +324,7 @@ gst_tensor_converter_configure_tensor(const GstCaps *caps, GstTensor_Converter * filter->tensorConfigured = TRUE; /* @TODO Support other types */ - filter->input_media_type = _C2T_VIDEO; + filter->input_media_type = _NNS_VIDEO; return TRUE; } @@ -394,7 +394,7 @@ static GstFlowReturn gst_tensor_converter_transform(GstBaseTransform *trans, goto unknown_tensor; switch(filter->input_media_type) { - case _C2T_VIDEO: + case _NNS_VIDEO: // CAUTION! in_info.video must be already configured! 
if (!gst_video_frame_map(&in_frame, &filter->in_info.video, inbuf, GST_MAP_READ | GST_VIDEO_FRAME_MAP_FLAG_NO_REF)) @@ -407,8 +407,8 @@ static GstFlowReturn gst_tensor_converter_transform(GstBaseTransform *trans, gst_video_frame_unmap(&in_frame); break; /* NOT SUPPORTED */ - case _C2T_AUDIO: - case _C2T_STRING: + case _NNS_AUDIO: + case _NNS_STRING: default: g_printerr(" Unsupported Media Type (%d)\n", filter->input_media_type); goto unknown_type; @@ -533,7 +533,7 @@ static GstCaps* gst_tensor_converter_transform_caps(GstBaseTransform *trans, "dim2", G_TYPE_INT, bogusFilter.dimension[1], "dim3", G_TYPE_INT, bogusFilter.dimension[2], "dim4", G_TYPE_INT, bogusFilter.dimension[3], - "type", G_TYPE_STRING, GstTensor_ConverterDataTypeName[bogusFilter.type], + "type", G_TYPE_STRING, tensor_element_typename[bogusFilter.type], "framerate", GST_TYPE_FRACTION, bogusFilter.framerate_numerator, bogusFilter.framerate_denominator, NULL); diff --git a/tensor_converter/tensor_converter.h b/tensor_converter/tensor_converter.h index 8868777..f96d802 100644 --- a/tensor_converter/tensor_converter.h +++ b/tensor_converter/tensor_converter.h @@ -65,6 +65,7 @@ #include #include #include +#include G_BEGIN_DECLS @@ -85,40 +86,6 @@ typedef struct _GstTensor_Converter GstTensor_Converter; typedef struct _GstTensor_ConverterClass GstTensor_ConverterClass; -#define GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT (4) -/** - * @brief Possible data element types of other/tensor. - * - * The current version supports C2T_UINT8 only as video-input. - * There is no restrictions for inter-NN or sink-to-app. - */ -typedef enum _tensor_type { - _C2T_INT32 = 0, - _C2T_UINT32, - _C2T_INT16, - _C2T_UINT16, - _C2T_INT8, - _C2T_UINT8, - _C2T_FLOAT64, - _C2T_FLOAT32, - - _C2T_END, -} tensor_type; - -/** - * @brief Possible input stream types for other/tensor. - * - * This is realted with media input stream to other/tensor. - * There is no restrictions for the outputs. 
- */ -typedef enum _media_type { - _C2T_VIDEO = 0, - _C2T_AUDIO, /* Not Supported Yet */ - _C2T_STRING, /* Not Supported Yet */ - - _C2T_MEDIA_END, -} media_type; - /** * @brief Internal data structure for tensor_converter instances. */ @@ -138,41 +105,13 @@ struct _GstTensor_Converter gboolean silent; /**< True if logging is minimized */ gboolean tensorConfigured; /**< True if already successfully configured tensor metadata */ gint rank; /**< Tensor Rank (# dimensions) */ - gint dimension[GST_TENSOR_CONVERTER_TENSOR_RANK_LIMIT]; /**< Dimensions. We support up to 4th ranks. **/ + gint dimension[NNS_TENSOR_RANK_LIMIT]; /**< Dimensions. We support up to 4th ranks. **/ tensor_type type; /**< Type of each element in the tensor. User must designate this. Otherwise, this is UINT8 for video/x-raw byte stream */ gint framerate_numerator; /**< framerate is in fraction, which is numerator/denominator */ gint framerate_denominator; /**< framerate is in fraction, which is numerator/denominator */ gsize tensorFrameSize; }; -/** - * @brief Byte-per-element of each tensor element type. - */ -const unsigned int GstTensor_ConverterDataSize[] = { - [_C2T_INT32] = 4, - [_C2T_UINT32] = 4, - [_C2T_INT16] = 2, - [_C2T_UINT16] = 2, - [_C2T_INT8] = 1, - [_C2T_UINT8] = 1, - [_C2T_FLOAT64] = 8, - [_C2T_FLOAT32] = 4, -}; - -/** - * @brief String representations for each tensor element type. - */ -const gchar* GstTensor_ConverterDataTypeName[] = { - [_C2T_INT32] = "int32", - [_C2T_UINT32] = "uint32", - [_C2T_INT16] = "int16", - [_C2T_UINT16] = "uint16", - [_C2T_INT8] = "int8", - [_C2T_UINT8] = "uint8", - [_C2T_FLOAT64] = "float64", - [_C2T_FLOAT32] = "float32", -}; - /* * @brief GstTensor_ConverterClass inherits GstBaseTransformClass. *