From 7e357a8564f8896926d6f896208248ab588b419d Mon Sep 17 00:00:00 2001 From: Jaeyun Date: Fri, 14 Sep 2018 16:04:17 +0900 Subject: [PATCH] [Filter/Common] prepare multi-tensors refactor tensor-filter to support multi-tensors (other/tensors). tf and tf-lte should be updated later for multi-tensors. 1. add common GstTensorsInfo in tensor-filter and parse from NN model 2. change NN framework properties 3. fix the bug in common functions for multi-tensors 4. remove space to get tensor dim and type in common function Signed-off-by: Jaeyun Jung --- common/tensor_common.c | 261 ++-- gst/tensor_filter/tensor_filter.c | 1428 +++++++++----------- gst/tensor_filter/tensor_filter.h | 85 +- gst/tensor_filter/tensor_filter_custom.c | 53 +- gst/tensor_filter/tensor_filter_tensorflow.c | 45 +- gst/tensor_filter/tensor_filter_tensorflow_core.cc | 48 +- gst/tensor_filter/tensor_filter_tensorflow_lite.c | 43 +- .../tensor_filter_tensorflow_lite_core.cc | 91 +- include/tensor_common.h | 28 +- include/tensor_filter_custom.h | 43 +- include/tensor_filter_tensorflow_core.h | 12 +- include/tensor_filter_tensorflow_lite_core.h | 14 +- include/tensor_typedef.h | 42 +- .../nnstreamer_customfilter_example_average.c | 69 +- .../nnstreamer_customfilter_example_passthrough.c | 40 +- ...mer_customfilter_example_passthrough_variable.c | 25 +- .../nnstreamer_customfilter_example_scaler.c | 67 +- ...treamer_customfilter_example_scaler_allocator.c | 73 +- tests/common/unittest_common.cpp | 56 +- tests/nnstreamer_sink/unittest_sink.cpp | 443 +++--- 20 files changed, 1417 insertions(+), 1549 deletions(-) diff --git a/common/tensor_common.c b/common/tensor_common.c index 79623eb..fcc8ceb 100644 --- a/common/tensor_common.c +++ b/common/tensor_common.c @@ -480,7 +480,7 @@ gst_tensor_config_from_text_info (GstTensorConfig * config, /** [size][frames] */ config->info.dimension[0] = GST_TENSOR_STRING_SIZE; /** fixed size of string */ - config->info.dimension[1] = 1; /** Supposed 1 frame in tensor,, change 
this if tensor contains N frames */ + config->info.dimension[1] = 1; /** Supposed 1 frame in tensor, change this if tensor contains N frames */ config->info.dimension[2] = 1; config->info.dimension[3] = 1; @@ -658,7 +658,7 @@ gst_tensors_config_from_structure (GstTensorsConfig * config, const GstStructure * structure) { const gchar *name; - guint i, j; + guint i; g_return_val_if_fail (config != NULL, FALSE); gst_tensors_config_init (config); @@ -680,17 +680,16 @@ gst_tensors_config_from_structure (GstTensorsConfig * config, const gchar *dims_string; const gchar *types_string; - gst_structure_get_uint (structure, "num_tensors", - &config->info.num_tensors); + gst_structure_get_int (structure, "num_tensors", + (gint *) (&config->info.num_tensors)); gst_structure_get_fraction (structure, "framerate", &config->rate_n, &config->rate_d); /* parse dimensions */ dims_string = gst_structure_get_string (structure, "dimensions"); if (dims_string) { - gchar **str_dim, **str_dims; - gint num_dim, num_dims; - guint64 val; + gchar **str_dims; + gint num_dims; str_dims = g_strsplit (dims_string, ",", -1); num_dims = g_strv_length (str_dims); @@ -701,19 +700,7 @@ gst_tensors_config_from_structure (GstTensorsConfig * config, } for (i = 0; i < num_dims; i++) { - str_dim = g_strsplit (str_dims[i], ":", NNS_TENSOR_RANK_LIMIT); - num_dim = g_strv_length (str_dim); - - for (j = 0; j < num_dim; j++) { - val = g_ascii_strtoull (str_dim[j], NULL, 10); - config->info.info[i].dimension[j] = (uint32_t) val; - } - - for (; j < NNS_TENSOR_RANK_LIMIT; j++) { - config->info.info[i].dimension[j] = 1; - } - - g_strfreev (str_dim); + get_tensor_dimension (str_dims[i], config->info.info[i].dimension); } g_strfreev (str_dims); @@ -725,7 +712,7 @@ gst_tensors_config_from_structure (GstTensorsConfig * config, gchar **str_types; gint num_types; - str_types = g_strsplit (dims_string, ",", -1); + str_types = g_strsplit (types_string, ",", -1); num_types = g_strv_length (str_types); if 
(config->info.num_tensors != num_types) { @@ -765,27 +752,22 @@ gst_tensors_caps_from_config (const GstTensorsConfig * config) if (config->info.num_tensors > 0) { GString *dimensions = g_string_new (NULL); GString *types = g_string_new (NULL); + gchar *dim_str; /** dimensions and types */ for (i = 0; i < config->info.num_tensors; i++) { - /** - * @note supposed dimension with rank 4 (NNS_TENSOR_RANK_LIMIT) - */ - gchar *dim_string = g_strdup_printf ("%d:%d:%d:%d", - config->info.info[i].dimension[0], config->info.info[i].dimension[1], - config->info.info[i].dimension[2], config->info.info[i].dimension[3]); - - dimensions = g_string_append (dimensions, dim_string); - types = - g_string_append (types, + dim_str = get_tensor_dimension_string (config->info.info[i].dimension); + + g_string_append (dimensions, dim_str); + g_string_append (types, tensor_element_typename[config->info.info[i].type]); if (i < config->info.num_tensors - 1) { - dimensions = g_string_append (dimensions, ","); - types = g_string_append (types, ","); + g_string_append (dimensions, ","); + g_string_append (types, ","); } - g_free (dim_string); + g_free (dim_str); } gst_caps_set_simple (caps, "num_tensors", G_TYPE_INT, @@ -839,55 +821,61 @@ tensor_type get_tensor_type (const gchar * typestr) { int len; + gchar *type_string; + tensor_type type = _NNS_END; + + g_return_val_if_fail (typestr != NULL, _NNS_END); + + /** remove spaces */ + type_string = g_strdup (typestr); + g_strstrip (type_string); - if (!typestr) - return _NNS_END; - len = strlen (typestr); + len = strlen (type_string); - if (typestr[0] == 'u' || typestr[0] == 'U') { + if (type_string[0] == 'u' || type_string[0] == 'U') { /** * Let's believe the developer and the following three letters are "int" * (case insensitive) */ if (len == 6) { /* uint16, uint32 */ - if (typestr[4] == '1' && typestr[5] == '6') - return _NNS_UINT16; - else if (typestr[4] == '3' && typestr[5] == '2') - return _NNS_UINT32; - else if (typestr[4] == '6' && 
typestr[5] == '4') - return _NNS_UINT64; + if (type_string[4] == '1' && type_string[5] == '6') + type = _NNS_UINT16; + else if (type_string[4] == '3' && type_string[5] == '2') + type = _NNS_UINT32; + else if (type_string[4] == '6' && type_string[5] == '4') + type = _NNS_UINT64; } else if (len == 5) { /* uint8 */ - if (typestr[4] == '8') - return _NNS_UINT8; + if (type_string[4] == '8') + type = _NNS_UINT8; } - } else if (typestr[0] == 'i' || typestr[0] == 'I') { + } else if (type_string[0] == 'i' || type_string[0] == 'I') { /** * Let's believe the developer and the following two letters are "nt" * (case insensitive) */ if (len == 5) { /* int16, int32 */ - if (typestr[3] == '1' && typestr[4] == '6') - return _NNS_INT16; - else if (typestr[3] == '3' && typestr[4] == '2') - return _NNS_INT32; - else if (typestr[3] == '6' && typestr[4] == '4') - return _NNS_INT64; + if (type_string[3] == '1' && type_string[4] == '6') + type = _NNS_INT16; + else if (type_string[3] == '3' && type_string[4] == '2') + type = _NNS_INT32; + else if (type_string[3] == '6' && type_string[4] == '4') + type = _NNS_INT64; } else if (len == 4) { /* int8 */ - if (typestr[3] == '8') - return _NNS_INT8; + if (type_string[3] == '8') + type = _NNS_INT8; } - return _NNS_END; - } else if (typestr[0] == 'f' || typestr[0] == 'F') { + } else if (type_string[0] == 'f' || type_string[0] == 'F') { /* Let's assume that the following 4 letters are "loat" */ if (len == 7) { - if (typestr[5] == '6' && typestr[6] == '4') - return _NNS_FLOAT64; - else if (typestr[5] == '3' && typestr[6] == '2') - return _NNS_FLOAT32; + if (type_string[5] == '6' && type_string[6] == '4') + type = _NNS_FLOAT64; + else if (type_string[5] == '3' && type_string[6] == '2') + type = _NNS_FLOAT32; } } - return _NNS_END; + g_free (type_string); + return type; } /** @@ -914,36 +902,68 @@ find_key_strv (const gchar ** strv, const gchar * key) /** * @brief Parse tensor dimension parameter string * @return The Rank. 0 if error. 
- * @param param The parameter string in the format of d1:d2:d3:d4, d1:d2:d3, d1:d2, or d1, where dN is a positive integer and d1 is the innermost dimension; i.e., dim[d4][d3][d2][d1]; + * @param dimstr The dimension string in the format of d1:d2:d3:d4, d1:d2:d3, d1:d2, or d1, where dN is a positive integer and d1 is the innermost dimension; i.e., dim[d4][d3][d2][d1]; + * @param dim dimension to be filled. */ int -get_tensor_dimension (const gchar * param, - uint32_t dim[NNS_TENSOR_SIZE_LIMIT][NNS_TENSOR_RANK_LIMIT]) +get_tensor_dimension (const gchar * dimstr, tensor_dim dim) { - int i, j = 0; + int rank = 0; guint64 val; - gchar **_strv = g_strsplit (param, ",./", -1); - gint num_tensor = g_strv_length (_strv); + gchar **strv; + gchar *dim_string; + gint i, num_dims; - for (i = 0; i < num_tensor; i++) { - gchar **strv = g_strsplit (_strv[i], ":", NNS_TENSOR_RANK_LIMIT); + g_return_val_if_fail (dimstr != NULL, 0); - g_assert (strv != NULL); + /** remove spaces */ + dim_string = g_strdup (dimstr); + g_strstrip (dim_string); - for (j = 0; j < NNS_TENSOR_RANK_LIMIT; j++) { - if (strv[j] == NULL) - break; - val = g_ascii_strtoull (strv[j], NULL, 10); - dim[i][j] = val; - } - for (; j < NNS_TENSOR_RANK_LIMIT; j++) - dim[i][j] = 1; - g_strfreev (strv); + strv = g_strsplit (dim_string, ":", NNS_TENSOR_RANK_LIMIT); + num_dims = g_strv_length (strv); + + for (i = 0; i < num_dims; i++) { + g_strstrip (strv[i]); + if (strv[i] == NULL || strlen (strv[i]) == 0) + break; + + val = g_ascii_strtoull (strv[i], NULL, 10); + dim[i] = (uint32_t) val; + rank = i + 1; } - g_strfreev (_strv); + for (; i < NNS_TENSOR_RANK_LIMIT; i++) + dim[i] = 1; + + g_strfreev (strv); + g_free (dim_string); + return rank; +} + +/** + * @brief Get dimension string from given tensor dimension. + * @param dim tensor dimension + * @return Formatted string of given dimension (d1:d2:d3:d4). 
+ * @note The returned value should be freed with g_free() + */ +gchar * +get_tensor_dimension_string (const tensor_dim dim) +{ + gint i; + GString *dim_str; + + dim_str = g_string_new (NULL); + + for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) { + g_string_append_printf (dim_str, "%d", dim[i]); - return num_tensor; + if (i < NNS_TENSOR_RANK_LIMIT - 1) { + g_string_append (dim_str, ":"); + } + } + + return g_string_free (dim_str, FALSE); } /** @@ -1001,83 +1021,6 @@ get_tensor_from_structure (const GstStructure * str, tensor_dim dim, } /** - * @brief internal static function to trim the front. - */ -static const gchar * -ftrim (const gchar * str) -{ - if (!str) - return str; - while (*str && (*str == ' ' || *str == '\t')) { - str++; - } - return str; -} - -/** - * @brief Extract other/tensors dim/type from GstStructure - */ -int -get_tensors_from_structure (const GstStructure * str, - GstTensor_TensorsMeta * meta, int *framerate_num, int *framerate_denom) -{ - int num = 0; - const gchar *strval; - gint fn = 0, fd = 0; - gchar **strv; - int counter = 0; - - if (!gst_structure_has_name (str, "other/tensors")) - return 0; - - if (gst_structure_get_int (str, "num_tensors", (int *) &num)) { - if (num > 16 || num < 1) - num = 0; - } - if (0 == num) - return 0; - - meta->num_tensors = num; - - if (gst_structure_get_fraction (str, "framerate", &fn, &fd)) { - if (framerate_num) - *framerate_num = fn; - if (framerate_denom) - *framerate_denom = fd; - } - - strval = gst_structure_get_string (str, "dimensions"); - - counter = get_tensor_dimension (strval, meta->dims); - - if (counter != num) { - err_print - ("The number of dimensions does not match the number of tensors.\n"); - return 0; - } - - strval = gst_structure_get_string (str, "types"); - strv = g_strsplit (strval, ",", -1); - counter = 0; - while (strv[counter]) { - if (counter >= num) { - err_print ("The number of types does not match the number of tensors.\n"); - return 0; - } - meta->types[counter] = get_tensor_type 
(ftrim (strv[counter])); - if (meta->types[counter] >= _NNS_END) - return 0; - counter++; - } - if (counter != num) { - err_print ("The number of types does not match the number of tensors.\n"); - return 0; - } - g_strfreev (strv); - return num; -} - -/** * @brief Get tensor dimension/type from GstCaps */ GstTensor_Filter_CheckStatus diff --git a/gst/tensor_filter/tensor_filter.c b/gst/tensor_filter/tensor_filter.c index 3ca1df5..21c04ba 100644 --- a/gst/tensor_filter/tensor_filter.c +++ b/gst/tensor_filter/tensor_filter.c @@ -34,7 +34,7 @@ * SECTION:element-tensor_filter * * A filter that converts media stream to tensor stream for NN frameworks. - * The output is always in the format of other/tensor + * The output is always in the format of other/tensor or other/tensors. * * * Example launch line @@ -57,7 +57,37 @@ #include "tensor_filter.h" -GstTensor_Filter_Framework *tensor_filter_supported[] = { +/** + * @brief Macro for debug mode. + */ +#ifndef DBG +#define DBG (!self->silent) +#endif + +/** + * @brief Macro for debug message. + */ +#define silent_debug(...) \ + debug_print (DBG, __VA_ARGS__) + +#define silent_debug_caps(caps,msg) do {\ + if (DBG) { \ + if (caps) { \ + GstStructure *caps_s; \ + gchar *caps_s_string; \ + guint caps_size, caps_idx; \ + caps_size = gst_caps_get_size (caps);\ + for (caps_idx = 0; caps_idx < caps_size; caps_idx++) { \ + caps_s = gst_caps_get_structure (caps, caps_idx); \ + caps_s_string = gst_structure_to_string (caps_s); \ + debug_print (TRUE, msg " = %s\n", caps_s_string); \ + g_free (caps_s_string); \ + } \ + } \ + } \ +} while (0) + +GstTensorFilterFramework *tensor_filter_supported[] = { [_T_F_UNDEFINED] = NULL, [_T_F_CUSTOM] = &NNS_support_custom, @@ -91,13 +121,9 @@ const char *nnfw_names[] = { GST_DEBUG_CATEGORY_STATIC (gst_tensor_filter_debug); #define GST_CAT_DEFAULT gst_tensor_filter_debug -/* Filter signals and args */ -enum -{ - /* FILL ME */ - LAST_SIGNAL -}; - +/** + * @brief GstTensorFilter properties. 
+ */ enum { PROP_0, @@ -112,12 +138,17 @@ enum }; /** + * @brief Default caps string for both sink and source pad. + */ +#define CAPS_STRING GST_TENSOR_CAP_DEFAULT "; " GST_TENSORS_CAP_DEFAULT + +/** * @brief The capabilities of the inputs */ static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, - GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT)); + GST_STATIC_CAPS (CAPS_STRING)); /** * @brief The capabilities of the outputs @@ -125,17 +156,18 @@ static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink", static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS, - GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT)); + GST_STATIC_CAPS (CAPS_STRING)); #define gst_tensor_filter_parent_class parent_class -G_DEFINE_TYPE (GstTensor_Filter, gst_tensor_filter, GST_TYPE_BASE_TRANSFORM); +G_DEFINE_TYPE (GstTensorFilter, gst_tensor_filter, GST_TYPE_BASE_TRANSFORM); +/* GObject vmethod implementations */ static void gst_tensor_filter_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); static void gst_tensor_filter_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); -/* GstBaseTransformer vmethod implementations */ +/* GstBaseTransform vmethod implementations */ static GstFlowReturn gst_tensor_filter_transform (GstBaseTransform * trans, GstBuffer * inbuf, GstBuffer * outbuf); static GstFlowReturn gst_tensor_filter_transform_ip (GstBaseTransform * trans, @@ -151,20 +183,39 @@ static gboolean gst_tensor_filter_transform_size (GstBaseTransform * trans, GstCaps * othercaps, gsize * othersize); static gboolean gst_tensor_filter_start (GstBaseTransform * trans); static gboolean gst_tensor_filter_stop (GstBaseTransform * trans); -/* GObject vmethod implementations */ + +/** + * @brief Invoke callbacks of filter->prop.fw. Guarantees calling open for the first call. 
+ */ +#define gst_tensor_filter_call(filter,ret,funcname,...) do { \ + if (filter->prop.fw_opened == FALSE) { \ + if (filter->prop.fw->open != NULL) \ + filter->prop.fw->open (filter, &filter->privateData); \ + filter->prop.fw_opened = TRUE; \ + } \ + ret = filter->prop.fw->funcname (filter, &filter->privateData, __VA_ARGS__); \ + } while(0) + +/** + * @brief Close nn framework. + */ +#define gst_tensor_filter_close(filter) do { \ + g_assert (filter->prop.fw_opened); \ + if (filter->prop.fw->close) \ + filter->prop.fw->close (filter, &filter->privateData); \ + filter->prop.fw_opened = FALSE; \ + } while (0) /** * @brief initialize the tensor_filter's class */ static void -gst_tensor_filter_class_init (GstTensor_FilterClass * g_class) +gst_tensor_filter_class_init (GstTensorFilterClass * klass) { GObjectClass *gobject_class; GstElementClass *gstelement_class; GstBaseTransformClass *trans_class; - GstTensor_FilterClass *klass; - klass = (GstTensor_FilterClass *) g_class; trans_class = (GstBaseTransformClass *) klass; gstelement_class = (GstElementClass *) trans_class; gobject_class = (GObjectClass *) gstelement_class; @@ -173,37 +224,40 @@ gst_tensor_filter_class_init (GstTensor_FilterClass * g_class) gobject_class->get_property = gst_tensor_filter_get_property; g_object_class_install_property (gobject_class, PROP_SILENT, - g_param_spec_boolean ("silent", "Silent", "Produce verbose output ?", - FALSE, G_PARAM_READWRITE)); + g_param_spec_boolean ("silent", "Silent", "Produce verbose output", + FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_FRAMEWORK, g_param_spec_string ("framework", "Framework", - "Neural network framework ?", "", G_PARAM_READWRITE)); + "Neural network framework", "", + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_MODEL, g_param_spec_string ("model", "Model filepath", - "Filepath to the model file ?", "", G_PARAM_READWRITE)); + "File path 
to the model file", "", + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_INPUT, g_param_spec_string ("input", "Input dimension", "Input tensor dimension from inner array, upto 4 dimensions ?", "", - G_PARAM_READWRITE)); + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_INPUTTYPE, g_param_spec_string ("inputtype", "Input tensor element type", - "Type of each element of the input tensor ?", "uint8", - G_PARAM_READWRITE)); + "Type of each element of the input tensor ?", "", + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_OUTPUT, g_param_spec_string ("output", "Output dimension", "Output tensor dimension from inner array, upto 4 dimensions ?", "", - G_PARAM_READWRITE)); + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_OUTPUTTYPE, g_param_spec_string ("outputtype", "Output tensor element type", - "Type of each element of the output tensor ?", "uint8", - G_PARAM_READWRITE)); + "Type of each element of the output tensor ?", "", + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_CUSTOM, g_param_spec_string ("custom", "Custom properties for subplugins", - "Custom properties for subplugins ?", "", G_PARAM_READWRITE)); + "Custom properties for subplugins ?", "", + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); gst_element_class_set_details_simple (gstelement_class, "Tensor_Filter", - "NN Frameworks (e.g., tensorflow) as Media Filters", + "Converter/Filter/Tensor", "Handles NN Frameworks (e.g., tensorflow) as Media Filters with other/tensor type stream", "MyungJoo Ham "); @@ -240,264 +294,87 @@ gst_tensor_filter_class_init (GstTensor_FilterClass * g_class) * instantiate pads and add them to element * set pad calback functions * initialize instance structure - * @todo change the first index [0] of input/output Dimension & 
Type to loop for multi tensors */ static void -gst_tensor_filter_init (GstTensor_Filter * filter) +gst_tensor_filter_init (GstTensorFilter * self) { - int i; - GstTensor_Filter_Properties *prop = &filter->prop; + GstTensorFilterProperties *prop; - prop->silent = TRUE; + prop = &self->prop; + + /* init NNFW properties */ prop->nnfw = _T_F_UNDEFINED; prop->fw = NULL; - prop->fwOpened = FALSE; - prop->fwClosed = FALSE; - prop->inputConfigured = _TFC_INIT; - prop->outputConfigured = _TFC_INIT; - prop->modelFilename = NULL; - - for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) { - prop->inputMeta.dims[i][0] = 1; /* innermost */ - prop->inputMeta.dims[i][1] = 1; - prop->inputMeta.dims[i][2] = 1; - prop->inputMeta.dims[i][3] = 1; /* out */ - prop->inputMeta.types[i] = _NNS_END; /* not initialized */ - - prop->outputMeta.dims[i][0] = 1; /* innermost */ - prop->outputMeta.dims[i][1] = 1; - prop->outputMeta.dims[i][2] = 1; - prop->outputMeta.dims[i][3] = 1; /* out */ - prop->outputMeta.types[i] = _NNS_END; /* not initialized */ - } - - prop->inputCapNegotiated = FALSE; - prop->outputCapNegotiated = FALSE; - - prop->customProperties = NULL; - filter->privateData = NULL; /* mark not initialized. */ + prop->fw_opened = FALSE; + prop->input_configured = FALSE; + prop->output_configured = FALSE; + prop->model_file = NULL; + prop->custom_properties = NULL; + gst_tensors_info_init (&prop->input_meta); + gst_tensors_info_init (&prop->output_meta); + + /* init internal properties */ + self->privateData = NULL; + self->silent = TRUE; + self->configured = FALSE; + gst_tensors_config_init (&self->in_config); + gst_tensors_config_init (&self->out_config); } -#define silent_debug(...) debug_print (!prop->silent, __VA_ARGS__) - -/** - * @brief Invoke callbacks of filter->prop.fw. Gurantees calling open for the first call. - */ -#define gst_tensor_filter_call(filter, ret, funcname, ...) 
do { \ - if (filter->prop.fwOpened == FALSE) { \ - if (filter->prop.fw->open != NULL) \ - filter->prop.fw->open(filter, &filter->privateData); \ - filter->prop.fwOpened = TRUE; \ - } \ - g_assert(filter->prop.fwClosed != TRUE); \ - ret = filter->prop.fw->funcname(filter, &filter->privateData, __VA_ARGS__); \ - } while(0) - -/** @todo Call this where appropriate */ -#define gst_tensor_filter_close(filter) \ - do { \ - g_assert(filter->prop.fwClosed != TRUE); \ - g_assert(filter->prop.fwOpened == TRUE); \ - if (filter->prop.fw->close) \ - filter->prop.fw->close(filter, &filter->privateData); \ - filter->prop.fw->fwClosed = TRUE; \ - } while (0); - -static GstTensor_Filter_CheckStatus -gst_tensor_filter_generate_dim_from_cap (GstCaps * caps, const tensor_dim dim, - tensor_type * type); /** - * @brief Find caps based on i/o configuration or from the 'other' cap - * @param filter "this" object - * @param isInput TRUE if the "source" is input(sinkpad) and "target" is output(srcpad0 - * @param fromCaps The "source" cap given - * @return The "target" cap from "source" cap. - * - * We need both type and dimension to do this. - * This is supposed to be used by set_properties, restrting pad-caps before attaching input/output elements - * - * @todo Looks like this is buggy!!! + * @brief Calculate output buffer size. + * @param self "this" pointer + * @param index index of output tensors (if index < 0, the size of all output tensors will be returned.) 
+ * @return output buffer size */ -static GstCaps * -gst_tensor_filter_fix_caps (GstTensor_Filter * filter, gboolean isInput, - GstCaps * fromCaps) +static gsize +gst_tensor_filter_out_size (GstTensorFilter * self, gint index) { - tensor_type *type = NULL, _type[NNS_TENSOR_SIZE_LIMIT]; - const uint32_t *dimension[NNS_TENSOR_SIZE_LIMIT]; - tensor_dim dim[NNS_TENSOR_SIZE_LIMIT]; - GstTensor_Filter_CheckStatus configured = _TFC_INIT; - GstTensor_Filter_Properties *prop = &filter->prop; - GstCaps *tmp = NULL, *tmp2 = NULL, *staticcap = NULL, *resultCaps = NULL; - GstStaticCaps rawcap = GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT); - staticcap = gst_static_caps_get (&rawcap); - - if (isInput == TRUE) { - type = prop->inputMeta.types; - int i; - for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) { - dimension[i] = prop->inputMeta.dims[i]; - } - configured = prop->inputConfigured & _TFC_ALL; - } else { - type = prop->outputMeta.types; - int i; - for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) { - dimension[i] = prop->outputMeta.dims[i]; - } - configured = prop->outputConfigured & _TFC_ALL; - } - - /* 2. 
configure caps based on type & dimension */ - if (configured == _TFC_ALL) { - tmp2 = - gst_caps_new_simple ("other/tensor", "type", - G_TYPE_STRING, tensor_element_typename[type[0]], "dim1", G_TYPE_INT, - dimension[0][0], "dim2", G_TYPE_INT, dimension[0][1], "dim3", - G_TYPE_INT, dimension[0][2], "dim4", G_TYPE_INT, dimension[0][3], NULL); - tmp = gst_caps_intersect_full (staticcap, tmp2, GST_CAPS_INTERSECT_FIRST); - gst_caps_unref (tmp2); - } else if (configured == _TFC_DIMENSION) { - tmp2 = - gst_caps_new_simple ("other/tensor", "dim1", - G_TYPE_INT, dimension[0][0], "dim2", G_TYPE_INT, dimension[0][1], - "dim3", G_TYPE_INT, dimension[0][2], "dim4", G_TYPE_INT, - dimension[0][3], NULL); - tmp = gst_caps_intersect_full (staticcap, tmp2, GST_CAPS_INTERSECT_FIRST); - gst_caps_unref (tmp2); - } else if (configured == _TFC_TYPE) { - tmp2 = - gst_caps_new_simple ("other/tensor", "type", G_TYPE_STRING, - tensor_element_typename[type[0]], NULL); - tmp = gst_caps_intersect_full (staticcap, tmp2, GST_CAPS_INTERSECT_FIRST); - gst_caps_unref (tmp2); - } else { - /* knows nothing. This happens.. 
*/ - tmp2 = gst_caps_new_any (); - tmp = gst_caps_intersect_full (staticcap, tmp2, GST_CAPS_INTERSECT_FIRST); - gst_caps_unref (tmp2); - } + GstTensorsInfo *info; + guint i; + gsize out_size = 0; - if (fromCaps) { - gchar *str; - if (prop->silent == FALSE) { - str = gst_caps_to_string (fromCaps); - debug_print (TRUE, "fromCaps: %s\n", str); - g_free (str); + g_assert (self->configured); - str = gst_caps_to_string (tmp); - debug_print (TRUE, "filter: %s\n", str); - g_free (str); - } - tmp2 = gst_caps_intersect_full (fromCaps, tmp, GST_CAPS_INTERSECT_FIRST); - gst_caps_unref (tmp); - tmp = tmp2; - if (prop->silent == FALSE) { - str = gst_caps_to_string (tmp); - debug_print (TRUE, "filtered fromCaps: %s\n", str); - g_free (str); - } - } else { - if (prop->silent == FALSE) { - gchar *str = gst_caps_to_string (tmp); - debug_print (TRUE, "not filtered fromCaps: %s\n", str); - g_free (str); - } - } - - /* 2-2. Extract effective dim info from tmp */ - dimension[0] = dim[0]; - configured = - gst_tensor_filter_generate_dim_from_cap (tmp, dimension[0], &_type[0]); - configured &= _TFC_ALL; - /* tmp is no more needed */ - gst_caps_unref (tmp); - - /* 3. Calculate resultcap from fromcap. */ - if (isInput == TRUE) { - /* result == srcpad (output) */ - GstTensor_TensorsMeta outputMeta; - int ret = -1; - - /* 3-1-1. Try get output dim for srcpad */ - if (prop->fw->getOutputDimension) { - gst_tensor_filter_call (filter, ret, getOutputDimension, &outputMeta); - } - /* 3-1-1-a. If inputdim is available but outputdim is not available */ - if (ret != 0 && configured == _TFC_ALL && prop->fw->setInputDimension) { - gst_tensor_filter_call (filter, ret, setInputDimension, dimension[0], - _type[0], outputMeta.dims[0], &outputMeta.types[0]); - } - /* if ret == 0, either get or set has been successful. */ - if (ret != 0) { - /* We do not have enough info for dimension */ - /* knows nothing. This happens.. 
*/ - tmp = gst_caps_new_any (); - resultCaps = - gst_caps_intersect_full (staticcap, tmp, GST_CAPS_INTERSECT_FIRST); - gst_caps_unref (tmp); - } + info = &self->out_config.info; - /* 3-1.2. Configure resultCap from rdim/rtype */ - if (resultCaps == NULL) { - resultCaps = - gst_caps_new_simple ("other/tensor", "type", - G_TYPE_STRING, tensor_element_typename[outputMeta.types[0]], "dim1", - G_TYPE_INT, outputMeta.dims[0][0], "dim2", G_TYPE_INT, - outputMeta.dims[0][1], "dim3", G_TYPE_INT, outputMeta.dims[0][2], - "dim4", G_TYPE_INT, outputMeta.dims[0][3], NULL); + if (index < 0) { + /** calculate all output tensors */ + for (i = 0; i < info->num_tensors; i++) { + out_size += + get_tensor_element_count (info->info[i].dimension) * + tensor_element_size[info->info[i].type]; } } else { - /* result == sinkpad (input) */ - GstTensor_TensorsMeta meta; - int ret = -1; + g_assert (index < info->num_tensors); - /* 3-1-1. Try get output dim for srcpad */ - if (prop->fw->getInputDimension) { - gst_tensor_filter_call (filter, ret, getInputDimension, &meta); - } - if (ret != 0) { - /* We do not have output->input dimension conversion. */ - /* knows nothing. This happens.. */ - tmp = gst_caps_new_any (); - resultCaps = - gst_caps_intersect_full (staticcap, tmp, GST_CAPS_INTERSECT_FIRST); - gst_caps_unref (tmp); - } - - /* 3-1.2. Configure resultCap from rdim/rtype */ - if (resultCaps == NULL) { - resultCaps = - gst_caps_new_simple ("other/tensor", - "type", G_TYPE_STRING, tensor_element_typename[meta.types[0]], "dim1", - G_TYPE_INT, meta.dims[0][0], "dim2", G_TYPE_INT, meta.dims[0][1], - "dim3", G_TYPE_INT, meta.dims[0][2], "dim4", G_TYPE_INT, - meta.dims[0][3], NULL); - } + out_size = + get_tensor_element_count (info->info[index].dimension) * + tensor_element_size[info->info[index].type]; } - /** @todo 5. Verify with get_input/output_dimension callbacks! 
*/ - gst_caps_unref (staticcap); - - return resultCaps; + return out_size; } /** - * @brief @todo fill this in + * @brief Setter for tensor_filter properties. */ static void gst_tensor_filter_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { - GstTensor_Filter *filter = GST_TENSOR_FILTER (object); - GstTensor_Filter_Properties *prop = &filter->prop; - GstTensor_Filter_Framework *fw = prop->fw; + GstTensorFilter *self; + GstTensorFilterProperties *prop; + + self = GST_TENSOR_FILTER (object); + prop = &self->prop; - silent_debug ("Setting property. for Prop %d.\n", prop_id); + silent_debug ("Setting property for prop %d.\n", prop_id); switch (prop_id) { case PROP_SILENT: - prop->silent = g_value_get_boolean (value); - silent_debug ("Debug mode on (silent off)\n"); + self->silent = g_value_get_boolean (value); + silent_debug ("Debug mode = %d", self->silent); break; case PROP_FRAMEWORK: g_assert (prop->nnfw == _T_F_UNDEFINED && value); @@ -508,76 +385,113 @@ gst_tensor_filter_set_property (GObject * object, guint prop_id, g_assert (prop->nnfw != _T_F_UNDEFINED); g_assert (tensor_filter_supported[prop->nnfw] != NULL); prop->fw = tensor_filter_supported[prop->nnfw]; - fw = prop->fw; g_assert (prop->fw != NULL); /* See if mandatory methods are filled in */ - g_assert (fw->invoke_NN); - g_assert ((fw->getInputDimension && fw->getOutputDimension) - || fw->setInputDimension); + g_assert (prop->fw->invoke_NN); + g_assert ((prop->fw->getInputDimension && prop->fw->getOutputDimension) + || prop->fw->setInputDimension); break; case PROP_MODEL: - g_assert (prop->modelFilename == NULL && value); + g_assert (prop->model_file == NULL && value); /* Once configures, it cannot be changed in runtime */ - prop->modelFilename = g_value_dup_string (value); - silent_debug ("Model = %s\n", prop->modelFilename); - g_assert (g_file_test (prop->modelFilename, - G_FILE_TEST_IS_REGULAR) == TRUE); + prop->model_file = g_value_dup_string (value); + 
silent_debug ("Model = %s\n", prop->model_file); + g_assert (g_file_test (prop->model_file, G_FILE_TEST_IS_REGULAR)); break; case PROP_INPUT: - g_assert (!(prop->inputConfigured & _TFC_DIMENSION) && value); + g_assert (!prop->input_configured && value); /* Once configures, it cannot be changed in runtime */ { - int i; - prop->inputMeta.num_tensors = - get_tensor_dimension (g_value_get_string (value), - prop->inputMeta.dims); - for (i = 0; i < prop->inputMeta.num_tensors; i++) { + int i, rank; + gchar **str_dims; + + str_dims = g_strsplit (g_value_get_string (value), ",", -1); + prop->input_meta.num_tensors = g_strv_length (str_dims); + + for (i = 0; i < prop->input_meta.num_tensors; i++) { + rank = + get_tensor_dimension (str_dims[i], + prop->input_meta.info[i].dimension); + g_assert (rank > 0); + silent_debug ("Input Prop: %d:%d:%d:%d Rank %d\n", - prop->inputMeta.dims[i][0], prop->inputMeta.dims[i][1], - prop->inputMeta.dims[i][2], prop->inputMeta.dims[i][3], - prop->inputMeta.ranks[i]); + prop->input_meta.info[i].dimension[0], + prop->input_meta.info[i].dimension[1], + prop->input_meta.info[i].dimension[2], + prop->input_meta.info[i].dimension[3], rank); } - prop->inputConfigured |= _TFC_DIMENSION; + + g_strfreev (str_dims); } break; case PROP_OUTPUT: - g_assert (!(prop->outputConfigured & _TFC_DIMENSION) && value); + g_assert (!prop->output_configured && value); /* Once configures, it cannot be changed in runtime */ { - int i; - prop->outputMeta.num_tensors = - get_tensor_dimension (g_value_get_string (value), - prop->outputMeta.dims); - for (i = 0; i < prop->outputMeta.num_tensors; i++) { + int i, rank; + gchar **str_dims; + + str_dims = g_strsplit (g_value_get_string (value), ",", -1); + prop->output_meta.num_tensors = g_strv_length (str_dims); + + for (i = 0; i < prop->output_meta.num_tensors; i++) { + rank = + get_tensor_dimension (str_dims[i], + prop->output_meta.info[i].dimension); + g_assert (rank > 0); + silent_debug ("Output Prop: %d:%d:%d:%d Rank 
%d\n", - prop->outputMeta.dims[i][0], prop->outputMeta.dims[i][1], - prop->outputMeta.dims[i][2], prop->outputMeta.dims[i][3], - prop->outputMeta.ranks[i]); + prop->output_meta.info[i].dimension[0], + prop->output_meta.info[i].dimension[1], + prop->output_meta.info[i].dimension[2], + prop->output_meta.info[i].dimension[3], rank); } - prop->outputConfigured |= _TFC_DIMENSION; + + g_strfreev (str_dims); } break; case PROP_INPUTTYPE: - g_assert (prop->inputMeta.types[0] == _NNS_END && value); + g_assert (!prop->input_configured && value); /* Once configures, it cannot be changed in runtime */ - prop->inputMeta.types[0] = get_tensor_type (g_value_get_string (value)); - prop->inputConfigured |= _TFC_TYPE; - g_assert (prop->inputMeta.types[0] != _NNS_END); + { + int i; + gchar **str_types; + + str_types = g_strsplit (g_value_get_string (value), ",", -1); + prop->input_meta.num_tensors = g_strv_length (str_types); + + for (i = 0; i < prop->input_meta.num_tensors; i++) { + prop->input_meta.info[i].type = get_tensor_type (str_types[i]); + g_assert (prop->input_meta.info[i].type != _NNS_END); + } + + g_strfreev (str_types); + } break; case PROP_OUTPUTTYPE: - g_assert (prop->outputMeta.types[0] == _NNS_END && value); + g_assert (!prop->output_configured && value); /* Once configures, it cannot be changed in runtime */ - prop->outputMeta.types[0] = get_tensor_type (g_value_get_string (value)); - prop->outputConfigured |= _TFC_TYPE; - g_assert (prop->outputMeta.types[0] != _NNS_END); + { + int i; + gchar **str_types; + + str_types = g_strsplit (g_value_get_string (value), ",", -1); + prop->output_meta.num_tensors = g_strv_length (str_types); + + for (i = 0; i < prop->output_meta.num_tensors; i++) { + prop->output_meta.info[i].type = get_tensor_type (str_types[i]); + g_assert (prop->output_meta.info[i].type != _NNS_END); + } + + g_strfreev (str_types); + } break; case PROP_CUSTOM: - g_assert (prop->customProperties == NULL && value); + g_assert (prop->custom_properties == NULL 
&& value); /* Once configures, it cannot be changed in runtime */ - prop->customProperties = g_value_dup_string (value); - if (prop->silent == FALSE) - g_printerr ("Custom Option = %s\n", prop->customProperties); + prop->custom_properties = g_value_dup_string (value); + silent_debug ("Custom Option = %s\n", prop->custom_properties); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); @@ -586,57 +500,120 @@ gst_tensor_filter_set_property (GObject * object, guint prop_id, } /** - * @brief @todo fill this in + * @brief Getter for tensor_filter properties. */ static void gst_tensor_filter_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { - GstTensor_Filter *filter = GST_TENSOR_FILTER (object); + GstTensorFilter *self; + GstTensorFilterProperties *prop; - debug_print (!filter->prop.silent, "Getting property. for Prop %d.\n", - prop_id); + self = GST_TENSOR_FILTER (object); + prop = &self->prop; + + silent_debug ("Getting property for prop %d.\n", prop_id); switch (prop_id) { case PROP_SILENT: - g_value_set_boolean (value, filter->prop.silent); + g_value_set_boolean (value, self->silent); break; case PROP_FRAMEWORK: - g_value_set_string (value, nnfw_names[filter->prop.nnfw]); + g_value_set_string (value, nnfw_names[prop->nnfw]); break; case PROP_MODEL: - g_value_set_string (value, filter->prop.modelFilename); + g_value_set_string (value, prop->model_file); break; - case PROP_INPUT:{ - GArray *input = - g_array_sized_new (FALSE, FALSE, 4, NNS_TENSOR_RANK_LIMIT); - int i; - for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) - g_array_append_val (input, filter->prop.inputMeta.dims[0][i]); - g_value_take_boxed (value, input); - /* take function hands the object over from here so that we don't need to free it. 
*/ - } + case PROP_INPUT: + if (prop->input_meta.num_tensors > 0) { + GString *dimensions = g_string_new (NULL); + gchar *dim_str; + int i; + + for (i = 0; i < prop->input_meta.num_tensors; i++) { + dim_str = + get_tensor_dimension_string (prop->input_meta.info[i].dimension); + g_string_append (dimensions, dim_str); + + if (i < prop->input_meta.num_tensors - 1) { + g_string_append (dimensions, ","); + } + + g_free (dim_str); + } + + g_value_set_string (value, dimensions->str); + g_string_free (dimensions, TRUE); + } else { + g_value_set_string (value, ""); + } break; - case PROP_OUTPUT:{ - GArray *output = - g_array_sized_new (FALSE, FALSE, 4, NNS_TENSOR_RANK_LIMIT); - int i; - for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) - g_array_append_val (output, filter->prop.outputMeta.dims[0][i]); - g_value_take_boxed (value, output); - /* take function hands the object over from here so that we don't need to free it. */ - } + case PROP_OUTPUT: + if (prop->output_meta.num_tensors > 0) { + GString *dimensions = g_string_new (NULL); + gchar *dim_str; + int i; + + for (i = 0; i < prop->output_meta.num_tensors; i++) { + dim_str = + get_tensor_dimension_string (prop->output_meta.info[i].dimension); + g_string_append (dimensions, dim_str); + + if (i < prop->output_meta.num_tensors - 1) { + g_string_append (dimensions, ","); + } + + g_free (dim_str); + } + + g_value_set_string (value, dimensions->str); + g_string_free (dimensions, TRUE); + } else { + g_value_set_string (value, ""); + } break; case PROP_INPUTTYPE: - g_value_set_string (value, - tensor_element_typename[filter->prop.inputMeta.types[0]]); + if (prop->input_meta.num_tensors > 0) { + GString *types = g_string_new (NULL); + int i; + + for (i = 0; i < prop->input_meta.num_tensors; i++) { + g_string_append (types, + tensor_element_typename[prop->input_meta.info[i].type]); + + if (i < prop->input_meta.num_tensors - 1) { + g_string_append (types, ","); + } + } + + g_value_set_string (value, types->str); + g_string_free (types, 
TRUE); + } else { + g_value_set_string (value, ""); + } break; case PROP_OUTPUTTYPE: - g_value_set_string (value, - tensor_element_typename[filter->prop.outputMeta.types[0]]); + if (prop->output_meta.num_tensors > 0) { + GString *types = g_string_new (NULL); + int i; + + for (i = 0; i < prop->output_meta.num_tensors; i++) { + g_string_append (types, + tensor_element_typename[prop->output_meta.info[i].type]); + + if (i < prop->output_meta.num_tensors - 1) { + g_string_append (types, ","); + } + } + + g_value_set_string (value, types->str); + g_string_free (types, TRUE); + } else { + g_value_set_string (value, ""); + } break; case PROP_CUSTOM: - g_value_set_string (value, filter->prop.customProperties); + g_value_set_string (value, prop->custom_properties); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); @@ -644,92 +621,45 @@ gst_tensor_filter_get_property (GObject * object, guint prop_id, } } -/****************************************************************** - * GstElement vmethod implementations - */ - -/** - * @brief entry point to initialize the plug-in - * initialize the plug-in itself - * register the element factories and other features - */ -static gboolean -tensor_filter_init (GstPlugin * tensor_filter) -{ - /** - * debug category for fltering log messages - * - * exchange the string 'Template tensor_filter' with your description - */ - GST_DEBUG_CATEGORY_INIT (gst_tensor_filter_debug, "tensor_filter", - 0, "Template tensor_filter"); - - return gst_element_register (tensor_filter, "tensor_filter", GST_RANK_NONE, - GST_TYPE_TENSOR_FILTER); -} - -/** - * PACKAGE: this is usually set by autotools depending on some _INIT macro - * in configure.ac and then written into and defined in config.h, but we can - * just set it ourselves here in case someone doesn't use autotools to - * compile this code. GST_PLUGIN_DEFINE needs PACKAGE to be defined. 
- */ -#ifndef PACKAGE -#define PACKAGE "tensor_filter" -#endif - /** - * gstreamer looks for this structure to register tensor_filters - * - * exchange the string 'Template tensor_filter' with your tensor_filter description - */ -GST_PLUGIN_DEFINE (GST_VERSION_MAJOR, - GST_VERSION_MINOR, - tensor_filter, - "tensor_filter", - tensor_filter_init, VERSION, "LGPL", "GStreamer", "http://gstreamer.net/"); - -/** - * @brief @todo fill this in + * @brief non-ip transform. required vmethod of GstBaseTransform. */ static GstFlowReturn gst_tensor_filter_transform (GstBaseTransform * trans, GstBuffer * inbuf, GstBuffer * outbuf) { - GstTensor_Filter *filter = GST_TENSOR_FILTER_CAST (trans); - size_t outBufSize; + GstTensorFilter *self; + gsize outBufSize; uint8_t *inptr, *outptr; uint8_t *retoutptr; GstMapInfo inInfo, outInfo; - if (G_UNLIKELY (filter->prop.inputCapNegotiated == FALSE - || filter->prop.outputCapNegotiated == FALSE)) + self = GST_TENSOR_FILTER_CAST (trans); + + if (G_UNLIKELY (!self->configured)) goto unknown_format; - if (G_UNLIKELY (!filter->prop.fw)) + if (G_UNLIKELY (!self->prop.fw)) goto unknown_framework; - if (G_UNLIKELY (!filter->prop.modelFilename)) + if (G_UNLIKELY (!self->prop.model_file)) goto unknown_model; - if (G_UNLIKELY (!filter->prop.fw->invoke_NN)) + if (G_UNLIKELY (!self->prop.fw->invoke_NN)) goto unknown_invoke; /* 0. Check all properties and inbuf size. */ - debug_print (!filter->prop.silent, "Invoking %s with %s model\n", - filter->prop.fw->name, filter->prop.modelFilename); - - g_assert ((filter->prop.inputConfigured & _TFC_ALL) == _TFC_ALL && - (filter->prop.outputConfigured & _TFC_ALL) == _TFC_ALL); + silent_debug ("Invoking %s with %s model\n", self->prop.fw->name, + self->prop.model_file); /* 1. 
Allocate outbuf if allocate_in_invoke is FALSE */ g_assert (outbuf); - if (filter->prop.fw->allocate_in_invoke == FALSE) { - outBufSize = tensor_element_size[filter->prop.outputMeta.types[0]] * - get_tensor_element_count (filter->prop.outputMeta.dims[0]); + outBufSize = gst_tensor_filter_out_size (self, -1); + + if (self->prop.fw->allocate_in_invoke == FALSE) { if (gst_buffer_get_size (outbuf) < outBufSize) { /** @todo: write a routine to say aloud when this happens */ gst_buffer_set_size (outbuf, outBufSize); } - debug_print (!filter->prop.silent, "outbuf = %lu / expected = %lu\n", + silent_debug ("outbuf = %lu / expected = %lu\n", gst_buffer_get_size (outbuf), outBufSize); g_assert (gst_buffer_get_size (outbuf) >= outBufSize); @@ -739,7 +669,7 @@ gst_tensor_filter_transform (GstBaseTransform * trans, inptr = inInfo.data; outptr = outInfo.data; - gst_tensor_filter_call (filter, retoutptr, invoke_NN, inptr, outptr); + gst_tensor_filter_call (self, retoutptr, invoke_NN, inptr, outptr); g_assert (outptr == retoutptr); gst_buffer_unmap (inbuf, &inInfo); @@ -750,40 +680,36 @@ gst_tensor_filter_transform (GstBaseTransform * trans, g_assert (gst_buffer_get_size (outbuf) == 0); inptr = inInfo.data; - gst_tensor_filter_call (filter, retoutptr, invoke_NN, inptr, NULL); + gst_tensor_filter_call (self, retoutptr, invoke_NN, inptr, NULL); gst_buffer_unmap (inbuf, &inInfo); - /** @todo Performance: cache get_tensor_element_count * tensor_element_size */ - mem = gst_memory_new_wrapped (0, retoutptr, - get_tensor_element_count (filter->prop.outputMeta.dims[0]) * - tensor_element_size[filter->prop.outputMeta.types[0]], - 0, - get_tensor_element_count (filter->prop.outputMeta.dims[0]) * - tensor_element_size[filter->prop.outputMeta.types[0]], NULL, NULL); + mem = + gst_memory_new_wrapped (0, retoutptr, outBufSize, 0, outBufSize, NULL, + NULL); gst_buffer_insert_memory (outbuf, -1, mem); } /* 3. Return result! 
*/ return GST_FLOW_OK; unknown_format: - GST_ELEMENT_ERROR (filter, CORE, NOT_IMPLEMENTED, (NULL), ("unknown format")); + GST_ELEMENT_ERROR (self, CORE, NOT_IMPLEMENTED, (NULL), ("unknown format")); return GST_FLOW_NOT_NEGOTIATED; unknown_framework: - GST_ELEMENT_ERROR (filter, CORE, NOT_IMPLEMENTED, (NULL), + GST_ELEMENT_ERROR (self, CORE, NOT_IMPLEMENTED, (NULL), ("framework not configured")); return GST_FLOW_ERROR; unknown_model: - GST_ELEMENT_ERROR (filter, CORE, NOT_IMPLEMENTED, (NULL), + GST_ELEMENT_ERROR (self, CORE, NOT_IMPLEMENTED, (NULL), ("model filepath not configured")); return GST_FLOW_ERROR; unknown_invoke: - GST_ELEMENT_ERROR (filter, CORE, NOT_IMPLEMENTED, (NULL), + GST_ELEMENT_ERROR (self, CORE, NOT_IMPLEMENTED, (NULL), ("invoke function is not defined")); return GST_FLOW_ERROR; } /** - * @brief @todo fill this in + * @brief in-place transform. required vmethod of GstBaseTransform. */ static GstFlowReturn gst_tensor_filter_transform_ip (GstBaseTransform * trans, GstBuffer * buf) @@ -794,144 +720,200 @@ gst_tensor_filter_transform_ip (GstBaseTransform * trans, GstBuffer * buf) /** @todo 1. Resize buf if output is larger than input */ /** @todo 2. Call the filter-subplugin callback, "invoke" */ /** @todo 3. Return result! */ - g_assert (1 == 0); + g_assert (0); return GST_FLOW_ERROR; } - /** - * @brief process property values, call get/set I/O dim. (internal static function) - * If set-prop configured dimension, verify the dimension with fw callbacks - * Otherwise, configure dimension with fw callbacks. - * - * @param filter "this" pointer - * @param fixate TRUE if we may fixate property values. - * @return 1: OK and all set. 0: Try again later. -1: cannot proceed. fatal ERROR. + * @brief Load tensor info from NN model. 
+ * (both input and output tensor) */ -static int -gst_tensor_filter_property_process (GstTensor_Filter * filter, gboolean fixate) +static void +gst_tensor_filter_load_tensor_info (GstTensorFilter * self) { - GstTensor_Filter_Framework *fw = filter->prop.fw; - GstTensor_Filter_Properties *prop = &filter->prop; - int ret; - GstTensor_TensorsMeta meta; - int i, tensor_idx; - - /* Ensure the subplugin is contacted first before checking the XOR assert */ - if (!prop->fwOpened && fw->open) - fw->open (filter, &filter->privateData); - prop->fwOpened = TRUE; - - if (fw->getInputDimension != NULL) { - gst_tensor_filter_call (filter, ret, getInputDimension, &meta); - if (ret == 0) { - for (tensor_idx = 0; tensor_idx < meta.num_tensors; tensor_idx++) { - if (prop->inputConfigured & _TFC_TYPE) - if (prop->inputMeta.types[tensor_idx] != meta.types[tensor_idx]) { - return -1; - } - if (prop->inputConfigured & _TFC_DIMENSION) - for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) - if (prop->inputMeta.dims[tensor_idx][i] != meta.dims[tensor_idx][i]) { - return -1; - } - if (fixate && !(prop->inputConfigured & _TFC_TYPE)) { - prop->inputMeta.types[tensor_idx] = meta.types[tensor_idx]; - prop->inputConfigured |= _TFC_TYPE; - } - if (fixate && !(prop->inputConfigured & _TFC_DIMENSION)) { - memcpy (prop->inputMeta.dims[tensor_idx], meta.dims[tensor_idx], - sizeof (meta.dims[tensor_idx])); - prop->inputConfigured |= _TFC_DIMENSION; + GstTensorFilterProperties *prop; + int res; + + prop = &self->prop; + + /** + * supposed fixed in-tensor info if getInputDimension is defined. + */ + if (!prop->input_configured) { + if (prop->fw->getInputDimension) { + GstTensorsInfo in_info; + + gst_tensors_info_init (&in_info); + gst_tensor_filter_call (self, res, getInputDimension, &in_info); + + if (res == 0) { + g_assert (in_info.num_tensors > 0); + + /** if set-property called and already has info, verify it! 
*/ + if (prop->input_meta.num_tensors > 0) { + g_assert (gst_tensors_info_is_equal (&prop->input_meta, &in_info)); } + + prop->input_configured = TRUE; + self->in_config.info = prop->input_meta = in_info; } } } - if (fw->getOutputDimension != NULL) { - gst_tensor_filter_call (filter, ret, getOutputDimension, &meta); - if (ret == 0) { - for (tensor_idx = 0; tensor_idx < meta.num_tensors; tensor_idx++) { - if (prop->outputConfigured & _TFC_TYPE) - if (prop->outputMeta.types[tensor_idx] != meta.types[tensor_idx]) { - return -1; - } - if (prop->outputConfigured & _TFC_DIMENSION) - for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) - if (prop->outputMeta.dims[tensor_idx][i] != - meta.dims[tensor_idx][i]) { - return -1; - } - if (fixate && !(prop->outputConfigured & _TFC_TYPE)) { - prop->outputMeta.types[tensor_idx] = meta.types[tensor_idx]; - prop->outputConfigured |= _TFC_TYPE; - } - if (fixate && !(prop->outputConfigured & _TFC_DIMENSION)) { - memcpy (prop->outputMeta.dims[tensor_idx], meta.dims[tensor_idx], - sizeof (meta.dims[tensor_idx])); - prop->outputConfigured |= _TFC_DIMENSION; + /** + * supposed fixed out-tensor info if getOutputDimension is defined. + */ + if (!prop->output_configured) { + if (prop->fw->getOutputDimension) { + GstTensorsInfo out_info; + + gst_tensors_info_init (&out_info); + gst_tensor_filter_call (self, res, getOutputDimension, &out_info); + + if (res == 0) { + g_assert (out_info.num_tensors > 0); + + /** if set-property called and already has info, verify it! */ + if (prop->output_meta.num_tensors > 0) { + g_assert (gst_tensors_info_is_equal (&prop->output_meta, &out_info)); } + + prop->output_configured = TRUE; + self->out_config.info = prop->output_meta = out_info; } } } +} - if (fw->setInputDimension != NULL) { - GstTensor_TensorsMeta *cmpMeta; - /* If filter's inputdimension is not clear, yet, we cannot proceed. 
try again later */ - if ((prop->inputConfigured & _TFC_ALL) == _TFC_ALL) { - cmpMeta = &meta; - memcpy (meta.dims[0], prop->outputMeta.dims[0], sizeof (meta.dims[0])); - meta.types[0] = prop->outputMeta.types[0]; - } else { - if (fw->getOutputDimension != NULL) { - gst_tensor_filter_call (filter, ret, getInputDimension, &meta); - if (ret != 0) - goto finalize; - cmpMeta = &meta; - } else { - /* Nothing to do here */ - goto finalize; - } - } +/** + * @brief Configure input and output tensor info from incaps. + * @param self "this" pointer + * @param incaps received caps for sink pad + * @return TRUE if fully configured + */ +static gboolean +gst_tensor_filter_configure_tensor (GstTensorFilter * self, + const GstCaps * incaps) +{ + GstTensorFilterProperties *prop; + GstStructure *structure; + GstTensorsConfig in_config, out_config; - gst_tensor_filter_call (filter, ret, setInputDimension, cmpMeta->dims[0], - cmpMeta->types[0], meta.dims[0], &meta.types[0]); - if (ret != 0) - goto finalize; + g_return_val_if_fail (incaps != NULL, FALSE); - if (prop->outputConfigured & _TFC_TYPE) { - if (prop->outputMeta.types[0] != meta.types[0]) { - return -1; + prop = &self->prop; + + /** + * GstTensorFilter has to parse the tensor dimension and type from NN model. + * 1. Call functions getInputDimension and getOutputDimension to get the dimension and type. + * 2. If these functions are not defined, call setInputDimension with parsed info from caps. + * 3. If set-prop configured dimension, verify the dimension with fw callbacks. + */ + gst_tensor_filter_load_tensor_info (self); + + structure = gst_caps_get_structure (incaps, 0); + gst_tensors_config_from_structure (&in_config, structure); + + /** + * Check configuration from caps. + * If true, fully configured tensor info from caps. + */ + if (gst_tensors_config_validate (&in_config)) { + /** if set-property called and already has info, verify it! 
*/ + if (prop->input_meta.num_tensors > 0) { + if (!gst_tensors_info_is_equal (&in_config.info, &prop->input_meta)) { + g_assert (0); + return FALSE; } } - if (prop->outputConfigured & _TFC_DIMENSION) { - for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) { - if (prop->outputMeta.dims[0][i] != meta.dims[0][i]) { - return -1; + + prop->input_configured = TRUE; + self->in_config.info = prop->input_meta = in_config.info; + + /** call setInputDimension if output tensor is not configured */ + if (!prop->output_configured) { + if (prop->fw->setInputDimension) { + GstTensorsInfo out_info; + int res; + + gst_tensors_info_init (&out_info); + gst_tensor_filter_call (self, res, setInputDimension, &in_config.info, + &out_info); + + if (res == 0) { + /** if set-property called and already has info, verify it! */ + if (prop->output_meta.num_tensors > 0) { + if (!gst_tensors_info_is_equal (&prop->output_meta, &out_info)) { + g_assert (0); + return FALSE; + } + } + + prop->output_configured = TRUE; + self->out_config.info = prop->output_meta = out_info; } } - } - if (fixate && !(prop->outputConfigured & _TFC_TYPE)) { - prop->outputMeta.types[0] = meta.types[0]; - prop->outputConfigured |= _TFC_TYPE; + if (!prop->output_configured) { + err_print ("Failed to get output tensor info.\n"); + g_assert (0); + return FALSE; + } } - if (fixate && !(prop->outputConfigured & _TFC_DIMENSION)) { - memcpy (prop->outputMeta.dims[0], meta.dims[0], sizeof (meta.dims[0])); - prop->outputConfigured |= _TFC_DIMENSION; + + /** + * @todo how can we update the framerate? + * GstTensorFilter cannot assure the framerate. + * Simply set the framerate of out-tensor from incaps. + */ + out_config.info = prop->output_meta; + out_config.rate_n = in_config.rate_n; + out_config.rate_d = in_config.rate_d; + + if (self->configured) { + /** already configured, compare to old. 
*/ + g_assert (gst_tensors_config_is_equal (&self->in_config, &in_config)); + g_assert (gst_tensors_config_is_equal (&self->out_config, &out_config)); + } else { + self->in_config = in_config; + self->out_config = out_config; + self->configured = TRUE; } } -finalize: - if ((prop->inputConfigured & _TFC_ALL) == _TFC_ALL && - (prop->outputConfigured & _TFC_ALL) == _TFC_ALL) - return 1; - else - return 0; - - return -1; /* Code cannot reach here */ + return self->configured; } +/** + * @brief Get caps for given config. + * @param self "this" pointer + * @param config tensor config info + */ +static GstCaps * +gst_tensor_filter_caps_from_config (GstTensorFilter * self, + GstTensorsConfig * config) +{ + GstCaps *caps; + + g_return_val_if_fail (config != NULL, NULL); + + if (config->info.num_tensors < 2) { + GstTensorConfig c; + + /** + * supposed other/tensor if the number of tensor is less than 2. + */ + c.info = config->info.info[0]; + c.rate_n = config->rate_n; + c.rate_d = config->rate_d; + + caps = gst_tensor_caps_from_config (&c); + } else { + caps = gst_tensors_caps_from_config (config); + } + + return caps; +} /** * @brief configure tensor-srcpad cap from "proposed" cap. @@ -947,257 +929,159 @@ static GstCaps * gst_tensor_filter_transform_caps (GstBaseTransform * trans, GstPadDirection direction, GstCaps * caps, GstCaps * filter) { - GstTensor_Filter *obj = GST_TENSOR_FILTER_CAST (trans); - int check = gst_tensor_filter_property_process (obj, FALSE); + GstTensorFilter *self; + GstCaps *result; - g_assert (check >= 0); + self = GST_TENSOR_FILTER_CAST (trans); - if (direction == GST_PAD_SINK) { - /* caps: sink pad. get src pad info */ - obj->prop.outputCapNegotiated = TRUE; + silent_debug ("Direction = %d\n", direction); + silent_debug_caps (caps, "from"); + silent_debug_caps (filter, "filter"); - /** @todo 1. Check caps w/ getInputDimension && saved input dimension */ - /** @todo 2. 
Check returning-caps w/ getOutputDimension && saved output dimension */ + /** + * GstTensorFilter has to parse the tensor dimension and type from NN model. + * In this stage, in-caps is not fixed yet. + * So, just call getInputDimension and getOutputDimension to get the tensor info. + * If these functions are not defined, we have to call setInputDimension in set_caps(), and then it will fully configure the tensor info. + */ + gst_tensor_filter_load_tensor_info (self); - return gst_tensor_filter_fix_caps (obj, TRUE, caps); + if (direction == GST_PAD_SINK) { + /* caps: sink pad. get src pad info */ + if (self->prop.output_configured) { + /** fixed tensor info */ + result = gst_tensor_filter_caps_from_config (self, &self->out_config); + } else { + /** we don't know the exact tensor info yet */ + result = gst_caps_from_string (CAPS_STRING); + } } else { /* caps: src pad. get sink pad info */ - obj->prop.inputCapNegotiated = TRUE; - - /** @todo 1. Check caps w/ getOutputDimension && saved output dimension */ - /** @todo 2. Check returning-caps w/ getInputDimension && saved input dimension */ - return gst_tensor_filter_fix_caps (obj, FALSE, caps); + if (self->prop.input_configured) { + /** fixed tensor info */ + result = gst_tensor_filter_caps_from_config (self, &self->in_config); + } else { + /** we don't know the exact tensor info yet */ + result = gst_caps_from_string (CAPS_STRING); + } } - /* Cannot reach here. */ - return NULL; -} - -/** - * @brief Try to generate dim/type from caps (internal static function) - * @return _TFC_TYPE is on if type determined. 
_TFC_DIMENSION is on if dim determined - * @param filter "this" pointer - * @param caps the caps to be analyzed (padcap) - * @param[out] dim tensor dimension derived from caps - * @param[out] type tensor type derived from caps - */ -static GstTensor_Filter_CheckStatus -gst_tensor_filter_generate_dim_from_cap (GstCaps * caps, const tensor_dim dim, - tensor_type * type) -{ - unsigned int i, capsize; - const GstStructure *str; - GstTensor_Filter_CheckStatus ret = _TFC_INIT; - const gchar *strval; - - if (!caps) { - return _TFC_INIT; - } + if (filter) { + GstCaps *intersection; - capsize = gst_caps_get_size (caps); + intersection = + gst_caps_intersect_full (filter, result, GST_CAPS_INTERSECT_FIRST); - for (i = 0; i < capsize; i++) { - str = gst_caps_get_structure (caps, i); - if (gst_structure_get_int (str, "dim1", (int *) &dim[0]) && - gst_structure_get_int (str, "dim2", (int *) &dim[1]) && - gst_structure_get_int (str, "dim3", (int *) &dim[2]) && - gst_structure_get_int (str, "dim4", (int *) &dim[3])) { - ret |= _TFC_DIMENSION; - } - strval = gst_structure_get_string (str, "type"); - if (strval) { - *type = get_tensor_type (strval); - g_assert (*type != _NNS_END); - ret |= _TFC_TYPE; - } + gst_caps_unref (result); + result = intersection; } - return ret; -} - -/** - * @brief Read pad-cap and return dimension/type info - * @return _TFC_TYPE is on if type determined. _TFC_DIMENSION is on if dim determined - * @param[in] caps The pad cap - * @param[in] input TRUE if input. FALSE if output. 
- * @param[out] dim Tensor dimension - @ @param[out[ type Tensor element type - */ -static void -gst_tensor_caps_to_dimension (GstCaps * caps, gboolean input, - GstTensor_Filter_Properties * prop) -{ - if (input) { - prop->inputConfigured |= - gst_tensor_filter_generate_dim_from_cap (caps, prop->inputMeta.dims[0], - &prop->inputMeta.types[0]); - } else { - prop->outputConfigured |= - gst_tensor_filter_generate_dim_from_cap (caps, prop->outputMeta.dims[0], - &prop->outputMeta.types[0]); - } + silent_debug_caps (result, "to"); + return result; } /** - * @brief @todo fill this in + * @brief fixate caps. required vmethod of GstBaseTransform. */ static GstCaps * gst_tensor_filter_fixate_caps (GstBaseTransform * trans, GstPadDirection direction, GstCaps * caps, GstCaps * othercaps) { - GstCaps *supposed = - gst_tensor_filter_transform_caps (trans, direction, caps, NULL); - GstCaps *result = gst_caps_intersect (othercaps, supposed); - GstTensor_Filter *obj = GST_TENSOR_FILTER_CAST (trans); - GstTensor_Filter_Framework *fw = obj->prop.fw; - GstCaps *sinkpadcap, *srcpadcap; - int check = gst_tensor_filter_property_process (obj, TRUE); - GstTensor_TensorsMeta meta; + GstTensorFilter *self; + GstTensorsConfig in_config, out_config; + GstStructure *structure; + GstCaps *supposed; + GstCaps *result; - gst_caps_unref (supposed); - g_assert (check >= 0); + self = GST_TENSOR_FILTER_CAST (trans); - g_assert (!gst_caps_is_empty (result)); - gst_caps_unref (othercaps); + silent_debug ("fixate_caps, direction = %d\n", direction); + silent_debug_caps (caps, "caps"); + silent_debug_caps (othercaps, "othercaps"); - result = gst_caps_make_writable (result); - result = gst_caps_fixate (result); + gst_tensors_config_init (&in_config); + gst_tensors_config_init (&out_config); - if (direction == GST_PAD_SINK) { - if (gst_caps_is_subset (caps, result)) { - gst_caps_replace (&result, caps); - } - obj->prop.inputCapNegotiated = TRUE; - sinkpadcap = caps; - srcpadcap = result; + 
gst_tensor_filter_load_tensor_info (self); + + /** + * Get input tensor info from caps. + * @todo Do we need to verify configured info from caps? + * If getInputDimension is defined and gets exact tensor info from NN model, we can use it. + */ + structure = gst_caps_get_structure (caps, 0); + gst_tensors_config_from_structure (&in_config, structure); + + /** output tensor info */ + if (self->prop.output_configured) { + /** fixed tensor info */ + out_config.info = self->prop.output_meta; } else { - obj->prop.outputCapNegotiated = TRUE; - sinkpadcap = result; - srcpadcap = caps; - } + int res = -1; - if ((obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL && - (obj->prop.outputConfigured & _TFC_ALL) == _TFC_ALL) - return result; - - debug_print (!obj->prop.silent, "Nego (%s) / i %d / o %d\n", - (direction == GST_PAD_SINK) ? "sink" : "src", - obj->prop.inputCapNegotiated, obj->prop.outputCapNegotiated); - - /* Before moving on, use if getInputDim/getOutputDim is available. */ - if (fw->getInputDimension - && (obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL) { - int ret = 0; - int tensor_idx; - gst_tensor_filter_call (obj, ret, getInputDimension, &meta); - for (tensor_idx = 0; tensor_idx < meta.num_tensors; tensor_idx++) { - memcpy (obj->prop.inputMeta.dims[tensor_idx], meta.dims[tensor_idx], - sizeof (meta.dims[tensor_idx])); - obj->prop.inputMeta.types[tensor_idx] = meta.types[tensor_idx]; + /** call setInputDimension with given input tensor */ + gst_tensor_filter_call (self, res, setInputDimension, &in_config.info, + &out_config.info); - } - if (ret == 0) { - obj->prop.inputConfigured |= _TFC_ALL; + if (res != 0) { + silent_debug ("Cannot get the output tensor info."); } } - if (fw->getOutputDimension - && (obj->prop.outputConfigured & _TFC_ALL) == _TFC_ALL) { - int ret = 0; - int tensor_idx; - gst_tensor_filter_call (obj, ret, getOutputDimension, &meta); - for (tensor_idx = 0; tensor_idx < meta.num_tensors; tensor_idx++) { - memcpy 
(obj->prop.outputMeta.dims[tensor_idx], meta.dims[tensor_idx], - sizeof (meta.dims[tensor_idx])); - obj->prop.outputMeta.types[tensor_idx] = meta.types[tensor_idx]; - } - if (ret == 0) { - obj->prop.outputConfigured |= _TFC_ALL; - } - } - if ((obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL && - (obj->prop.outputConfigured & _TFC_ALL) == _TFC_ALL) { - return result; - } - gst_tensor_caps_to_dimension (sinkpadcap, TRUE, &obj->prop); - gst_tensor_caps_to_dimension (srcpadcap, FALSE, &obj->prop); - - if ((obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL && - (obj->prop.outputConfigured & _TFC_ALL) == _TFC_ALL) - return result; - - if ((obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL) { - if (fw->setInputDimension) { - int ret = 0; - gst_tensor_filter_call (obj, ret, setInputDimension, - obj->prop.inputMeta.dims[0], obj->prop.inputMeta.types[0], - obj->prop.outputMeta.dims[0], &obj->prop.outputMeta.types[0]); - obj->prop.outputConfigured |= _TFC_ALL; - g_assert (ret == 0); - return result; - } - } + out_config.rate_n = in_config.rate_n; + out_config.rate_d = in_config.rate_d; - /** - * @todo ARCH-Decision required; are we going to (and do we need to) - * support setOutputDimention (and get InputDim accordingly?) - * - * If not, we have done with it and emit error here if we still don't have - * capabilities fixed. - * - * In this case, result should be re-calculated because - * gst_tensor_filter_transform_caps () cannot do reverse transform. - */ + supposed = gst_tensor_filter_caps_from_config (self, &out_config); - if (!obj->prop.silent) { - gchar *str = gst_caps_to_string (caps); - debug_print (TRUE, "Caps(%s) %s\n", - (direction == GST_PAD_SINK) ? "input/sink" : "output/src", str); - g_free (str); - str = gst_caps_to_string (result); - debug_print (TRUE, "Caps(%s) %s\n", - (direction == GST_PAD_SINK) ? 
"Op-input/sink" : "Op-output/src", str); - g_free (str); - } + result = gst_caps_intersect (othercaps, supposed); + gst_caps_unref (supposed); - g_assert (0); /* Not Supported (configure input from output dimension) */ + result = gst_caps_make_writable (result); + result = gst_caps_fixate (result); + + silent_debug_caps (result, "result"); return result; } /** - * @brief @todo fill this in + * @brief set caps. required vmethod of GstBaseTransform. */ static gboolean gst_tensor_filter_set_caps (GstBaseTransform * trans, GstCaps * incaps, GstCaps * outcaps) { - GstTensor_Filter *filter = GST_TENSOR_FILTER_CAST (trans); - int check = gst_tensor_filter_property_process (filter, TRUE); - tensor_dim dim; - tensor_type type; - gboolean result; + GstTensorFilter *self; + GstStructure *structure; + GstTensorsConfig config; - g_assert (check >= 0); + self = GST_TENSOR_FILTER_CAST (trans); - result = gst_tensor_filter_generate_dim_from_cap (incaps, dim, &type); - /** @todo Configure filter-dim from caps if filter-dim is not configured, yet */ - if ((filter->prop.inputConfigured & _TFC_ALL) != _TFC_ALL) { - /* we may set if result == TRUE */ - g_assert (FALSE); /* NYI */ + silent_debug_caps (incaps, "incaps"); + silent_debug_caps (outcaps, "outcaps"); - g_assert (result == TRUE); + if (!gst_tensor_filter_configure_tensor (self, incaps)) { + silent_debug ("Failed to configure tensor."); + return FALSE; } - /** @todo Check consistencyu between dim/type with filter->input* */ - result = gst_tensor_filter_generate_dim_from_cap (outcaps, dim, &type); - /** @todo Configure filter-dim from caps if filter-dim is not configured, yet */ - if ((filter->prop.outputConfigured & _TFC_ALL) != _TFC_ALL) { - /* we may set if result == TRUE */ - g_assert (FALSE); /* NYI */ + if (!gst_tensors_config_validate (&self->in_config)) { + silent_debug ("Failed to validate input tensor."); + return FALSE; + } - g_assert (result == TRUE); + if (!gst_tensors_config_validate (&self->out_config)) { + 
silent_debug ("Failed to validate output tensor."); + return FALSE; + } + + /** compare output tensor */ + structure = gst_caps_get_structure (outcaps, 0); + gst_tensors_config_from_structure (&config, structure); + + if (!gst_tensors_config_is_equal (&self->out_config, &config)) { + silent_debug ("Invalid outcaps."); + return FALSE; } - /** @todo Check consistencyu between dim/type with filter->output* */ return TRUE; } @@ -1213,35 +1097,19 @@ gst_tensor_filter_transform_size (GstBaseTransform * trans, GstPadDirection direction, GstCaps * caps, gsize size, GstCaps * othercaps, gsize * othersize) { - GstTensor_Filter *filter = GST_TENSOR_FILTER_CAST (trans); - const GstCaps *srccap = (direction == GST_PAD_SINK) ? othercaps : caps; - tensor_dim dim; - tensor_type type; - GstTensor_Filter_CheckStatus ret = - get_tensor_from_padcap (srccap, dim, &type, NULL, NULL); - - if (filter->prop.fw->allocate_in_invoke == TRUE) { - *othersize = 0; /* Do not allocate outbuf. invoke_NN will allocate! */ - return TRUE; - } + GstTensorFilter *self; - g_assert ((ret & _TFC_ALL) == _TFC_ALL); - - if (!filter->prop.silent) { - debug_print (TRUE, "transform_size, direction = %s\n", - (direction == GST_PAD_SINK) ? "sink" : "src"); - GstStructure *structure = gst_caps_get_structure (caps, 0); - gchar *str = gst_structure_to_string (structure); - debug_print (TRUE, "cap = %s\n", str); - g_free (str); - structure = gst_caps_get_structure (othercaps, 0); - str = gst_structure_to_string (structure); - debug_print (TRUE, "othercap = %s\n", str); - g_free (str); - } + self = GST_TENSOR_FILTER_CAST (trans); - *othersize = get_tensor_element_count (dim) * tensor_element_size[type]; + g_assert (self->configured); + if (self->prop.fw->allocate_in_invoke == TRUE) { + /* Do not allocate outbuf. invoke_NN will allocate! 
*/ + *othersize = 0; + return TRUE; + } + + *othersize = gst_tensor_filter_out_size (self, -1); return TRUE; } @@ -1254,15 +1122,16 @@ gst_tensor_filter_transform_size (GstBaseTransform * trans, static gboolean gst_tensor_filter_start (GstBaseTransform * trans) { - GstTensor_Filter *filter = GST_TENSOR_FILTER_CAST (trans); - GstTensor_Filter_Framework *fw = filter->prop.fw; - GstTensor_Filter_Properties *prop = &filter->prop; + GstTensorFilter *self; + GstTensorFilterProperties *prop; - if (!prop->fwOpened && fw->open) - fw->open (filter, &filter->privateData); - prop->fwOpened = TRUE; + self = GST_TENSOR_FILTER_CAST (trans); + prop = &self->prop; - g_assert (prop->fwClosed == FALSE); + if (!prop->fw_opened && prop->fw->open) { + prop->fw->open (self, &self->privateData); + } + prop->fw_opened = TRUE; return TRUE; } @@ -1275,25 +1144,64 @@ gst_tensor_filter_start (GstBaseTransform * trans) static gboolean gst_tensor_filter_stop (GstBaseTransform * trans) { - GstTensor_Filter *filter = GST_TENSOR_FILTER_CAST (trans); - GstTensor_Filter_Framework *fw = filter->prop.fw; - GstTensor_Filter_Properties *prop = &filter->prop; + GstTensorFilter *self; + GstTensorFilterProperties *prop; - g_assert (prop->fwOpened == TRUE); + self = GST_TENSOR_FILTER_CAST (trans); + prop = &self->prop; - if (fw->close) - fw->close (filter, &filter->privateData); - prop->fwClosed = TRUE; + gst_tensor_filter_close (self); - if (prop->modelFilename) { - g_free ((void *) prop->modelFilename); - prop->modelFilename = NULL; + if (prop->model_file) { + g_free ((void *) prop->model_file); + prop->model_file = NULL; } - if (prop->customProperties) { - g_free ((void *) prop->customProperties); - prop->customProperties = NULL; + if (prop->custom_properties) { + g_free ((void *) prop->custom_properties); + prop->custom_properties = NULL; } return TRUE; } + +/** + * @brief entry point to initialize the plug-in + * initialize the plug-in itself + * register the element factories and other features + */ 
+static gboolean +gst_tensor_filter_plugin_init (GstPlugin * plugin) +{ + /** + * debug category for filtering log messages + */ + GST_DEBUG_CATEGORY_INIT (gst_tensor_filter_debug, "tensor_filter", + 0, "tensor_filter element"); + + return gst_element_register (plugin, "tensor_filter", GST_RANK_NONE, + GST_TYPE_TENSOR_FILTER); +} + +/** + * @brief Definition for identifying tensor_filter plugin. + * + * PACKAGE: this is usually set by autotools depending on some _INIT macro + * in configure.ac and then written into and defined in config.h, but we can + * just set it ourselves here in case someone doesn't use autotools to + * compile this code. GST_PLUGIN_DEFINE needs PACKAGE to be defined. + */ +#ifndef PACKAGE +#define PACKAGE "tensor_filter" +#endif + +/** + * @brief Macro to define the entry point of the plugin. + * gstreamer looks for this structure to register tensor_filter. + */ +GST_PLUGIN_DEFINE (GST_VERSION_MAJOR, + GST_VERSION_MINOR, + tensor_filter, + "GStreamer plugin to use general neural network frameworks as filters", + gst_tensor_filter_plugin_init, VERSION, "LGPL", "GStreamer", + "http://gstreamer.net/"); diff --git a/gst/tensor_filter/tensor_filter.h b/gst/tensor_filter/tensor_filter.h index 2c00270..358b6a5 100644 --- a/gst/tensor_filter/tensor_filter.h +++ b/gst/tensor_filter/tensor_filter.h @@ -36,52 +36,58 @@ #include G_BEGIN_DECLS -/* #defines don't like whitespacey bits */ + #define GST_TYPE_TENSOR_FILTER \ (gst_tensor_filter_get_type()) #define GST_TENSOR_FILTER(obj) \ - (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_TENSOR_FILTER,GstTensor_Filter)) + (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_TENSOR_FILTER,GstTensorFilter)) #define GST_TENSOR_FILTER_CLASS(klass) \ - (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_TENSOR_FILTER,GstTensor_FilterClass)) + (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_TENSOR_FILTER,GstTensorFilterClass)) #define GST_IS_TENSOR_FILTER(obj) \ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_TENSOR_FILTER)) #define 
GST_IS_TENSOR_FILTER_CLASS(klass) \ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_TENSOR_FILTER)) -#define GST_TENSOR_FILTER_CAST(obj) ((GstTensor_Filter *)(obj)) -typedef struct _GstTensor_Filter GstTensor_Filter; +#define GST_TENSOR_FILTER_CAST(obj) ((GstTensorFilter *)(obj)) -typedef struct _GstTensor_FilterClass GstTensor_FilterClass; +typedef struct _GstTensorFilter GstTensorFilter; +typedef struct _GstTensorFilterClass GstTensorFilterClass; extern const char *nnfw_names[]; /** * @brief Internal data structure for tensor_filter instances. */ -struct _GstTensor_Filter +struct _GstTensorFilter { GstBaseTransform element; /**< This is the parent object */ void *privateData; /**< NNFW plugin's private data is stored here */ + GstTensorFilterProperties prop; /**< NNFW plugin's properties */ - GstTensor_Filter_Properties prop; + /** internal properties for tensor-filter */ + int silent; /**< Verbose mode if FALSE. int instead of gboolean for non-glib custom plugins */ + gboolean configured; /**< True if already successfully configured tensor metadata */ + GstTensorsConfig in_config; /**< input tensor info */ + GstTensorsConfig out_config; /**< output tensor info */ }; -/** @brief Location of GstTensor_Filter from privateData - * @param p the "privateData" pointer of GstTensor_Filter - * @return the pointer to GstTensor_Filter containing p as privateData +/** + * @brief Location of GstTensorFilter from privateData + * @param p the "privateData" pointer of GstTensorFilter + * @return the pointer to GstTensorFilter containing p as privateData */ -#define GstTensor_Filter_of_privateData(p) ({ \ +#define GstTensorFilter_of_privateData(p) ({ \ const void **__mptr = (const void **)(p); \ - (GstTensor_Filter *)( (char *)__mptr - offsetof(GstTensor_Filter, privateData) );}) + (GstTensorFilter *)( (char *)__mptr - offsetof(GstTensorFilter, privateData) );}) /** - * @brief GstTensor_FilterClass inherits GstBaseTransformClass. 
+ * @brief GstTensorFilterClass inherits GstBaseTransformClass. * * Referring another child (sibiling), GstVideoFilter (abstract class) and * its child (concrete class) GstVideoConverter. - * Note that GstTensor_FilterClass is a concrete class; thus we need to look at both. + * Note that GstTensorFilterClass is a concrete class; thus we need to look at both. */ -struct _GstTensor_FilterClass +struct _GstTensorFilterClass { GstBaseTransformClass parent_class; /**< Inherits GstBaseTransformClass */ }; @@ -98,13 +104,13 @@ GType gst_tensor_filter_get_type (void); * filter Filter properties. Read Only * private_data Subplugin's private data. Set this (*private_data = XXX) if you want to change filter->private_data */ -struct _GstTensor_Filter_Framework +struct _GstTensorFilterFramework { gchar *name; /**< Name of the neural network framework, searchable by FRAMEWORK property */ gboolean allow_in_place; /**< TRUE if InPlace transfer of input-to-output is allowed. Not supported in main, yet */ gboolean allocate_in_invoke; /**< TRUE if invoke_NN is going to allocate outputptr by itself and return the address via outputptr. Do not change this value after cap negotiation is complete (or the stream has been started). */ - uint8_t *(*invoke_NN) (const GstTensor_Filter * filter, void **private_data, + uint8_t *(*invoke_NN) (const GstTensorFilter * filter, void **private_data, const uint8_t * inputptr, uint8_t * outputptr); /**< Mandatory callback. Invoke the given network model. * @@ -115,34 +121,33 @@ struct _GstTensor_Filter_Framework * @return outputptr if allocate_in_invoke = 00 if OK. non-zero if error. */ - int (*getInputDimension) (const GstTensor_Filter * filter, - void **private_data, GstTensor_TensorsMeta * meta); + int (*getInputDimension) (const GstTensorFilter * filter, + void **private_data, GstTensorsInfo * info); /**< Optional. Set NULL if not supported. Get dimension of input tensor * If getInputDimension is NULL, setInputDimension must be defined. 
* If getInputDimension is defined, it is recommended to define getOutputDimension * * @param[in] filter "this" pointer. Use this to read property values * @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer. - * @param[out] inputDimension dimension of input tensor (return value) - * @param[out] type type of input tensor element (return value) + * @param[out] info structure of tensor info (return value) * @return the size of input tensors */ - int (*getOutputDimension) (const GstTensor_Filter * filter, - void **private_data, GstTensor_TensorsMeta * meta); + + int (*getOutputDimension) (const GstTensorFilter * filter, + void **private_data, GstTensorsInfo * info); /**< Optional. Set NULL if not supported. Get dimension of output tensor * If getInputDimension is NULL, setInputDimension must be defined. * If getInputDimension is defined, it is recommended to define getOutputDimension * * @param[in] filter "this" pointer. Use this to read property values * @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer. - * @param[out] outputDimension dimension of output tensor (return value) - * @param[out] type type of output tensor element (return value) + * @param[out] info structure of tensor info (return value) * @return the size of output tensors */ - int (*setInputDimension) (const GstTensor_Filter * filter, - void **private_data, const tensor_dim inputDimension, - const tensor_type inputType, tensor_dim outputDimension, - tensor_type * outputType); + + int (*setInputDimension) (const GstTensorFilter * filter, + void **private_data, const GstTensorsInfo * in_info, + GstTensorsInfo * out_info); /**< Optional. Set Null if not supported. Tensor_filter::main will * configure input dimension from pad-cap in run-time for the sub-plugin. 
* Then, the sub-plugin is required to return corresponding output dimension @@ -154,20 +159,19 @@ struct _GstTensor_Filter_Framework * * @param[in] filter "this" pointer. Use this to read property values * @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer. - * @param[in] inputDimension dimension of input tensor - * @param[in] inputType type of input tensor element - * @param[out] outputDimension dimension of output tensor (return value) - * @param[out] outputType type of output tensor element (return value) + * @param[in] in_info structure of input tensor info + * @param[out] out_info structure of output tensor info (return value) * @return 0 if OK. non-zero if error. */ - void (*open) (const GstTensor_Filter * filter, void **private_data); + void (*open) (const GstTensorFilter * filter, void **private_data); /**< Optional. tensor_filter.c will call this before any of other callbacks and will call once before calling close * * @param[in] filter "this" pointer. Use this to read property values * @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer. Normally, open() allocates memory for private_data. */ - void (*close) (const GstTensor_Filter * filter, void **private_data); + + void (*close) (const GstTensorFilter * filter, void **private_data); /**< Optional. tensor_filter.c will not call other callbacks after calling close. Free-ing private_data is this function's responsibility. Set NULL after that. * * @param[in] filter "this" pointer. 
Use this to read property values @@ -175,11 +179,12 @@ struct _GstTensor_Filter_Framework */ }; -extern GstTensor_Filter_Framework NNS_support_tensorflow_lite; -extern GstTensor_Filter_Framework NNS_support_tensorflow; -extern GstTensor_Filter_Framework NNS_support_custom; +extern GstTensorFilterFramework NNS_support_tensorflow_lite; +extern GstTensorFilterFramework NNS_support_tensorflow; +extern GstTensorFilterFramework NNS_support_custom; -extern GstTensor_Filter_Framework *tensor_filter_supported[]; +extern GstTensorFilterFramework *tensor_filter_supported[]; G_END_DECLS + #endif /* __GST_TENSOR_FILTER_H__ */ diff --git a/gst/tensor_filter/tensor_filter_custom.c b/gst/tensor_filter/tensor_filter_custom.c index 7167f5c..0afff28 100644 --- a/gst/tensor_filter/tensor_filter_custom.c +++ b/gst/tensor_filter/tensor_filter_custom.c @@ -23,7 +23,7 @@ * @bug No known bugs except for NYI items * * This is the per-NN-framework plugin (custom) for tensor_filter. - * Fill in "GstTensor_Filter_Framework" for tensor_filter.h/c + * Fill in "GstTensorFilterFramework" for tensor_filter.h/c * */ @@ -37,7 +37,7 @@ */ struct _internal_data { - GstTensor_Filter *parent; + GstTensorFilter *parent; void *handle; NNStreamer_custom_class *methods; @@ -51,23 +51,23 @@ typedef struct _internal_data internal_data; * @return 0 if successfully loaded. 1 if skipped (already loaded). -1 if error */ static int -custom_loadlib (const GstTensor_Filter * filter, void **private_data) +custom_loadlib (const GstTensorFilter * filter, void **private_data) { internal_data *ptr; char *dlsym_error; if (filter->privateData != NULL) { - /** @todo : Check the integrity of filter->data and filter->modelFilename, nnfw */ + /** @todo : Check the integrity of filter->data and filter->model_file, nnfw */ return 1; } ptr = g_new0 (internal_data, 1); /* Fill Zero! 
*/ *private_data = ptr; g_assert (*private_data == filter->privateData); - ptr->parent = GstTensor_Filter_of_privateData (private_data); + ptr->parent = GstTensorFilter_of_privateData (private_data); /* Load .so if this is the first time for this instance. */ - ptr->handle = dlopen (filter->prop.modelFilename, RTLD_NOW); + ptr->handle = dlopen (filter->prop.model_file, RTLD_NOW); if (!ptr->handle) { g_free (ptr); *private_data = NULL; @@ -98,10 +98,10 @@ custom_loadlib (const GstTensor_Filter * filter, void **private_data) } /** - * @brief The open callback for GstTensor_Filter_Framework. Called before anything else + * @brief The open callback for GstTensorFilterFramework. Called before anything else */ static void -custom_open (const GstTensor_Filter * filter, void **private_data) +custom_open (const GstTensorFilter * filter, void **private_data) { int retval = custom_loadlib (filter, private_data); internal_data *ptr; @@ -116,13 +116,13 @@ custom_open (const GstTensor_Filter * filter, void **private_data) } /** - * @brief The mandatory callback for GstTensor_Filter_Framework + * @brief The mandatory callback for GstTensorFilterFramework * @param filter The parent object * @param[in] inptr The input tensor * @param[out] outptr The output tensor */ static uint8_t * -custom_invoke (const GstTensor_Filter * filter, void **private_data, +custom_invoke (const GstTensorFilter * filter, void **private_data, const uint8_t * inptr, uint8_t * outptr) { int retval = custom_loadlib (filter, private_data); @@ -145,8 +145,8 @@ custom_invoke (const GstTensor_Filter * filter, void **private_data, uint8_t *retptr = ptr->methods->allocate_invoke (ptr->customFW_private_data, &(filter->prop), inptr, &size); g_assert (size == - (get_tensor_element_count (filter->prop.outputMeta.dims[0]) * - tensor_element_size[filter->prop.outputMeta.types[0]])); + (get_tensor_element_count (filter->prop.output_meta.info[0].dimension) * + tensor_element_size[filter->prop.output_meta.info[0].type])); 
return retptr; } else { return NULL; @@ -154,11 +154,11 @@ custom_invoke (const GstTensor_Filter * filter, void **private_data, } /** - * @brief The optional callback for GstTensor_Filter_Framework + * @brief The optional callback for GstTensorFilterFramework */ static int -custom_getInputDim (const GstTensor_Filter * filter, void **private_data, - GstTensor_TensorsMeta * meta) +custom_getInputDim (const GstTensorFilter * filter, void **private_data, + GstTensorsInfo * info) { int retval = custom_loadlib (filter, private_data); internal_data *ptr; @@ -172,15 +172,15 @@ custom_getInputDim (const GstTensor_Filter * filter, void **private_data, } return ptr->methods->getInputDim (ptr->customFW_private_data, &(filter->prop), - meta); + info); } /** - * @brief The optional callback for GstTensor_Filter_Framework + * @brief The optional callback for GstTensorFilterFramework */ static int -custom_getOutputDim (const GstTensor_Filter * filter, void **private_data, - GstTensor_TensorsMeta * meta) +custom_getOutputDim (const GstTensorFilter * filter, void **private_data, + GstTensorsInfo * info) { int retval = custom_loadlib (filter, private_data); internal_data *ptr; @@ -194,16 +194,15 @@ custom_getOutputDim (const GstTensor_Filter * filter, void **private_data, } return ptr->methods->getOutputDim (ptr->customFW_private_data, - &(filter->prop), meta); + &(filter->prop), info); } /** - * @brief The set-input-dim callback for GstTensor_Filter_Framework + * @brief The set-input-dim callback for GstTensorFilterFramework */ static int -custom_setInputDim (const GstTensor_Filter * filter, void **private_data, - const tensor_dim iDimension, const tensor_type iType, - tensor_dim oDimension, tensor_type * oType) +custom_setInputDim (const GstTensorFilter * filter, void **private_data, + const GstTensorsInfo * in_info, GstTensorsInfo * out_info) { int retval = custom_loadlib (filter, private_data); internal_data *ptr; @@ -216,14 +215,14 @@ custom_setInputDim (const GstTensor_Filter * 
filter, void **private_data, return -1; return ptr->methods->setInputDim (ptr->customFW_private_data, - &(filter->prop), iDimension, iType, oDimension, oType); + &(filter->prop), in_info, out_info); } /** * @brief Free privateData and move on. */ static void -custom_close (const GstTensor_Filter * filter, void **private_data) +custom_close (const GstTensorFilter * filter, void **private_data) { internal_data *ptr = *private_data; @@ -233,7 +232,7 @@ custom_close (const GstTensor_Filter * filter, void **private_data) g_assert (filter->privateData == NULL); } -GstTensor_Filter_Framework NNS_support_custom = { +GstTensorFilterFramework NNS_support_custom = { .name = "custom", .allow_in_place = FALSE, /* custom cannot support in-place (outptr == inptr). */ .allocate_in_invoke = FALSE, /* Let tensor_flow allocate output buffers */ diff --git a/gst/tensor_filter/tensor_filter_tensorflow.c b/gst/tensor_filter/tensor_filter_tensorflow.c index 46a6dea..77dbd08 100644 --- a/gst/tensor_filter/tensor_filter_tensorflow.c +++ b/gst/tensor_filter/tensor_filter_tensorflow.c @@ -23,7 +23,7 @@ * @bug No known bugs except for NYI items * * This is the per-NN-framework plugin (tensorflow) for tensor_filter. - * Fill in "GstTensor_Filter_Framework" for tensor_filter.h/c + * Fill in "GstTensorFilterFramework" for tensor_filter.h/c * */ @@ -47,16 +47,16 @@ typedef struct _Tf_data tf_data; * @return 0 if successfully loaded. 1 if skipped (already loaded). -1 if error */ static int -tf_loadModelFile (const GstTensor_Filter * filter, void **private_data) +tf_loadModelFile (const GstTensorFilter * filter, void **private_data) { tf_data *tf; if (filter->privateData != NULL) { - /** @todo : Check the integrity of filter->data and filter->modelFilename, nnfw */ + /** @todo : Check the integrity of filter->data and filter->model_file, nnfw */ return 1; } tf = g_new0 (tf_data, 1); /** initialize tf Fill Zero! 
*/ *private_data = tf; - tf->tf_private_data = tf_core_new (filter->prop.modelFilename); + tf->tf_private_data = tf_core_new (filter->prop.model_file); if (tf->tf_private_data) { return 0; } else { @@ -65,24 +65,24 @@ tf_loadModelFile (const GstTensor_Filter * filter, void **private_data) } /** - * @brief The open callback for GstTensor_Filter_Framework. Called before anything else + * @brief The open callback for GstTensorFilterFramework. Called before anything else * @param filter : tensor_filter instance * @param private_data : tensorflow lite plugin's private data */ static void -tf_open (const GstTensor_Filter * filter, void **private_data) +tf_open (const GstTensorFilter * filter, void **private_data) { int retval = tf_loadModelFile (filter, private_data); g_assert (retval == 0); /** This must be called only once */ } /** - * @brief The mandatory callback for GstTensor_Filter_Framework + * @brief The mandatory callback for GstTensorFilterFramework * @param[in] inptr The input tensor * @param[out] outptr The output tensor */ static uint8_t * -tf_invoke (const GstTensor_Filter * filter, void **private_data, +tf_invoke (const GstTensorFilter * filter, void **private_data, const uint8_t * inptr, uint8_t * outptr) { int retval; @@ -98,11 +98,11 @@ tf_invoke (const GstTensor_Filter * filter, void **private_data, } /** - * @brief The optional callback for GstTensor_Filter_Framework + * @brief The optional callback for GstTensorFilterFramework */ static int -tf_getInputDim (const GstTensor_Filter * filter, void **private_data, - GstTensor_TensorsMeta * meta) +tf_getInputDim (const GstTensorFilter * filter, void **private_data, + GstTensorsInfo * info) { int temp_idx = 0; tf_data *tf; @@ -113,16 +113,15 @@ tf_getInputDim (const GstTensor_Filter * filter, void **private_data, else temp_idx = 0; g_assert (filter->privateData && *private_data == filter->privateData); - return tf_core_getInputDim (tf->tf_private_data, meta->dims[0], - &meta->types[0], &meta->num_tensors); 
+ return tf_core_getInputDim (tf->tf_private_data, info); } /** - * @brief The optional callback for GstTensor_Filter_Framework + * @brief The optional callback for GstTensorFilterFramework */ static int -tf_getOutputDim (const GstTensor_Filter * filter, void **private_data, - GstTensor_TensorsMeta * meta) +tf_getOutputDim (const GstTensorFilter * filter, void **private_data, + GstTensorsInfo * info) { int temp_idx = 0; tf_data *tf; @@ -133,17 +132,15 @@ tf_getOutputDim (const GstTensor_Filter * filter, void **private_data, else temp_idx = 0; g_assert (filter->privateData && *private_data == filter->privateData); - return tf_core_getOutputDim (tf->tf_private_data, meta->dims[0], - &meta->types[0], &meta->num_tensors); + return tf_core_getOutputDim (tf->tf_private_data, info); } /** - * @brief The set-input-dim callback for GstTensor_Filter_Framework + * @brief The set-input-dim callback for GstTensorFilterFramework */ static int -tf_setInputDim (const GstTensor_Filter * filter, void **private_data, - const tensor_dim iDimension, const tensor_type iType, - tensor_dim oDimension, tensor_type * oType) +tf_setInputDim (const GstTensorFilter * filter, void **private_data, + const GstTensorsInfo * in_info, GstTensorsInfo * out_info) { /** @todo call tflite core apis */ return 0; /** NYI */ @@ -153,7 +150,7 @@ tf_setInputDim (const GstTensor_Filter * filter, void **private_data, * @brief Free privateData and move on. */ static void -tf_close (const GstTensor_Filter * filter, void **private_data) +tf_close (const GstTensorFilter * filter, void **private_data) { tf_data *tf; tf = *private_data; @@ -163,7 +160,7 @@ tf_close (const GstTensor_Filter * filter, void **private_data) g_assert (filter->privateData == NULL); } -GstTensor_Filter_Framework NNS_support_tensorflow = { +GstTensorFilterFramework NNS_support_tensorflow = { .name = "tensorflow", .allow_in_place = FALSE, /** @todo: support this to optimize performance later. 
*/ .allocate_in_invoke = TRUE, diff --git a/gst/tensor_filter/tensor_filter_tensorflow_core.cc b/gst/tensor_filter/tensor_filter_tensorflow_core.cc index 2018c62..603ca76 100644 --- a/gst/tensor_filter/tensor_filter_tensorflow_core.cc +++ b/gst/tensor_filter/tensor_filter_tensorflow_core.cc @@ -102,32 +102,30 @@ TFCore::getTensorType (int tensor_idx, tensor_type * type) /** * @brief return the Dimension of Input Tensor. - * @param idx : the index of the input tensor - * @param[out] dim : the array of the input tensor - * @param[out] type : the data type of the input tensor + * @param[out] info Structure for tensor info. * @return 0 if OK. non-zero if error. */ int -TFCore::getInputTensorDim (tensor_dim dim, tensor_type * type, - unsigned int *num_tensors) +TFCore::getInputTensorDim (GstTensorsInfo * info) { - int ret = getTensorDim (dim, type); - return ret; + /** + * @todo fill here + */ + return 0; } /** * @brief return the Dimension of Output Tensor. - * @param idx : the index of the output tensor - * @param[out] dim : the array of the output tensor - * @param[out] type : the data type of the output tensor + * @param[out] info Structure for tensor info. * @return 0 if OK. non-zero if error. */ int -TFCore::getOutputTensorDim (tensor_dim dim, tensor_type * type, - unsigned int *num_tensors) +TFCore::getOutputTensorDim (GstTensorsInfo * info) { - int ret = getTensorDim (dim, type); - return ret; + /** + * @todo fill here + */ + return 0; } /** @@ -208,34 +206,28 @@ tf_core_getModelPath (void *tf) /** * @brief get the Dimension of Input Tensor of model - * @param tf : the class object - * @param idx : the index of the input tensor - * @param[out] dim : the array of the input tensor - * @param[out] type : the data type of the input tensor + * @param tf the class object + * @param[out] info Structure for tensor info. * @return 0 if OK. non-zero if error. 
*/ int -tf_core_getInputDim (void *tf, tensor_dim dim, tensor_type * type, - unsigned int *num_tensors) +tf_core_getInputDim (void *tf, GstTensorsInfo * info) { TFCore *c = (TFCore *) tf; - return c->getInputTensorDim (dim, type, num_tensors); + return c->getInputTensorDim (info); } /** * @brief get the Dimension of Output Tensor of model - * @param tf : the class object - * @param idx : the index of the output tensor - * @param[out] dim : the array of the output tensor - * @param[out] type : the data type of the output tensor + * @param tf the class object + * @param[out] info Structure for tensor info. * @return 0 if OK. non-zero if error. */ int -tf_core_getOutputDim (void *tf, tensor_dim dim, tensor_type * type, - unsigned int *num_tensors) +tf_core_getOutputDim (void *tf, GstTensorsInfo * info) { TFCore *c = (TFCore *) tf; - return c->getOutputTensorDim (dim, type, num_tensors); + return c->getOutputTensorDim (info); } /** diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite.c b/gst/tensor_filter/tensor_filter_tensorflow_lite.c index ea1ff04..90adb0a 100644 --- a/gst/tensor_filter/tensor_filter_tensorflow_lite.c +++ b/gst/tensor_filter/tensor_filter_tensorflow_lite.c @@ -23,7 +23,7 @@ * @bug No known bugs except for NYI items * * This is the per-NN-framework plugin (tensorflow-lite) for tensor_filter. - * Fill in "GstTensor_Filter_Framework" for tensor_filter.h/c + * Fill in "GstTensorFilterFramework" for tensor_filter.h/c * */ @@ -47,16 +47,16 @@ typedef struct _Tflite_data tflite_data; * @return 0 if successfully loaded. 1 if skipped (already loaded). 
-1 if error */ static int -tflite_loadModelFile (const GstTensor_Filter * filter, void **private_data) +tflite_loadModelFile (const GstTensorFilter * filter, void **private_data) { tflite_data *tf; if (filter->privateData != NULL) { - /** @todo : Check the integrity of filter->data and filter->modelFilename, nnfw */ + /** @todo : Check the integrity of filter->data and filter->model_file, nnfw */ return 1; } tf = g_new0 (tflite_data, 1); /** initialize tf Fill Zero! */ *private_data = tf; - tf->tflite_private_data = tflite_core_new (filter->prop.modelFilename); + tf->tflite_private_data = tflite_core_new (filter->prop.model_file); if (tf->tflite_private_data) { return 0; } else { @@ -65,24 +65,24 @@ tflite_loadModelFile (const GstTensor_Filter * filter, void **private_data) } /** - * @brief The open callback for GstTensor_Filter_Framework. Called before anything else + * @brief The open callback for GstTensorFilterFramework. Called before anything else * @param filter : tensor_filter instance * @param private_data : tensorflow lite plugin's private data */ static void -tflite_open (const GstTensor_Filter * filter, void **private_data) +tflite_open (const GstTensorFilter * filter, void **private_data) { int retval = tflite_loadModelFile (filter, private_data); g_assert (retval == 0); /** This must be called only once */ } /** - * @brief The mandatory callback for GstTensor_Filter_Framework + * @brief The mandatory callback for GstTensorFilterFramework * @param[in] inptr The input tensor * @param[out] outptr The output tensor */ static uint8_t * -tflite_invoke (const GstTensor_Filter * filter, void **private_data, +tflite_invoke (const GstTensorFilter * filter, void **private_data, const uint8_t * inptr, uint8_t * outptr) { int retval; @@ -98,40 +98,39 @@ tflite_invoke (const GstTensor_Filter * filter, void **private_data, } /** - * @brief The optional callback for GstTensor_Filter_Framework + * @brief The optional callback for GstTensorFilterFramework */ static int 
-tflite_getInputDim (const GstTensor_Filter * filter, void **private_data, - GstTensor_TensorsMeta * meta) +tflite_getInputDim (const GstTensorFilter * filter, void **private_data, + GstTensorsInfo * info) { tflite_data *tf; tf = *private_data; g_assert (filter->privateData && *private_data == filter->privateData); - int ret = tflite_core_getInputDim (tf->tflite_private_data, meta); + int ret = tflite_core_getInputDim (tf->tflite_private_data, info); return ret; } /** - * @brief The optional callback for GstTensor_Filter_Framework + * @brief The optional callback for GstTensorFilterFramework */ static int -tflite_getOutputDim (const GstTensor_Filter * filter, void **private_data, - GstTensor_TensorsMeta * meta) +tflite_getOutputDim (const GstTensorFilter * filter, void **private_data, + GstTensorsInfo * info) { tflite_data *tf; tf = *private_data; g_assert (filter->privateData && *private_data == filter->privateData); - int ret = tflite_core_getOutputDim (tf->tflite_private_data, meta); + int ret = tflite_core_getOutputDim (tf->tflite_private_data, info); return ret; } /** - * @brief The set-input-dim callback for GstTensor_Filter_Framework + * @brief The set-input-dim callback for GstTensorFilterFramework */ static int -tflite_setInputDim (const GstTensor_Filter * filter, void **private_data, - const tensor_dim iDimension, const tensor_type iType, - tensor_dim oDimension, tensor_type * oType) +tflite_setInputDim (const GstTensorFilter * filter, void **private_data, + const GstTensorsInfo * in_info, GstTensorsInfo * out_info) { /** @todo call tflite core apis */ return 0; /** NYI */ @@ -141,7 +140,7 @@ tflite_setInputDim (const GstTensor_Filter * filter, void **private_data, * @brief Free privateData and move on. 
*/ static void -tflite_close (const GstTensor_Filter * filter, void **private_data) +tflite_close (const GstTensorFilter * filter, void **private_data) { tflite_data *tf; tf = *private_data; @@ -151,7 +150,7 @@ tflite_close (const GstTensor_Filter * filter, void **private_data) g_assert (filter->privateData == NULL); } -GstTensor_Filter_Framework NNS_support_tensorflow_lite = { +GstTensorFilterFramework NNS_support_tensorflow_lite = { .name = "tensorflow-lite", .allow_in_place = FALSE, /** @todo: support this to optimize performance later. */ .allocate_in_invoke = TRUE, diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc index d3cb0b8..272e520 100644 --- a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc +++ b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc @@ -117,7 +117,8 @@ TFLiteCore::loadModel () * @param tfType : the defined type of Tensorflow Lite * @return the enum of defined _NNS_TYPE */ -_nns_tensor_type TFLiteCore::getTensorType (TfLiteType tfType) +_nns_tensor_type +TFLiteCore::getTensorType (TfLiteType tfType) { switch (tfType) { case kTfLiteFloat32: @@ -150,20 +151,18 @@ TFLiteCore::setInputTensorProp () inputTensorMeta.num_tensors = input_idx_list.size (); for (int i = 0; i < inputTensorMeta.num_tensors; i++) { - inputTensorMeta.ranks[i] = NNS_TENSOR_RANK_LIMIT; - - if (getTensorDim (input_idx_list[i], inputTensorMeta.dims[i], - &inputTensorMeta.ranks[i])) { + if (getTensorDim (input_idx_list[i], inputTensorMeta.info[i].dimension)) { return -1; } - inputTensorMeta.types[i] = + inputTensorMeta.info[i].type = getTensorType (interpreter->tensor (input_idx_list[i])->type); #if (DBG) _print_log ("inputTensorMeta[%d] >> type:%d, dim[%d:%d:%d:%d], rank: %d", - i, inputTensorMeta.types[i], inputTensorMeta.dims[i][0], - inputTensorMeta.dims[i][1], inputTensorMeta.dims[i][2], - inputTensorMeta.dims[i][3], inputTensorMeta.ranks[i]); + i, inputTensorMeta.info[i].type, 
inputTensorMeta.info[i].dimension[0], + inputTensorMeta.info[i].dimension[1], + inputTensorMeta.info[i].dimension[2], + inputTensorMeta.info[i].dimension[3]); #endif } return 0; @@ -180,20 +179,18 @@ TFLiteCore::setOutputTensorProp () outputTensorMeta.num_tensors = output_idx_list.size (); for (int i = 0; i < outputTensorMeta.num_tensors; i++) { - outputTensorMeta.ranks[i] = NNS_TENSOR_RANK_LIMIT; - - if (getTensorDim (output_idx_list[i], outputTensorMeta.dims[i], - &outputTensorMeta.ranks[i])) { + if (getTensorDim (output_idx_list[i], outputTensorMeta.info[i].dimension)) { return -1; } - outputTensorMeta.types[i] = + outputTensorMeta.info[i].type = getTensorType (interpreter->tensor (output_idx_list[i])->type); #if (DBG) _print_log ("outputTensorMeta[%d] >> type:%d, dim[%d:%d:%d:%d], rank: %d", - i, outputTensorMeta.types[i], outputTensorMeta.dims[i][0], - outputTensorMeta.dims[i][1], outputTensorMeta.dims[i][2], - outputTensorMeta.dims[i][3], outputTensorMeta.ranks[i]); + i, outputTensorMeta.info[i].type, outputTensorMeta.info[i].dimension[0], + outputTensorMeta.info[i].dimension[1], + outputTensorMeta.info[i].dimension[2], + outputTensorMeta.info[i].dimension[3]); #endif } return 0; @@ -203,14 +200,12 @@ TFLiteCore::setOutputTensorProp () * @brief return the Dimension of Tensor. * @param tensor_idx : the real index of model of the tensor * @param[out] dim : the array of the tensor - * @param[out] rank : the rank of the tensor * @return 0 if OK. non-zero if error. */ int -TFLiteCore::getTensorDim (int tensor_idx, tensor_dim dim, int *rank) +TFLiteCore::getTensorDim (int tensor_idx, tensor_dim dim) { int len = interpreter->tensor (tensor_idx)->dims->size; - *rank = len; g_assert (len <= NNS_TENSOR_RANK_LIMIT); /* the order of dimension is reversed at CAPS negotiation */ @@ -247,39 +242,31 @@ TFLiteCore::getOutputTensorSize () /** * @brief return the Dimension of Input Tensor. 
- * @param[out] dim : the array of the input tensors - * @param[out] type : the data type of the input tensors - * @todo : return whole array rather than index 0 + * @param[out] info Structure for tensor info. + * @todo return whole array rather than index 0 * @return 0 if OK. non-zero if error. */ int -TFLiteCore::getInputTensorDim (GstTensor_TensorsMeta * meta) +TFLiteCore::getInputTensorDim (GstTensorsInfo * info) { - for (int i = 0; i < inputTensorMeta.num_tensors; i++) { - memcpy (meta->dims[i], inputTensorMeta.dims[i], sizeof (meta->dims[i])); - meta->types[i] = inputTensorMeta.types[i]; - meta->ranks[i] = inputTensorMeta.ranks[i]; - } - meta->num_tensors = inputTensorMeta.num_tensors; + info->num_tensors = inputTensorMeta.num_tensors; + memcpy (info->info, inputTensorMeta.info, + sizeof (GstTensorInfo) * inputTensorMeta.num_tensors); return 0; } /** * @brief return the Dimension of Tensor. - * @param[out] dim : the array of the tensors - * @param[out] type : the data type of the tensors - * @todo : return whole array rather than index 0 + * @param[out] info Structure for tensor info. + * @todo return whole array rather than index 0 * @return 0 if OK. non-zero if error. 
*/ int -TFLiteCore::getOutputTensorDim (GstTensor_TensorsMeta * meta) +TFLiteCore::getOutputTensorDim (GstTensorsInfo * info) { - for (int i = 0; i < outputTensorMeta.num_tensors; i++) { - memcpy (meta->dims, outputTensorMeta.dims[i], sizeof (meta->dims[i])); - meta->types[i] = outputTensorMeta.types[i]; - meta->ranks[i] = outputTensorMeta.ranks[i]; - } - meta->num_tensors = outputTensorMeta.num_tensors; + info->num_tensors = outputTensorMeta.num_tensors; + memcpy (info->info, outputTensorMeta.info, + sizeof (GstTensorInfo) * outputTensorMeta.num_tensors); return 0; } @@ -302,7 +289,7 @@ TFLiteCore::invoke (uint8_t * inptr, uint8_t ** outptr) int sizeOfArray = NNS_TENSOR_RANK_LIMIT; for (int i = 0; i < sizeOfArray; i++) { - output_number_of_pixels *= inputTensorMeta.dims[0][i]; + output_number_of_pixels *= inputTensorMeta.info[0].dimension[i]; } for (int i = 0; i < getInputTensorSize (); i++) { @@ -315,10 +302,10 @@ TFLiteCore::invoke (uint8_t * inptr, uint8_t ** outptr) inputTensors[0] = inptr; for (int j = 0; j < output_number_of_pixels; j++) { - if (inputTensorMeta.types[i] == _NNS_FLOAT32) { + if (inputTensorMeta.info[i].type == _NNS_FLOAT32) { (interpreter->typed_tensor < float >(input))[j] = ((float) inputTensors[i][j] - 127.5f) / 127.5f; - } else if (inputTensorMeta.types[i] == _NNS_UINT8) { + } else if (inputTensorMeta.info[i].type == _NNS_UINT8) { (interpreter->typed_tensor < uint8_t > (input))[j] = inputTensors[i][j]; } } @@ -331,10 +318,10 @@ TFLiteCore::invoke (uint8_t * inptr, uint8_t ** outptr) for (int i = 0; i < outputTensorMeta.num_tensors; i++) { - if (outputTensorMeta.types[i] == _NNS_FLOAT32) { + if (outputTensorMeta.info[i].type == _NNS_FLOAT32) { outputTensors[i] = (uint8_t *) interpreter->typed_output_tensor < float >(i); - } else if (outputTensorMeta.types[i] == _NNS_UINT8) { + } else if (outputTensorMeta.info[i].type == _NNS_UINT8) { outputTensors[i] = interpreter->typed_output_tensor < uint8_t > (i); } } @@ -375,30 +362,28 @@ 
tflite_core_delete (void *tflite) /** * @brief get the Dimension of Input Tensor of model * @param tflite : the class object - * @param[out] dim : the array of the input tensor - * @param[out] type : the data type of the input tensor + * @param[out] info Structure for tensor info. * @return 0 if OK. non-zero if error. */ int -tflite_core_getInputDim (void *tflite, GstTensor_TensorsMeta * meta) +tflite_core_getInputDim (void *tflite, GstTensorsInfo * info) { TFLiteCore *c = (TFLiteCore *) tflite; - int ret = c->getInputTensorDim (meta); + int ret = c->getInputTensorDim (info); return ret; } /** * @brief get the Dimension of Output Tensor of model * @param tflite : the class object - * @param[out] dim : the array of the output tensor - * @param[out] type : the data type of the output tensor + * @param[out] info Structure for tensor info. * @return 0 if OK. non-zero if error. */ int -tflite_core_getOutputDim (void *tflite, GstTensor_TensorsMeta * meta) +tflite_core_getOutputDim (void *tflite, GstTensorsInfo * info) { TFLiteCore *c = (TFLiteCore *) tflite; - int ret = c->getOutputTensorDim (meta); + int ret = c->getOutputTensorDim (info); return ret; } diff --git a/include/tensor_common.h b/include/tensor_common.h index 156e99e..9433a16 100644 --- a/include/tensor_common.h +++ b/include/tensor_common.h @@ -312,11 +312,19 @@ extern int find_key_strv (const gchar ** strv, const gchar * key); /** * @brief Parse tensor dimension parameter string - * @return The Rank. - * @param param The parameter string in the format of d1:d2:d3:d4, d1:d2:d3, d1:d2, or d1, where dN is a positive integer and d1 is the innermost dimension; i.e., dim[d4][d3][d2][d1]; + * @return The Rank. 0 if error. + * @param dimstr The dimension string in the format of d1:d2:d3:d4, d1:d2:d3, d1:d2, or d1, where dN is a positive integer and d1 is the innermost dimension; i.e., dim[d4][d3][d2][d1]; + * @param dim dimension to be filled. 
*/ -extern int get_tensor_dimension (const gchar * param, - uint32_t dim[NNS_TENSOR_SIZE_LIMIT][NNS_TENSOR_RANK_LIMIT]); +extern int get_tensor_dimension (const gchar * dimstr, tensor_dim dim); + +/** + * @brief Get dimension string from given tensor dimension. + * @param dim tensor dimension + * @return Formatted string of given dimension (d1:d2:d3:d4). + * @note The returned value should be freed with g_free() + */ +extern gchar *get_tensor_dimension_string (const tensor_dim dim); /** * @brief Count the number of elemnts of a tensor @@ -352,18 +360,6 @@ get_tensor_from_padcap (const GstCaps * caps, tensor_dim dim, tensor_type * type, int *framerate_num, int *framerate_denum); /** - * @brief Read GstStructure, return corresponding tensor-dim/type. (other/tensor) - * @return The number of tensors. - * @param[in] str the GstStructure to be interpreted. - * @param[out] meta An allocated but filled with Null meta, to be used as output. - * @param[out] framerate_num Numerator of framerate. Set null to not use this. - * @param[out] framerate_denum Denumerator of framerate. Set null to not use this. - */ -extern int -get_tensors_from_structure (const GstStructure * str, - GstTensor_TensorsMeta * meta, int *framerate_num, int *framerate_denum); - -/** * @brief Make str(xyz) ==> "xyz" with macro expansion */ #define str(s) xstr(s) diff --git a/include/tensor_filter_custom.h b/include/tensor_filter_custom.h index e5ecfa9..eadfe67 100644 --- a/include/tensor_filter_custom.h +++ b/include/tensor_filter_custom.h @@ -43,80 +43,77 @@ * @param[in] prop Tensor_Filter's property values. Do not change its values. * @return The returned pointer will be passed to other functions as "private_data". */ -typedef void *(*NNS_custom_init_func)(const GstTensor_Filter_Properties *prop); +typedef void *(*NNS_custom_init_func) (const GstTensorFilterProperties * prop); /** * @brief A function that is called after calling other functions, when it's ready to close. 
* @param[in] private_data If you have allocated *private_data at init, free it here. * @param[in] prop Tensor_Filter's property values. Do not change its values. */ -typedef void (*NNS_custom_exit_func)(void *private_data, const GstTensor_Filter_Properties *prop); +typedef void (*NNS_custom_exit_func) (void *private_data, + const GstTensorFilterProperties * prop); /** * @brief Get input tensor type. * @param[in] private_data The pointer returned by NNStreamer_custom_init. * @param[in] prop Tensor_Filter's property values. Do not change its values. - * @param[out] inputDimension uint32_t[NNS_TENSOR_RANK_LIMIT] (tensor_dim) - * @param[out] type Type of each element in the input tensor + * @param[out] info Structure for tensor info. */ typedef int (*NNS_custom_get_input_dimension) (void *private_data, - const GstTensor_Filter_Properties * prop, GstTensor_TensorsMeta * meta); + const GstTensorFilterProperties * prop, GstTensorsInfo * info); /** * @brief Get output tensor type. * @param[in] private_data The pointer returned by NNStreamer_custom_init. * @param[in] prop Tensor_Filter's property values. Do not change its values. - * @param[out] outputDimension uint32_t[NNS_TENSOR_RANK_LIMIT] (tensor_dim) - * @param[out] type Type of each element in the output tensor + * @param[out] info Structure for tensor info. */ typedef int (*NNS_custom_get_output_dimension) (void *private_data, - const GstTensor_Filter_Properties * prop, GstTensor_TensorsMeta * meta); + const GstTensorFilterProperties * prop, GstTensorsInfo * info); /** * @brief Set input dim by framework. Let custom plutin set output dim accordingly. * @param[in] private_data The pointer returned by NNStreamer_custom_init * @param[in] prop Tensor_Filter's property values. Do not change its values. - * @param[in] inputDimension Input dimension designated by the gstreamer framework. Note that this is not a fixed value and gstreamer may try different values during pad-cap negotiations. 
- * @param[in] inputType Input element type designated by the gstreamer framework. - * @param[out] outputDimension Output dimension according to the inputDimension/type. - * @param[out] outputType Output element type according to the inputDimension/type. + * @param[in] in_info Input tensor info designated by the gstreamer framework. Note that this is not a fixed value and gstreamer may try different values during pad-cap negotiations. + * @param[out] out_info Output tensor info according to the input tensor info. * * @caution Do not fix internal values based on this call. Gstreamer may call * this function repeatedly with different values during pad-cap negotiations. * Fix values when invoke is finally called. */ -typedef int (*NNS_custom_set_input_dimension)(void *private_data, const GstTensor_Filter_Properties *prop, - const tensor_dim inputDimension, const tensor_type inputType, - tensor_dim outputDimension, tensor_type *outputType); +typedef int (*NNS_custom_set_input_dimension) (void *private_data, + const GstTensorFilterProperties * prop, const GstTensorsInfo * in_info, GstTensorsInfo * out_info); /** * @brief Invoke the "main function". Without allocating output buffer. (fill in the given output buffer) * @param[in] private_data The pointer returned by NNStreamer_custom_init. * @param[in] prop Tensor_Filter's property values. Do not change its values. 
- * @param[in] inputPtr pointer to input tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller - * @param[out] outputPtr pointer to output tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller + * @param[in] inptr pointer to input tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller + * @param[out] outptr pointer to output tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller * @return 0 if success */ -typedef int (*NNS_custom_invoke)(void *private_data, const GstTensor_Filter_Properties *prop, - const uint8_t *inputPtr, uint8_t *outputPtr); +typedef int (*NNS_custom_invoke) (void *private_data, + const GstTensorFilterProperties * prop, const uint8_t * inptr, uint8_t * outptr); /** * @brief Invoke the "main function". Without allocating output buffer. (fill in the given output buffer) * @param[in] private_data The pointer returned by NNStreamer_custom_init. * @param[in] prop Tensor_Filter's property values. Do not change its values. - * @param[in] inputPtr pointer to input tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller + * @param[in] inptr pointer to input tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller * @param[out] size The allocated size. * @return The output buffer allocated in the invoke function */ -typedef uint8_t * (*NNS_custom_allocate_invoke)(void *private_data, const GstTensor_Filter_Properties *prop, - const uint8_t *inputPtr, size_t *size); +typedef uint8_t *(*NNS_custom_allocate_invoke) (void *private_data, + const GstTensorFilterProperties * prop, const uint8_t * inptr, size_t * size); /** * @brief Custom Filter Class * * Note that exery function pointer is MANDATORY! */ -struct _NNStreamer_custom_class { +struct _NNStreamer_custom_class +{ int allocate_outbuf_in_invoke; /**< Set non-zero if invoke function is to allocate output buffer. 
Note that the allocated outbuf size MUST be consistent with output tensor dimension & type */ NNS_custom_init_func initfunc; /**< called before any other callbacks from tensor_filter_custom.c */ NNS_custom_exit_func exitfunc; /**< will not call other callbacks after this call */ diff --git a/include/tensor_filter_tensorflow_core.h b/include/tensor_filter_tensorflow_core.h index 468fdf8..a975dc9 100644 --- a/include/tensor_filter_tensorflow_core.h +++ b/include/tensor_filter_tensorflow_core.h @@ -64,10 +64,8 @@ public: double get_ms (struct timeval t); int getInputTensorSize (); int getOutputTensorSize (); - int getInputTensorDim (tensor_dim dim, tensor_type * type, - unsigned int *num_tensors); - int getOutputTensorDim (tensor_dim dim, tensor_type * type, - unsigned int *num_tensors); + int getInputTensorDim (GstTensorsInfo * info); + int getOutputTensorDim (GstTensorsInfo * info); int getInputTensorDimSize (); int getOutputTensorDimSize (); int invoke (uint8_t * inptr, uint8_t ** outptr); @@ -95,10 +93,8 @@ extern "C" extern void *tf_core_new (const char *_model_path); extern void tf_core_delete (void *tf); extern const char *tf_core_getModelPath (void *tf); - extern int tf_core_getInputDim (void *tf, tensor_dim dim, - tensor_type * type, unsigned int *num_tensors); - extern int tf_core_getOutputDim (void *tf, tensor_dim dim, - tensor_type * type, unsigned int *num_tensors); + extern int tf_core_getInputDim (void *tf, GstTensorsInfo * info); + extern int tf_core_getOutputDim (void *tf, GstTensorsInfo * info); extern int tf_core_getInputSize (void *tf); extern int tf_core_getOutputSize (void *tf); extern int tf_core_invoke (void *tf, uint8_t * inptr, uint8_t ** outptr); diff --git a/include/tensor_filter_tensorflow_lite_core.h b/include/tensor_filter_tensorflow_lite_core.h index 1659661..6415a2b 100644 --- a/include/tensor_filter_tensorflow_lite_core.h +++ b/include/tensor_filter_tensorflow_lite_core.h @@ -49,8 +49,8 @@ public: int setOutputTensorProp (); int 
getInputTensorSize (); int getOutputTensorSize (); - int getInputTensorDim (GstTensor_TensorsMeta * meta); - int getOutputTensorDim (GstTensor_TensorsMeta * meta); + int getInputTensorDim (GstTensorsInfo * info); + int getOutputTensorDim (GstTensorsInfo * info); int invoke (uint8_t * inptr, uint8_t ** outptr); private: @@ -60,15 +60,15 @@ private: tensors inputTensors; /**< The list of input tensors */ tensors outputTensors; /**< The list of output tensors */ - GstTensor_TensorsMeta inputTensorMeta; /**< The meta of input tensors */ - GstTensor_TensorsMeta outputTensorMeta; /**< The meta of input tensors */ + GstTensorsInfo inputTensorMeta; /**< The meta of input tensors */ + GstTensorsInfo outputTensorMeta; /**< The meta of output tensors */ std::unique_ptr < tflite::Interpreter > interpreter; std::unique_ptr < tflite::FlatBufferModel > model; double get_ms (struct timeval t); _nns_tensor_type getTensorType (TfLiteType tfType); - int getTensorDim (int tensor_idx, tensor_dim dim, int *rank); + int getTensorDim (int tensor_idx, tensor_dim dim); }; /** @@ -82,9 +82,9 @@ extern "C" extern void tflite_core_delete (void *tflite); extern const char *tflite_core_getModelPath (void *tflite); extern int tflite_core_getInputDim (void *tflite, - GstTensor_TensorsMeta * meta); + GstTensorsInfo * info); extern int tflite_core_getOutputDim (void *tflite, - GstTensor_TensorsMeta * meta); + GstTensorsInfo * info); extern int tflite_core_getOutputSize (void *tflite); extern int tflite_core_getInputSize (void *tflite); extern int tflite_core_invoke (void *tflite, uint8_t * inptr, diff --git a/include/tensor_typedef.h b/include/tensor_typedef.h index a6b0964..093dc6f 100644 --- a/include/tensor_typedef.h +++ b/include/tensor_typedef.h @@ -84,8 +84,8 @@ typedef enum _nnfw_type { _T_F_NNFW_END, } nnfw_type; -struct _GstTensor_Filter_Framework; -typedef struct _GstTensor_Filter_Framework GstTensor_Filter_Framework; +struct _GstTensorFilterFramework; +typedef struct 
_GstTensorFilterFramework GstTensorFilterFramework; typedef enum { _TFC_INIT = 0, @@ -102,18 +102,6 @@ typedef uint32_t tensor_dim[NNS_TENSOR_RANK_LIMIT]; typedef uint8_t *tensors[NNS_TENSOR_SIZE_LIMIT]; /**< Array of tensors */ /** - * @brief Internal meta data exchange format for a other/tensors instance - * @todo replace this to GstTensorsInfo - */ -typedef struct -{ - unsigned int num_tensors; /**< The number of tensors */ - tensor_dim dims[NNS_TENSOR_SIZE_LIMIT]; /**< The list of dimensions of each tensors */ - tensor_type types[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each tensors */ - int ranks[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each tensors */ -} GstTensor_TensorsMeta; - -/** * @brief Internal data structure for tensor info. */ typedef struct @@ -132,29 +120,25 @@ typedef struct } GstTensorsInfo; /** - * @brief Tensor_Filter's properties (internal data structure) + * @brief Tensor_Filter's properties for NN framework (internal data structure) * * Because custom filters of tensor_filter may need to access internal data * of Tensor_Filter, we define this data structure here. */ -typedef struct _GstTensor_Filter_Properties +typedef struct _GstTensorFilterProperties { - int silent; /**< Verbose mode if FALSE. int instead of gboolean for non-glib custom plugins */ - GstTensor_Filter_CheckStatus inputConfigured; /**< input dimension status */ - GstTensor_Filter_CheckStatus outputConfigured; /**< output dimension status */ nnfw_type nnfw; /**< The enum value of corresponding NNFW. _T_F_UNDEFINED if not configured */ - GstTensor_Filter_Framework *fw; /**< The implementation core of the NNFW. NULL if not configured */ - int fwOpened; /**< true IF open() is called or tried. Use int instead of gboolean because this is refered by custom plugins. */ - int fwClosed; /**< true IF close() is called or tried. Use int instead of gboolean because this is refered by custom plugins. 
*/ - const char *modelFilename; /**< Filepath to the model file (as an argument for NNFW). char instead of gchar for non-glib custom plugins */ + GstTensorFilterFramework *fw; /**< The implementation core of the NNFW. NULL if not configured */ + int fw_opened; /**< TRUE if open() is called or tried. Use int instead of gboolean because this is referred by custom plugins. */ + const char *model_file; /**< Filepath to the model file (as an argument for NNFW). char instead of gchar for non-glib custom plugins */ - int inputCapNegotiated; /**< @todo check if this is really needed */ - GstTensor_TensorsMeta inputMeta; + int input_configured; /**< TRUE if input tensor is configured. Use int instead of gboolean because this is referred by custom plugins. */ + GstTensorsInfo input_meta; /**< configured input tensor info */ - int outputCapNegotiated; /**< @todo check if this is really needed */ - GstTensor_TensorsMeta outputMeta; + int output_configured; /**< TRUE if output tensor is configured. Use int instead of gboolean because this is referred by custom plugins. 
*/ + GstTensorsInfo output_meta; /**< configured output tensor info */ - const char *customProperties; /**< sub-plugin specific custom property values in string */ -} GstTensor_Filter_Properties; + const char *custom_properties; /**< sub-plugin specific custom property values in string */ +} GstTensorFilterProperties; #endif /*__GST_TENSOR_TYPEDEF_H__*/ diff --git a/nnstreamer_example/custom_example_average/nnstreamer_customfilter_example_average.c b/nnstreamer_example/custom_example_average/nnstreamer_customfilter_example_average.c index 8b3b22f..3d085c4 100644 --- a/nnstreamer_example/custom_example_average/nnstreamer_customfilter_example_average.c +++ b/nnstreamer_example/custom_example_average/nnstreamer_customfilter_example_average.c @@ -33,7 +33,7 @@ typedef struct _pt_data * @brief pt_init */ static void * -pt_init (const GstTensor_Filter_Properties * prop) +pt_init (const GstTensorFilterProperties * prop) { pt_data *data = (pt_data *) malloc (sizeof (pt_data)); @@ -45,7 +45,7 @@ pt_init (const GstTensor_Filter_Properties * prop) * @brief pt_exit */ static void -pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) +pt_exit (void *private_data, const GstTensorFilterProperties * prop) { pt_data *data = private_data; assert (data); @@ -56,22 +56,26 @@ pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) * @brief set_inputDim */ static int -set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, - const tensor_dim iDim, const tensor_type iType, - tensor_dim oDim, tensor_type * oType) +set_inputDim (void *private_data, const GstTensorFilterProperties * prop, + const GstTensorsInfo * in_info, GstTensorsInfo * out_info) { int i; pt_data *data = private_data; + assert (data); + assert (in_info); + assert (out_info); + + out_info->num_tensors = 1; for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) - oDim[i] = iDim[i]; + out_info->info[0].dimension[i] = in_info->info[0].dimension[i]; - /* Update [1] and [2] oDim with 
new-x, new-y */ - oDim[1] = 1; - oDim[2] = 1; + /* Update output dimension [1] and [2] with new-x, new-y */ + out_info->info[0].dimension[1] = 1; + out_info->info[0].dimension[2] = 1; - *oType = iType; + out_info->info[0].type = in_info->info[0].type; return 0; } @@ -80,21 +84,21 @@ set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, * @brief do_avg */ #define do_avg(type, sumtype) do {\ - sumtype *avg = (sumtype *) malloc(sizeof(sumtype) * prop->inputMeta.dims[0][0]); \ + sumtype *avg = (sumtype *) malloc(sizeof(sumtype) * prop->input_meta.info[0].dimension[0]); \ type *iptr = (type *) inptr; \ type *optr = (type *) outptr; \ - for (z = 0; z < prop->inputMeta.dims[0][3]; z++) { \ - for (y = 0; y < prop->inputMeta.dims[0][0]; y++) \ + for (z = 0; z < prop->input_meta.info[0].dimension[3]; z++) { \ + for (y = 0; y < prop->input_meta.info[0].dimension[0]; y++) \ avg[y] = 0; \ - for (y = 0; y < prop->inputMeta.dims[0][2]; y++) { \ - for (x = 0; x < prop->inputMeta.dims[0][1]; x++) { \ - for (c = 0; c < prop->inputMeta.dims[0][0]; c++) { \ + for (y = 0; y < prop->input_meta.info[0].dimension[2]; y++) { \ + for (x = 0; x < prop->input_meta.info[0].dimension[1]; x++) { \ + for (c = 0; c < prop->input_meta.info[0].dimension[0]; c++) { \ avg[c] += *(iptr + c + x * ix + y * iy + z * iz); \ } \ } \ } \ - for (c = 0; c < prop->inputMeta.dims[0][0]; c++) { \ - *(optr + c + z * prop->inputMeta.dims[0][0]) = (type) (avg[c] / xy); \ + for (c = 0; c < prop->input_meta.info[0].dimension[0]; c++) { \ + *(optr + c + z * prop->input_meta.info[0].dimension[0]) = (type) (avg[c] / xy); \ } \ } \ free(avg); \ @@ -104,18 +108,23 @@ set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, * @brief pt_invoke */ static int -pt_invoke (void *private_data, const GstTensor_Filter_Properties * prop, +pt_invoke (void *private_data, const GstTensorFilterProperties * prop, const uint8_t * inptr, uint8_t * outptr) { pt_data *data = private_data; uint32_t c, 
x, y, z; - unsigned ix = prop->inputMeta.dims[0][0]; - unsigned iy = prop->inputMeta.dims[0][0] * prop->inputMeta.dims[0][1]; - unsigned iz = - prop->inputMeta.dims[0][0] * prop->inputMeta.dims[0][1] * - prop->inputMeta.dims[0][2]; - unsigned xy = prop->inputMeta.dims[0][1] * prop->inputMeta.dims[0][2]; + uint32_t ix = prop->input_meta.info[0].dimension[0]; + uint32_t iy = + prop->input_meta.info[0].dimension[0] * + prop->input_meta.info[0].dimension[1]; + uint32_t iz = + prop->input_meta.info[0].dimension[0] * + prop->input_meta.info[0].dimension[1] * + prop->input_meta.info[0].dimension[2]; + uint32_t xy = + prop->input_meta.info[0].dimension[1] * + prop->input_meta.info[0].dimension[2]; assert (data); assert (inptr); @@ -124,11 +133,13 @@ pt_invoke (void *private_data, const GstTensor_Filter_Properties * prop, /* This assumes the limit is 4 */ assert (NNS_TENSOR_RANK_LIMIT == 4); - assert (prop->inputMeta.dims[0][0] == prop->outputMeta.dims[0][0]); - assert (prop->inputMeta.dims[0][3] == prop->outputMeta.dims[0][3]); - assert (prop->inputMeta.types[0] == prop->outputMeta.types[0]); + assert (prop->input_meta.info[0].dimension[0] == + prop->output_meta.info[0].dimension[0]); + assert (prop->input_meta.info[0].dimension[3] == + prop->output_meta.info[0].dimension[3]); + assert (prop->input_meta.info[0].type == prop->output_meta.info[0].type); - switch (prop->inputMeta.types[0]) { + switch (prop->input_meta.info[0].type) { case _NNS_INT8: do_avg (int8_t, int64_t); break; diff --git a/nnstreamer_example/custom_example_passthrough/nnstreamer_customfilter_example_passthrough.c b/nnstreamer_example/custom_example_passthrough/nnstreamer_customfilter_example_passthrough.c index b19b8f6..46a0002 100644 --- a/nnstreamer_example/custom_example_passthrough/nnstreamer_customfilter_example_passthrough.c +++ b/nnstreamer_example/custom_example_passthrough/nnstreamer_customfilter_example_passthrough.c @@ -37,7 +37,7 @@ typedef struct _pt_data * @brief _pt_data */ static void * 
-pt_init (const GstTensor_Filter_Properties * prop) +pt_init (const GstTensorFilterProperties * prop) { pt_data *data = (pt_data *) malloc (sizeof (pt_data)); int i; @@ -57,7 +57,7 @@ pt_init (const GstTensor_Filter_Properties * prop) * @brief _pt_data */ static void -pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) +pt_exit (void *private_data, const GstTensorFilterProperties * prop) { pt_data *data = private_data; g_assert (data); @@ -68,21 +68,22 @@ pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) * @brief _pt_data */ static int -get_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, - GstTensor_TensorsMeta * meta) +get_inputDim (void *private_data, const GstTensorFilterProperties * prop, + GstTensorsInfo * info) { pt_data *data = private_data; int i; g_assert (data); g_assert (NNS_TENSOR_RANK_LIMIT >= 3); - meta->dims[0][0] = D1; - meta->dims[0][1] = D2; - meta->dims[0][2] = D3; + + info->info[0].dimension[0] = D1; + info->info[0].dimension[1] = D2; + info->info[0].dimension[2] = D3; for (i = 3; i < NNS_TENSOR_RANK_LIMIT; i++) - meta->dims[0][i] = 1; - meta->types[0] = _NNS_UINT8; - meta->num_tensors = 1; + info->info[0].dimension[i] = 1; + info->info[0].type = _NNS_UINT8; + info->num_tensors = 1; return 0; } @@ -90,21 +91,22 @@ get_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, * @brief _pt_data */ static int -get_outputDim (void *private_data, const GstTensor_Filter_Properties * prop, - GstTensor_TensorsMeta * meta) +get_outputDim (void *private_data, const GstTensorFilterProperties * prop, + GstTensorsInfo * info) { pt_data *data = private_data; int i; g_assert (data); g_assert (NNS_TENSOR_RANK_LIMIT >= 3); - meta->dims[0][0] = D1; - meta->dims[0][1] = D2; - meta->dims[0][2] = D3; + + info->info[0].dimension[0] = D1; + info->info[0].dimension[1] = D2; + info->info[0].dimension[2] = D3; for (i = 3; i < NNS_TENSOR_RANK_LIMIT; i++) - meta->dims[0][i] = 1; - meta->types[0] 
= _NNS_UINT8; - meta->num_tensors = 1; + info->info[0].dimension[i] = 1; + info->info[0].type = _NNS_UINT8; + info->num_tensors = 1; return 0; } @@ -112,7 +114,7 @@ get_outputDim (void *private_data, const GstTensor_Filter_Properties * prop, * @brief _pt_data */ static int -pt_invoke (void *private_data, const GstTensor_Filter_Properties * prop, +pt_invoke (void *private_data, const GstTensorFilterProperties * prop, const uint8_t * inptr, uint8_t * outptr) { pt_data *data = private_data; diff --git a/nnstreamer_example/custom_example_passthrough/nnstreamer_customfilter_example_passthrough_variable.c b/nnstreamer_example/custom_example_passthrough/nnstreamer_customfilter_example_passthrough_variable.c index 8ecfed7..a1047db 100644 --- a/nnstreamer_example/custom_example_passthrough/nnstreamer_customfilter_example_passthrough_variable.c +++ b/nnstreamer_example/custom_example_passthrough/nnstreamer_customfilter_example_passthrough_variable.c @@ -29,7 +29,7 @@ typedef struct _pt_data * @brief pt_init */ static void * -pt_init (const GstTensor_Filter_Properties * prop) +pt_init (const GstTensorFilterProperties * prop) { pt_data *data = (pt_data *) malloc (sizeof (pt_data)); @@ -41,7 +41,7 @@ pt_init (const GstTensor_Filter_Properties * prop) * @brief pt_exit */ static void -pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) +pt_exit (void *private_data, const GstTensorFilterProperties * prop) { pt_data *data = private_data; g_assert (data); @@ -52,15 +52,20 @@ pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) * @brief set_inputDim */ static int -set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, - const tensor_dim iDim, const tensor_type iType, - tensor_dim oDim, tensor_type * oType) +set_inputDim (void *private_data, const GstTensorFilterProperties * prop, + const GstTensorsInfo * in_info, GstTensorsInfo * out_info) { int i; + g_assert (in_info); + g_assert (out_info); + + out_info->num_tensors = 1; + 
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) - oDim[i] = iDim[i]; - *oType = iType; + out_info->info[0].dimension[i] = in_info->info[0].dimension[i]; + + out_info->info[0].type = in_info->info[0].type; return 0; } @@ -69,7 +74,7 @@ set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, * @brief pt_invoke */ static int -pt_invoke (void *private_data, const GstTensor_Filter_Properties * prop, +pt_invoke (void *private_data, const GstTensorFilterProperties * prop, const uint8_t * inptr, uint8_t * outptr) { pt_data *data = private_data; @@ -79,8 +84,8 @@ pt_invoke (void *private_data, const GstTensor_Filter_Properties * prop, g_assert (inptr); g_assert (outptr); - size = get_tensor_element_count (prop->outputMeta.dims[0]) * - tensor_element_size[prop->outputMeta.types[0]]; + size = get_tensor_element_count (prop->output_meta.info[0].dimension) * + tensor_element_size[prop->output_meta.info[0].type]; g_assert (inptr != outptr); memcpy (outptr, inptr, size); diff --git a/nnstreamer_example/custom_example_scaler/nnstreamer_customfilter_example_scaler.c b/nnstreamer_example/custom_example_scaler/nnstreamer_customfilter_example_scaler.c index 84debca..21d6f6c 100644 --- a/nnstreamer_example/custom_example_scaler/nnstreamer_customfilter_example_scaler.c +++ b/nnstreamer_example/custom_example_scaler/nnstreamer_customfilter_example_scaler.c @@ -53,12 +53,12 @@ _strdup (const char *src) * @brief tensor_filter_custom::NNS_custom_init_func */ static void * -pt_init (const GstTensor_Filter_Properties * prop) +pt_init (const GstTensorFilterProperties * prop) { pt_data *data = (pt_data *) malloc (sizeof (pt_data)); - if (prop->customProperties && strlen (prop->customProperties) > 0) - data->property = _strdup (prop->customProperties); + if (prop->custom_properties && strlen (prop->custom_properties) > 0) + data->property = _strdup (prop->custom_properties); else data->property = NULL; data->new_x = 0; @@ -93,7 +93,7 @@ pt_init (const GstTensor_Filter_Properties 
* prop) * @brief tensor_filter_custom::NNS_custom_exit_func */ static void -pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) +pt_exit (void *private_data, const GstTensorFilterProperties * prop) { pt_data *data = private_data; assert (data); @@ -106,24 +106,28 @@ pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) * @brief tensor_filter_custom::NNS_custom_set_input_dimension */ static int -set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, - const tensor_dim iDim, const tensor_type iType, - tensor_dim oDim, tensor_type * oType) +set_inputDim (void *private_data, const GstTensorFilterProperties * prop, + const GstTensorsInfo * in_info, GstTensorsInfo * out_info) { int i; pt_data *data = private_data; + assert (data); + assert (in_info); + assert (out_info); + + out_info->num_tensors = 1; for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) - oDim[i] = iDim[i]; + out_info->info[0].dimension[i] = in_info->info[0].dimension[i]; /* Update [1] and [2] oDim with new-x, new-y */ if (data->new_x > 0) - oDim[1] = data->new_x; + out_info->info[0].dimension[1] = data->new_x; if (data->new_y > 0) - oDim[2] = data->new_y; + out_info->info[0].dimension[2] = data->new_y; - *oType = iType; + out_info->info[0].type = in_info->info[0].type; return 0; } @@ -131,7 +135,7 @@ set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, * @brief tensor_filter_custom::NNS_custom_invoke */ static int -pt_invoke (void *private_data, const GstTensor_Filter_Properties * prop, +pt_invoke (void *private_data, const GstTensorFilterProperties * prop, const uint8_t * inptr, uint8_t * outptr) { pt_data *data = private_data; @@ -146,37 +150,40 @@ pt_invoke (void *private_data, const GstTensor_Filter_Properties * prop, /* This assumes the limit is 4 */ assert (NNS_TENSOR_RANK_LIMIT == 4); - assert (prop->inputMeta.dims[0][0] == prop->outputMeta.dims[0][0]); - assert (prop->inputMeta.dims[0][3] == prop->outputMeta.dims[0][3]); - 
assert (prop->inputMeta.types[0] == prop->outputMeta.types[0]); + assert (prop->input_meta.info[0].dimension[0] == + prop->output_meta.info[0].dimension[0]); + assert (prop->input_meta.info[0].dimension[3] == + prop->output_meta.info[0].dimension[3]); + assert (prop->input_meta.info[0].type == prop->output_meta.info[0].type); - elementsize = tensor_element_size[prop->inputMeta.types[0]]; + elementsize = tensor_element_size[prop->input_meta.info[0].type]; - ox = (data->new_x > 0) ? data->new_x : prop->outputMeta.dims[0][1]; - oy = (data->new_y > 0) ? data->new_y : prop->outputMeta.dims[0][2]; + ox = (data->new_x > 0) ? data->new_x : prop->output_meta.info[0].dimension[1]; + oy = (data->new_y > 0) ? data->new_y : prop->output_meta.info[0].dimension[2]; - oidx0 = prop->outputMeta.dims[0][0]; - oidx1 = oidx0 * prop->outputMeta.dims[0][1]; - oidx2 = oidx1 * prop->outputMeta.dims[0][2]; + oidx0 = prop->output_meta.info[0].dimension[0]; + oidx1 = oidx0 * prop->output_meta.info[0].dimension[1]; + oidx2 = oidx1 * prop->output_meta.info[0].dimension[2]; - iidx0 = prop->inputMeta.dims[0][0]; - iidx1 = iidx0 * prop->inputMeta.dims[0][1]; - iidx2 = iidx1 * prop->inputMeta.dims[0][2]; + iidx0 = prop->input_meta.info[0].dimension[0]; + iidx1 = iidx0 * prop->input_meta.info[0].dimension[1]; + iidx2 = iidx1 * prop->input_meta.info[0].dimension[2]; - for (z = 0; z < prop->inputMeta.dims[0][3]; z++) { + for (z = 0; z < prop->input_meta.info[0].dimension[3]; z++) { for (y = 0; y < oy; y++) { for (x = 0; x < ox; x++) { unsigned int c; - for (c = 0; c < prop->inputMeta.dims[0][0]; c++) { + for (c = 0; c < prop->input_meta.info[0].dimension[0]; c++) { int sz; /* Output[y'][x'] = Input[ y' * y / new-y ][ x' * x / new-x ]. Yeah This is Way too Simple. 
But this is just an example :D */ unsigned ix, iy; - ix = x * prop->inputMeta.dims[0][1] / ox; - iy = y * prop->inputMeta.dims[0][2] / oy; + ix = x * prop->input_meta.info[0].dimension[1] / ox; + iy = y * prop->input_meta.info[0].dimension[2] / oy; - assert (ix >= 0 && iy >= 0 && ix < prop->inputMeta.dims[0][1] - && iy < prop->inputMeta.dims[0][2]); + assert (ix >= 0 && iy >= 0 + && ix < prop->input_meta.info[0].dimension[1] + && iy < prop->input_meta.info[0].dimension[2]); /* outptr[z][y][x][c] = inptr[z][iy][ix][c]; */ for (sz = 0; sz < elementsize; sz++) diff --git a/nnstreamer_example/custom_example_scaler/nnstreamer_customfilter_example_scaler_allocator.c b/nnstreamer_example/custom_example_scaler/nnstreamer_customfilter_example_scaler_allocator.c index 1234f98..c453e51 100644 --- a/nnstreamer_example/custom_example_scaler/nnstreamer_customfilter_example_scaler_allocator.c +++ b/nnstreamer_example/custom_example_scaler/nnstreamer_customfilter_example_scaler_allocator.c @@ -54,12 +54,12 @@ _strdup (const char *src) * @brief init callback of tensor_filter custom */ static void * -pt_init (const GstTensor_Filter_Properties * prop) +pt_init (const GstTensorFilterProperties * prop) { pt_data *data = (pt_data *) malloc (sizeof (pt_data)); - if (prop->customProperties && strlen (prop->customProperties) > 0) - data->property = _strdup (prop->customProperties); + if (prop->custom_properties && strlen (prop->custom_properties) > 0) + data->property = _strdup (prop->custom_properties); else data->property = NULL; data->new_x = 0; @@ -94,7 +94,7 @@ pt_init (const GstTensor_Filter_Properties * prop) * @brief exit callback of tensor_filter custom */ static void -pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) +pt_exit (void *private_data, const GstTensorFilterProperties * prop) { pt_data *data = private_data; assert (data); @@ -107,24 +107,28 @@ pt_exit (void *private_data, const GstTensor_Filter_Properties * prop) * @brief setInputDimension callback 
of tensor_filter custom */ static int -set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, - const tensor_dim iDim, const tensor_type iType, - tensor_dim oDim, tensor_type * oType) +set_inputDim (void *private_data, const GstTensorFilterProperties * prop, + const GstTensorsInfo * in_info, GstTensorsInfo * out_info) { int i; pt_data *data = private_data; + assert (data); + assert (in_info); + assert (out_info); + + out_info->num_tensors = 1; for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) - oDim[i] = iDim[i]; + out_info->info[0].dimension[i] = in_info->info[0].dimension[i]; - /* Update [1] and [2] oDim with new-x, new-y */ + /* Update output dimension [1] and [2] with new-x, new-y */ if (data->new_x > 0) - oDim[1] = data->new_x; + out_info->info[0].dimension[1] = data->new_x; if (data->new_y > 0) - oDim[2] = data->new_y; + out_info->info[0].dimension[2] = data->new_y; - *oType = iType; + out_info->info[0].type = in_info->info[0].type; return 0; } @@ -133,7 +137,7 @@ set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop, */ static uint8_t * pt_allocate_invoke (void *private_data, - const GstTensor_Filter_Properties * prop, const uint8_t * inptr, + const GstTensorFilterProperties * prop, const uint8_t * inptr, size_t * size) { pt_data *data = private_data; @@ -142,8 +146,8 @@ pt_allocate_invoke (void *private_data, uint32_t iidx0, iidx1, iidx2; *size = - get_tensor_element_count (prop->outputMeta.dims[0]) * - tensor_element_size[prop->outputMeta.types[0]]; + get_tensor_element_count (prop->output_meta.info[0].dimension) * + tensor_element_size[prop->output_meta.info[0].type]; uint8_t *outptr = (uint8_t *) malloc (sizeof (uint8_t) * *size); assert (data); @@ -153,36 +157,39 @@ pt_allocate_invoke (void *private_data, /* This assumes the limit is 4 */ assert (NNS_TENSOR_RANK_LIMIT == 4); - assert (prop->inputMeta.dims[0][0] == prop->outputMeta.dims[0][0]); - assert (prop->inputMeta.dims[0][3] == prop->outputMeta.dims[0][3]); - 
assert (prop->inputMeta.types[0] == prop->outputMeta.types[0]); + assert (prop->input_meta.info[0].dimension[0] == + prop->output_meta.info[0].dimension[0]); + assert (prop->input_meta.info[0].dimension[3] == + prop->output_meta.info[0].dimension[3]); + assert (prop->input_meta.info[0].type == prop->output_meta.info[0].type); - elementsize = tensor_element_size[prop->inputMeta.types[0]]; + elementsize = tensor_element_size[prop->input_meta.info[0].type]; - ox = (data->new_x > 0) ? data->new_x : prop->outputMeta.dims[0][1]; - oy = (data->new_y > 0) ? data->new_y : prop->outputMeta.dims[0][2]; + ox = (data->new_x > 0) ? data->new_x : prop->output_meta.info[0].dimension[1]; + oy = (data->new_y > 0) ? data->new_y : prop->output_meta.info[0].dimension[2]; - oidx0 = prop->outputMeta.dims[0][0]; - oidx1 = oidx0 * prop->outputMeta.dims[0][1]; - oidx2 = oidx1 * prop->outputMeta.dims[0][2]; + oidx0 = prop->output_meta.info[0].dimension[0]; + oidx1 = oidx0 * prop->output_meta.info[0].dimension[1]; + oidx2 = oidx1 * prop->output_meta.info[0].dimension[2]; - iidx0 = prop->inputMeta.dims[0][0]; - iidx1 = iidx0 * prop->inputMeta.dims[0][1]; - iidx2 = iidx1 * prop->inputMeta.dims[0][2]; + iidx0 = prop->input_meta.info[0].dimension[0]; + iidx1 = iidx0 * prop->input_meta.info[0].dimension[1]; + iidx2 = iidx1 * prop->input_meta.info[0].dimension[2]; - for (z = 0; z < prop->inputMeta.dims[0][3]; z++) { + for (z = 0; z < prop->input_meta.info[0].dimension[3]; z++) { for (y = 0; y < oy; y++) { for (x = 0; x < ox; x++) { unsigned int c; - for (c = 0; c < prop->inputMeta.dims[0][0]; c++) { + for (c = 0; c < prop->input_meta.info[0].dimension[0]; c++) { /* Output[y'][x'] = Input[ y' * y / new-y ][ x' * x / new-x ]. Yeah This is Way too Simple. 
But this is just an example :D */ unsigned ix, iy, sz; - ix = x * prop->inputMeta.dims[0][1] / ox; - iy = y * prop->inputMeta.dims[0][2] / oy; + ix = x * prop->input_meta.info[0].dimension[1] / ox; + iy = y * prop->input_meta.info[0].dimension[2] / oy; - assert (ix >= 0 && iy >= 0 && ix < prop->inputMeta.dims[0][1] - && iy < prop->inputMeta.dims[0][2]); + assert (ix >= 0 && iy >= 0 + && ix < prop->input_meta.info[0].dimension[1] + && iy < prop->input_meta.info[0].dimension[2]); /* outptr[z][y][x][c] = inptr[z][iy][ix][c]; */ for (sz = 0; sz < elementsize; sz++) diff --git a/tests/common/unittest_common.cpp b/tests/common/unittest_common.cpp index 77deeb2..7075bba 100644 --- a/tests/common/unittest_common.cpp +++ b/tests/common/unittest_common.cpp @@ -174,13 +174,13 @@ TEST (common_find_key_strv, key_index) */ TEST (common_get_tensor_dimension, case1) { - uint32_t dim[NNS_TENSOR_SIZE_LIMIT][NNS_TENSOR_RANK_LIMIT]; - int num_tensors = get_tensor_dimension ("345:123:433:177", dim); - EXPECT_EQ (num_tensors, 1); - EXPECT_EQ (dim[0][0], 345); - EXPECT_EQ (dim[0][1], 123); - EXPECT_EQ (dim[0][2], 433); - EXPECT_EQ (dim[0][3], 177); + tensor_dim dim; + int rank = get_tensor_dimension ("345:123:433:177", dim); + EXPECT_EQ (rank, 4); + EXPECT_EQ (dim[0], 345); + EXPECT_EQ (dim[1], 123); + EXPECT_EQ (dim[2], 433); + EXPECT_EQ (dim[3], 177); } /** @@ -188,13 +188,13 @@ TEST (common_get_tensor_dimension, case1) */ TEST (common_get_tensor_dimension, case2) { - uint32_t dim[NNS_TENSOR_SIZE_LIMIT][NNS_TENSOR_RANK_LIMIT]; - int num_tensors = get_tensor_dimension ("345:123:433", dim); - EXPECT_EQ (num_tensors, 1); - EXPECT_EQ (dim[0][0], 345); - EXPECT_EQ (dim[0][1], 123); - EXPECT_EQ (dim[0][2], 433); - EXPECT_EQ (dim[0][3], 1); + tensor_dim dim; + int rank = get_tensor_dimension ("345:123:433", dim); + EXPECT_EQ (rank, 3); + EXPECT_EQ (dim[0], 345); + EXPECT_EQ (dim[1], 123); + EXPECT_EQ (dim[2], 433); + EXPECT_EQ (dim[3], 1); } /** @@ -202,13 +202,13 @@ TEST 
(common_get_tensor_dimension, case2) */ TEST (common_get_tensor_dimension, case3) { - uint32_t dim[NNS_TENSOR_SIZE_LIMIT][NNS_TENSOR_RANK_LIMIT]; - int num_tensors = get_tensor_dimension ("345:123", dim); - EXPECT_EQ (num_tensors, 1); - EXPECT_EQ (dim[0][0], 345); - EXPECT_EQ (dim[0][1], 123); - EXPECT_EQ (dim[0][2], 1); - EXPECT_EQ (dim[0][3], 1); + tensor_dim dim; + int rank = get_tensor_dimension ("345:123", dim); + EXPECT_EQ (rank, 2); + EXPECT_EQ (dim[0], 345); + EXPECT_EQ (dim[1], 123); + EXPECT_EQ (dim[2], 1); + EXPECT_EQ (dim[3], 1); } /** @@ -216,13 +216,13 @@ TEST (common_get_tensor_dimension, case3) */ TEST (common_get_tensor_dimension, case4) { - uint32_t dim[NNS_TENSOR_SIZE_LIMIT][NNS_TENSOR_RANK_LIMIT]; - int num_tensors = get_tensor_dimension ("345", dim); - EXPECT_EQ (num_tensors, 1); - EXPECT_EQ (dim[0][0], 345); - EXPECT_EQ (dim[0][1], 1); - EXPECT_EQ (dim[0][2], 1); - EXPECT_EQ (dim[0][3], 1); + tensor_dim dim; + int rank = get_tensor_dimension ("345", dim); + EXPECT_EQ (rank, 1); + EXPECT_EQ (dim[0], 345); + EXPECT_EQ (dim[1], 1); + EXPECT_EQ (dim[2], 1); + EXPECT_EQ (dim[3], 1); } /** diff --git a/tests/nnstreamer_sink/unittest_sink.cpp b/tests/nnstreamer_sink/unittest_sink.cpp index e74d7d6..c25959f 100644 --- a/tests/nnstreamer_sink/unittest_sink.cpp +++ b/tests/nnstreamer_sink/unittest_sink.cpp @@ -102,7 +102,8 @@ typedef struct gboolean start; /**< stream started */ gboolean end; /**< eos reached */ gchar *caps_name; /**< negotiated caps name */ - GstTensorConfig config; /**< tensor config from negotiated caps */ + GstTensorConfig tensor_config; /**< tensor config from negotiated caps */ + GstTensorsConfig tensors_config; /**< tensors config from negotiated caps */ } TestData; /** @@ -188,6 +189,7 @@ _new_data_cb (GstElement * element, GstBuffer * buffer, gpointer user_data) _print_log ("pts %" GST_TIME_FORMAT, GST_TIME_ARGS (pts)); _print_log ("dts %" GST_TIME_FORMAT, GST_TIME_ARGS (dts)); + _print_log ("number of memory blocks %d", 
gst_buffer_n_memory (buffer)); } if (g_test_data.caps_name == NULL) { @@ -204,8 +206,14 @@ _new_data_cb (GstElement * element, GstBuffer * buffer, gpointer user_data) _print_log ("caps name [%s]", g_test_data.caps_name); if (g_str_equal (g_test_data.caps_name, "other/tensor")) { - if (!gst_tensor_config_from_structure (&g_test_data.config, structure)) { - _print_log ("failed to get config from caps"); + if (!gst_tensor_config_from_structure (&g_test_data.tensor_config, + structure)) { + _print_log ("failed to get tensor config from caps"); + } + } else if (g_str_equal (g_test_data.caps_name, "other/tensors")) { + if (!gst_tensors_config_from_structure (&g_test_data.tensors_config, + structure)) { + _print_log ("failed to get tensors config from caps"); } } @@ -291,7 +299,8 @@ _setup_pipeline (TestOption & option) g_test_data.caps_name = NULL; g_test_data.tc_type = option.test_type; g_test_data.t_type = option.t_type; - gst_tensor_config_init (&g_test_data.config); + gst_tensor_config_init (&g_test_data.tensor_config); + gst_tensors_config_init (&g_test_data.tensors_config); _print_log ("option num_buffers[%d] test_type[%d]", option.num_buffers, option.test_type); @@ -658,6 +667,7 @@ TEST (tensor_sink_test, caps_tensors) { const guint num_buffers = 5; TestOption option = { num_buffers, TEST_TYPE_TENSORS }; + guint i; ASSERT_TRUE (_setup_pipeline (option)); @@ -675,6 +685,21 @@ TEST (tensor_sink_test, caps_tensors) /** check caps name */ EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensors")); + /** check tensors config for video */ + EXPECT_TRUE (gst_tensors_config_validate (&g_test_data.tensors_config)); + EXPECT_EQ (g_test_data.tensors_config.info.num_tensors, 2); + + for (i = 0; i < g_test_data.tensors_config.info.num_tensors; i++) { + EXPECT_EQ (g_test_data.tensors_config.info.info[i].type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensors_config.info.info[i].dimension[0], 3); + EXPECT_EQ (g_test_data.tensors_config.info.info[i].dimension[1], 160); + 
EXPECT_EQ (g_test_data.tensors_config.info.info[i].dimension[2], 120); + EXPECT_EQ (g_test_data.tensors_config.info.info[i].dimension[3], 1); + } + + EXPECT_EQ (g_test_data.tensors_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensors_config.rate_d, 1); + _free_test_data (); } @@ -703,14 +728,14 @@ TEST (tensor_stream_test, video_rgb) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 3); - EXPECT_EQ (g_test_data.config.info.dimension[1], 160); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 3); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 160); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -740,14 +765,14 @@ TEST (tensor_stream_test, video_rgb_padding) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 3); - EXPECT_EQ (g_test_data.config.info.dimension[1], 162); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + 
EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 3); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 162); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -777,14 +802,14 @@ TEST (tensor_stream_test, video_rgb_3f) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 3); - EXPECT_EQ (g_test_data.config.info.dimension[1], 160); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 3); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 3); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 160); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 3); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -814,14 +839,14 @@ TEST (tensor_stream_test, video_bgrx) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 4); - EXPECT_EQ 
(g_test_data.config.info.dimension[1], 160); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 4); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 160); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -851,14 +876,14 @@ TEST (tensor_stream_test, video_bgrx_2f) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 4); - EXPECT_EQ (g_test_data.config.info.dimension[1], 160); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 2); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 4); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 160); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 2); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -888,14 +913,14 @@ TEST (tensor_stream_test, video_gray8) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, 
"other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 160); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 160); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -925,14 +950,14 @@ TEST (tensor_stream_test, video_gray8_padding) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 162); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 162); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); 
+ EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -962,14 +987,14 @@ TEST (tensor_stream_test, video_gray8_3f_padding) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 162); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 3); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 162); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 3); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -999,14 +1024,14 @@ TEST (tensor_stream_test, audio_s8) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for audio */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_INT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 500); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 16000); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_INT8); + EXPECT_EQ 
(g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 500); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 16000); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1036,14 +1061,14 @@ TEST (tensor_stream_test, audio_u8_100F) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for audio */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 100); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 16000); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 100); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 16000); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1073,14 +1098,14 @@ TEST (tensor_stream_test, audio_s16) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for audio */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_INT16); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 500); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - 
EXPECT_EQ (g_test_data.config.rate_n, 16000); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_INT16); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 500); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 16000); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1110,14 +1135,14 @@ TEST (tensor_stream_test, audio_u16_1000f) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for audio */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT16); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1000); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 16000); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT16); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1000); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 16000); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1150,14 +1175,15 @@ TEST (tensor_stream_test, text_utf8) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ 
(g_test_data.config.info.type, _NNS_INT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_INT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1190,14 +1216,15 @@ TEST (tensor_stream_test, text_utf8_3f) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_INT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 3); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_INT8); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 3); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ 
(g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1232,14 +1259,15 @@ TEST (tensor_stream_test, typecast_int32) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, t_type); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, t_type); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1274,14 +1302,15 @@ TEST (tensor_stream_test, typecast_uint32) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, t_type); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, t_type); + EXPECT_EQ 
(g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1316,14 +1345,15 @@ TEST (tensor_stream_test, typecast_int16) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, t_type); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, t_type); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1358,14 +1388,15 @@ TEST (tensor_stream_test, typecast_uint16) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, t_type); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); 
- EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, t_type); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1400,14 +1431,15 @@ TEST (tensor_stream_test, typecast_float64) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, t_type); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, t_type); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1442,14 +1474,15 @@ TEST (tensor_stream_test, typecast_float32) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE 
(gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, t_type); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, t_type); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1484,14 +1517,15 @@ TEST (tensor_stream_test, typecast_int64) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, t_type); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, t_type); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ 
(g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1526,14 +1560,15 @@ TEST (tensor_stream_test, typecast_uint64) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for text */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, t_type); - EXPECT_EQ (g_test_data.config.info.dimension[0], GST_TENSOR_STRING_SIZE); - EXPECT_EQ (g_test_data.config.info.dimension[1], 1); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 0); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, t_type); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], + GST_TENSOR_STRING_SIZE); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 0); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1563,14 +1598,14 @@ TEST (tensor_stream_test, video_aggregate) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for video */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT8); - EXPECT_EQ (g_test_data.config.info.dimension[0], 3); - EXPECT_EQ (g_test_data.config.info.dimension[1], 160); - EXPECT_EQ (g_test_data.config.info.dimension[2], 120); - EXPECT_EQ (g_test_data.config.info.dimension[3], 10); - EXPECT_EQ (g_test_data.config.rate_n, 30); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT8); 
+ EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 3); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 160); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 120); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 10); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 30); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1600,14 +1635,14 @@ TEST (tensor_stream_test, audio_aggregate_s16) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for audio */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_INT16); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 2000); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ (g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 16000); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_INT16); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 2000); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 16000); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } @@ -1637,14 +1672,14 @@ TEST (tensor_stream_test, audio_aggregate_u16) EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor")); /** check tensor config for audio */ - EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.config)); - EXPECT_EQ (g_test_data.config.info.type, _NNS_UINT16); - EXPECT_EQ (g_test_data.config.info.dimension[0], 1); - EXPECT_EQ (g_test_data.config.info.dimension[1], 100); - EXPECT_EQ (g_test_data.config.info.dimension[2], 1); - EXPECT_EQ 
(g_test_data.config.info.dimension[3], 1); - EXPECT_EQ (g_test_data.config.rate_n, 16000); - EXPECT_EQ (g_test_data.config.rate_d, 1); + EXPECT_TRUE (gst_tensor_config_validate (&g_test_data.tensor_config)); + EXPECT_EQ (g_test_data.tensor_config.info.type, _NNS_UINT16); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[0], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[1], 100); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[2], 1); + EXPECT_EQ (g_test_data.tensor_config.info.dimension[3], 1); + EXPECT_EQ (g_test_data.tensor_config.rate_n, 16000); + EXPECT_EQ (g_test_data.tensor_config.rate_d, 1); _free_test_data (); } -- 2.7.4