From dda5dad6fb615559ab05b912147cdee08d33f40c Mon Sep 17 00:00:00 2001 From: Jaeyun Date: Mon, 11 Feb 2019 18:23:11 +0900 Subject: [PATCH] [Filter] common functions in filter-subplugins use common functions to load tf/tf-lite model 1. remove unnecessary functions in sub-plugins (tf/tf-lite) 2. copy and free tensor info to load tf model during the caps negotiation 3. print additional logs when loading model info Signed-off-by: Jaeyun Jung --- .../tensor_filter/tensor_filter_tensorflow_core.cc | 89 +++------- .../tensor_filter/tensor_filter_tensorflow_core.h | 6 - .../tensor_filter_tensorflow_lite_core.cc | 43 +---- .../tensor_filter_tensorflow_lite_core.h | 2 - gst/nnstreamer/tensor_filter/tensor_filter.c | 193 +++++++++------------ gst/nnstreamer/tensor_filter/tensor_filter.h | 4 +- 6 files changed, 117 insertions(+), 220 deletions(-) diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.cc b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.cc index 497b377..7073c13 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.cc +++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.cc @@ -61,31 +61,21 @@ TFCore::TFCore (const char * _model_path) */ TFCore::~TFCore () { + gst_tensors_info_free (&inputTensorMeta); + gst_tensors_info_free (&outputTensorMeta); } /** * @brief initialize the object with tensorflow model * @return 0 if OK. non-zero if error. - * -1 if the model is not loaded. - * -2 if the initialization of input tensor is failed. - * -3 if the initialization of output tensor is failed. 
*/ int TFCore::init (const GstTensorFilterProperties * prop) { - if (setTensorProp (&inputTensorMeta, &prop->input_meta)) { - GST_ERROR ("Failed to initialize input tensor\n"); - return -2; - } - if (setTensorProp (&outputTensorMeta, &prop->output_meta)) { - GST_ERROR ("Failed to initialize output tensor\n"); - return -3; - } - if (loadModel ()) { - GST_ERROR ("Failed to load model\n"); - return -1; - } - return 0; + gst_tensors_info_copy (&inputTensorMeta, &prop->input_meta); + gst_tensors_info_copy (&outputTensorMeta, &prop->output_meta); + + return loadModel (); } /** @@ -258,24 +248,26 @@ TFCore::inputTensorValidation (const std::vector &placeholders) } } char chars[] = "[]"; - for (unsigned int i = 0; i < strlen (chars); ++i) + for (unsigned int j = 0; j < strlen (chars); ++j) { shape_description.erase ( std::remove ( shape_description.begin (), shape_description.end (), - chars[i] + chars[j] ), shape_description.end () ); } DataType dtype = DT_INVALID; + char *tensor_name = inputTensorMeta.info[i].name; + if (node->attr ().count ("dtype")) { dtype = node->attr ().at ("dtype").type (); } - if (strcmp (inputTensorMeta.info[i].name, node->name ().c_str ())) { + if (!tensor_name || strcmp (tensor_name, node->name ().c_str ())) { GST_ERROR ("Input Tensor is not valid: the name of input tensor is different\n"); return -2; } @@ -308,38 +300,6 @@ TFCore::inputTensorValidation (const std::vector &placeholders) } /** - * @brief extract and store the information of src tensors - * @return 0 if OK. non-zero if error. - */ -int -TFCore::setTensorProp (GstTensorsInfo * dest, const GstTensorsInfo * src) -{ - dest->num_tensors = src->num_tensors; - memcpy (dest->info, src->info, sizeof (GstTensorInfo) * src->num_tensors); - return 0; -} - -/** - * @brief return the number of Input Tensors. - * @return the number of Input Tensors. - */ -int -TFCore::getInputTensorSize () -{ - return inputTensorMeta.num_tensors; -} - -/** - * @brief return the number of Output Tensors. 
- * @return the number of Output Tensors - */ -int -TFCore::getOutputTensorSize () -{ - return outputTensorMeta.num_tensors; -} - -/** * @brief return the Dimension of Input Tensor. * @param[out] info Structure for tensor info. * @todo return whole array rather than index 0 @@ -348,9 +308,7 @@ TFCore::getOutputTensorSize () int TFCore::getInputTensorDim (GstTensorsInfo * info) { - info->num_tensors = inputTensorMeta.num_tensors; - memcpy (info->info, inputTensorMeta.info, - sizeof (GstTensorInfo) * inputTensorMeta.num_tensors); + gst_tensors_info_copy (info, &inputTensorMeta); return 0; } @@ -363,9 +321,7 @@ TFCore::getInputTensorDim (GstTensorsInfo * info) int TFCore::getOutputTensorDim (GstTensorsInfo * info) { - info->num_tensors = outputTensorMeta.num_tensors; - memcpy (info->info, outputTensorMeta.info, - sizeof (GstTensorInfo) * outputTensorMeta.num_tensors); + gst_tensors_info_copy (info, &outputTensorMeta); return 0; } @@ -389,6 +345,10 @@ TFCore::getOutputTensorDim (GstTensorsInfo * info) int TFCore::run (const GstTensorMemory * input, GstTensorMemory * output) { +#if (DBG) + gint64 start_time = g_get_real_time (); +#endif + std::vector > input_feeds; std::vector output_tensor_names; std::vector outputs; @@ -458,11 +418,7 @@ TFCore::run (const GstTensorMemory * input, GstTensorMemory * output) } for (int i = 0; i < outputTensorMeta.num_tensors; ++i) { - output[i].type = getTensorTypeFromTF (outputs[i].dtype()); - output[i].size = tensor_element_size[output[i].type]; - for (int j = 0; j < NNS_TENSOR_RANK_LIMIT; ++j) - output[i].size *= outputTensorMeta.info[i].dimension[j]; - + g_assert (output[i].type == getTensorTypeFromTF (outputs[i].dtype())); array_len = output[i].size / tensor_element_size[output[i].type]; switch (output[i].type) { @@ -502,6 +458,12 @@ TFCore::run (const GstTensorMemory * input, GstTensorMemory * output) } } +#if (DBG) + gint64 stop_time = g_get_real_time (); + g_message ("Invoke() is finished: %" G_GINT64_FORMAT, + (stop_time - 
start_time)); +#endif + return 0; } @@ -532,8 +494,7 @@ int tf_core_init (void * tf, const GstTensorFilterProperties * prop) { TFCore *c = (TFCore *) tf; - int ret = c->init (prop); - return ret; + return c->init (prop); } /** diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.h b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.h index 06a9d81..c98d7a9 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.h +++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.h @@ -68,11 +68,6 @@ public: int init(const GstTensorFilterProperties * prop); int loadModel (); const char* getModelPath(); - int setInputTensorProp (); - int setOutputTensorProp (); - - int getInputTensorSize (); - int getOutputTensorSize (); int getInputTensorDim (GstTensorsInfo * info); int getOutputTensorDim (GstTensorsInfo * info); int run (const GstTensorMemory * input, GstTensorMemory * output); @@ -93,7 +88,6 @@ private: tensor_type getTensorTypeFromTF (DataType tfType); DataType getTensorTypeToTF (tensor_type tType); - int setTensorProp (GstTensorsInfo * dest, const GstTensorsInfo * src); int inputTensorValidation (const std::vector &placeholders); }; diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.cc b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.cc index 1b31500..5e2217e 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.cc +++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.cc @@ -54,6 +54,8 @@ TFLiteCore::TFLiteCore (const char * _model_path) */ TFLiteCore::~TFLiteCore () { + gst_tensors_info_free (&inputTensorMeta); + gst_tensors_info_free (&outputTensorMeta); } /** @@ -259,26 +261,6 @@ TFLiteCore::getTensorDim (int tensor_idx, tensor_dim dim) } /** - * @brief return the number of Input Tensors. - * @return the number of Input Tensors. 
- */ -int -TFLiteCore::getInputTensorSize () -{ - return inputTensorMeta.num_tensors; -} - -/** - * @brief return the number of Output Tensors. - * @return the number of Output Tensors - */ -int -TFLiteCore::getOutputTensorSize () -{ - return outputTensorMeta.num_tensors; -} - -/** * @brief return the Dimension of Input Tensor. * @param[out] info Structure for tensor info. * @todo return whole array rather than index 0 @@ -287,9 +269,7 @@ TFLiteCore::getOutputTensorSize () int TFLiteCore::getInputTensorDim (GstTensorsInfo * info) { - info->num_tensors = inputTensorMeta.num_tensors; - memcpy (info->info, inputTensorMeta.info, - sizeof (GstTensorInfo) * inputTensorMeta.num_tensors); + gst_tensors_info_copy (info, &inputTensorMeta); return 0; } @@ -302,9 +282,7 @@ TFLiteCore::getInputTensorDim (GstTensorsInfo * info) int TFLiteCore::getOutputTensorDim (GstTensorsInfo * info) { - info->num_tensors = outputTensorMeta.num_tensors; - memcpy (info->info, outputTensorMeta.info, - sizeof (GstTensorInfo) * outputTensorMeta.num_tensors); + gst_tensors_info_copy (info, &outputTensorMeta); return 0; } @@ -325,7 +303,7 @@ TFLiteCore::invoke (const GstTensorMemory * input, GstTensorMemory * output) int tensor_idx; TfLiteTensor *tensor_ptr; - for (int i = 0; i < getOutputTensorSize (); ++i) { + for (int i = 0; i < outputTensorMeta.num_tensors; ++i) { tensor_idx = interpreter->outputs ()[i]; tensor_ptr = interpreter->tensor (tensor_idx); @@ -334,7 +312,7 @@ TFLiteCore::invoke (const GstTensorMemory * input, GstTensorMemory * output) tensors_idx.push_back (tensor_idx); } - for (int i = 0; i < getInputTensorSize (); ++i) { + for (int i = 0; i < inputTensorMeta.num_tensors; ++i) { tensor_idx = interpreter->inputs ()[i]; tensor_ptr = interpreter->tensor (tensor_idx); @@ -395,8 +373,7 @@ int tflite_core_init (void * tflite) { TFLiteCore *c = (TFLiteCore *) tflite; - int ret = c->init (); - return ret; + return c->init (); } /** @@ -421,8 +398,7 @@ int tflite_core_getInputDim (void * 
tflite, GstTensorsInfo * info) { TFLiteCore *c = (TFLiteCore *) tflite; - int ret = c->getInputTensorDim (info); - return ret; + return c->getInputTensorDim (info); } /** @@ -435,8 +411,7 @@ int tflite_core_getOutputDim (void * tflite, GstTensorsInfo * info) { TFLiteCore *c = (TFLiteCore *) tflite; - int ret = c->getOutputTensorDim (info); - return ret; + return c->getOutputTensorDim (info); } /** diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.h b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.h index 8ce64c6..cfb2e45 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.h +++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.h @@ -63,8 +63,6 @@ private: std::unique_ptr interpreter; std::unique_ptr model; - int getInputTensorSize (); - int getOutputTensorSize (); tensor_type getTensorType (TfLiteType tfType); int getTensorDim (int tensor_idx, tensor_dim dim); }; diff --git a/gst/nnstreamer/tensor_filter/tensor_filter.c b/gst/nnstreamer/tensor_filter/tensor_filter.c index 587fa36..15a070f 100644 --- a/gst/nnstreamer/tensor_filter/tensor_filter.c +++ b/gst/nnstreamer/tensor_filter/tensor_filter.c @@ -59,7 +59,6 @@ #include #include -#include #include "tensor_filter.h" /** @@ -211,6 +210,9 @@ static gboolean gst_tensor_filter_transform_size (GstBaseTransform * trans, static gboolean gst_tensor_filter_start (GstBaseTransform * trans); static gboolean gst_tensor_filter_stop (GstBaseTransform * trans); +static void gst_tensor_filter_compare_tensors (GstTensorsInfo * info1, + GstTensorsInfo * info2); + /** * @brief Open nn framework. */ @@ -376,61 +378,28 @@ gst_tensor_filter_init (GstTensorFilter * self) } /** - * @brief deallocate the name of each GstTensorInfo. 
- * @param The GstTensorsInfo object - */ -static void -gst_tensor_filter_deallocate_tensor_name (GstTensorsInfo * info) -{ - guint i; - for (i = 0; i < info->num_tensors; ++i) - g_free (info->info[i].name); - gst_tensors_info_init (info); -} - -/** - * @brief Clear and reset data. + * @brief Function to finalize instance. */ static void -gst_tensor_filter_reset (GstTensorFilter * self) +gst_tensor_filter_finalize (GObject * object) { + GstTensorFilter *self; GstTensorFilterProperties *prop; + self = GST_TENSOR_FILTER (object); prop = &self->prop; gst_tensor_filter_close_fw (self); g_free_const (prop->fwname); - prop->fwname = NULL; - g_free_const (prop->model_file); - prop->model_file = NULL; - g_free_const (prop->custom_properties); - prop->custom_properties = NULL; - prop->input_configured = FALSE; - gst_tensor_filter_deallocate_tensor_name (&prop->input_meta); - - prop->output_configured = FALSE; - gst_tensor_filter_deallocate_tensor_name (&prop->output_meta); - - self->configured = FALSE; - gst_tensors_config_init (&self->in_config); - gst_tensors_config_init (&self->out_config); -} - -/** - * @brief Function to finalize instance. - */ -static void -gst_tensor_filter_finalize (GObject * object) -{ - GstTensorFilter *self; - - self = GST_TENSOR_FILTER (object); + gst_tensors_info_free (&prop->input_meta); + gst_tensors_info_free (&prop->output_meta); - gst_tensor_filter_reset (self); + gst_tensors_info_free (&self->in_config.info); + gst_tensors_info_free (&self->out_config.info); G_OBJECT_CLASS (parent_class)->finalize (object); } @@ -900,17 +869,16 @@ static void gst_tensor_filter_load_tensor_info (GstTensorFilter * self) { GstTensorFilterProperties *prop; + GstTensorsInfo in_info, out_info; int res; prop = &self->prop; - /** - * supposed fixed in-tensor info if getInputDimension is defined. 
- */ - if (!prop->input_configured) { - GstTensorsInfo in_info; + gst_tensors_info_init (&in_info); + gst_tensors_info_init (&out_info); - gst_tensors_info_init (&in_info); + /* supposed fixed in-tensor info if getInputDimension is defined. */ + if (!prop->input_configured) { gst_tensor_filter_call (self, res, getInputDimension, &in_info); if (res == 0) { @@ -918,23 +886,23 @@ gst_tensor_filter_load_tensor_info (GstTensorFilter * self) /** if set-property called and already has info, verify it! */ if (prop->input_meta.num_tensors > 0) { - g_assert (gst_tensors_info_is_equal (&prop->input_meta, &in_info)); + if (!gst_tensors_info_is_equal (&in_info, &prop->input_meta)) { + GST_ERROR_OBJECT (self, "The input tensor is not compatible."); + gst_tensor_filter_compare_tensors (&in_info, &prop->input_meta); + g_assert (0); + goto done; + } + } else { + gst_tensors_info_copy (&prop->input_meta, &in_info); } prop->input_configured = TRUE; - prop->input_meta = in_info; - silent_debug_info (&in_info, "input tensor"); } } - /** - * supposed fixed out-tensor info if getOutputDimension is defined. - */ + /* supposed fixed out-tensor info if getOutputDimension is defined. */ if (!prop->output_configured) { - GstTensorsInfo out_info; - - gst_tensors_info_init (&out_info); gst_tensor_filter_call (self, res, getOutputDimension, &out_info); if (res == 0) { @@ -942,26 +910,35 @@ gst_tensor_filter_load_tensor_info (GstTensorFilter * self) /** if set-property called and already has info, verify it! 
*/ if (prop->output_meta.num_tensors > 0) { - g_assert (gst_tensors_info_is_equal (&prop->output_meta, &out_info)); + if (!gst_tensors_info_is_equal (&out_info, &prop->output_meta)) { + GST_ERROR_OBJECT (self, "The output tensor is not compatible."); + gst_tensor_filter_compare_tensors (&out_info, &prop->output_meta); + g_assert (0); + goto done; + } + } else { + gst_tensors_info_copy (&prop->output_meta, &out_info); } prop->output_configured = TRUE; - prop->output_meta = out_info; - silent_debug_info (&out_info, "output tensor"); } } + +done: + gst_tensors_info_free (&in_info); + gst_tensors_info_free (&out_info); } /** * @brief Printout the comparison results of two tensors. * @param[in] info1 The tensors to be shown on the left hand side * @param[in] info2 The tensors to be shown on the right hand side - * @return The newly allocated string for the printout. The caller must deallocate it. * @todo If this is going to be used by other elements, move this to nnstreamer/tensor_common. */ -static gchar * -_compare_tensors (GstTensorsInfo * info1, GstTensorsInfo * info2) +static void +gst_tensor_filter_compare_tensors (GstTensorsInfo * info1, + GstTensorsInfo * info2) { gchar null[] = ""; gchar *result = null; @@ -1009,28 +986,10 @@ _compare_tensors (GstTensorsInfo * info1, GstTensorsInfo * info2) g_free (line); } - if (result == null) - return NULL; - return result; -} - -/** - * @brief Copy GstTensorsInfo without the name of tensors. 
- * @param[out] the destination object - * @param[in] the source object - */ -static void -gst_tensor_filter_copy_info (GstTensorsInfo * dest, const GstTensorsInfo * src) -{ - guint i; - - g_return_if_fail (dest != NULL && src != NULL); - - dest->num_tensors = src->num_tensors; - for (i = 0; i < src->num_tensors; ++i) { - memcpy (dest->info[i].dimension, src->info[i].dimension, - sizeof (tensor_dim)); - dest->info[i].type = src->info[i].type; + if (result != null) { + /* print warning message */ + g_warning ("Tensor info :\n%s", result); + g_free (result); } } @@ -1051,6 +1010,8 @@ gst_tensor_filter_configure_tensor (GstTensorFilter * self, g_return_val_if_fail (incaps != NULL, FALSE); prop = &self->prop; + gst_tensors_config_init (&in_config); + gst_tensors_config_init (&out_config); /** * GstTensorFilter has to parse the tensor dimension and type from NN model. @@ -1071,17 +1032,15 @@ gst_tensor_filter_configure_tensor (GstTensorFilter * self, /** if set-property called and already has info, verify it! */ if (prop->input_meta.num_tensors > 0) { if (!gst_tensors_info_is_equal (&in_config.info, &prop->input_meta)) { - gchar *str = _compare_tensors (&in_config.info, &prop->input_meta); - /* print warning message */ - g_warning ("The input tensor is not compatible.\n%s", str); - GST_ERROR_OBJECT (self, "The input tensor is not compatible.\n%s", str); - g_free (str); - return FALSE; + GST_ERROR_OBJECT (self, "The input tensor is not compatible."); + gst_tensor_filter_compare_tensors (&in_config.info, &prop->input_meta); + goto done; } + } else { + gst_tensors_info_copy (&prop->input_meta, &in_config.info); } prop->input_configured = TRUE; - gst_tensor_filter_copy_info (&prop->input_meta, &in_config.info); /** call setInputDimension if output tensor is not configured */ if (!prop->output_configured) { @@ -1095,27 +1054,26 @@ gst_tensor_filter_configure_tensor (GstTensorFilter * self, if (res == 0) { /** if set-property called and already has info, verify it! 
*/ if (prop->output_meta.num_tensors > 0) { - if (!gst_tensors_info_is_equal (&prop->output_meta, &out_info)) { - gchar *str = _compare_tensors (&out_info, &prop->output_meta); - /* print warning message */ - g_warning ("The output tensor is not compatible.\n%s", str); - GST_ERROR_OBJECT (self, - "The output tensor is not compatible.\n%s", str); - g_free (str); - return FALSE; + if (!gst_tensors_info_is_equal (&out_info, &prop->output_meta)) { + GST_ERROR_OBJECT (self, "The output tensor is not compatible."); + gst_tensor_filter_compare_tensors (&out_info, &prop->output_meta); + gst_tensors_info_free (&out_info); + goto done; } + } else { + gst_tensors_info_copy (&prop->output_meta, &out_info); } prop->output_configured = TRUE; - gst_tensor_filter_copy_info (&prop->output_meta, &out_info); - silent_debug_info (&out_info, "output tensor"); } + gst_tensors_info_free (&out_info); + if (!prop->output_configured) { GST_ERROR_OBJECT (self, "Failed to get output tensor info.\n"); g_assert (0); - return FALSE; + goto done; } } @@ -1125,7 +1083,7 @@ gst_tensor_filter_configure_tensor (GstTensorFilter * self, * GstTensorFilter cannot assure the framerate. * Simply set the framerate of out-tensor from incaps. 
*/ - out_config.info = prop->output_meta; + gst_tensors_info_copy (&out_config.info, &prop->output_meta); out_config.rate_n = in_config.rate_n; out_config.rate_d = in_config.rate_d; @@ -1134,12 +1092,21 @@ gst_tensor_filter_configure_tensor (GstTensorFilter * self, g_assert (gst_tensors_config_is_equal (&self->in_config, &in_config)); g_assert (gst_tensors_config_is_equal (&self->out_config, &out_config)); } else { - self->in_config = in_config; - self->out_config = out_config; + gst_tensors_info_copy (&self->in_config.info, &in_config.info); + self->in_config.rate_n = in_config.rate_n; + self->in_config.rate_d = in_config.rate_d; + + gst_tensors_info_copy (&self->out_config.info, &out_config.info); + self->out_config.rate_n = out_config.rate_n; + self->out_config.rate_d = out_config.rate_d; + self->configured = TRUE; } } +done: + gst_tensors_info_free (&in_config.info); + gst_tensors_info_free (&out_config.info); return self->configured; } @@ -1220,24 +1187,26 @@ gst_tensor_filter_transform_caps (GstBaseTransform * trans, config.info = self->prop.output_meta; result = gst_tensor_filter_caps_from_config (self, &config); } else { - GstTensorsInfo in_info; - /* check in-tensor info to call setInputDimension */ - in_info = config.info; - if (gst_tensors_info_validate (&in_info)) { + if (gst_tensors_info_validate (&config.info)) { + GstTensorsInfo out_info; int res = -1; /* call setInputDimension with given input tensor */ - gst_tensor_filter_call (self, res, setInputDimension, &in_info, - &config.info); + gst_tensors_info_init (&out_info); + gst_tensor_filter_call (self, res, setInputDimension, &config.info, + &out_info); if (res == 0) { + config.info = out_info; result = gst_tensor_filter_caps_from_config (self, &config); } else { GST_ERROR_OBJECT (self, "Cannot get the output tensor info."); g_assert (0); result = gst_caps_from_string (CAPS_STRING); } + + gst_tensors_info_free (&out_info); } else { /* we don't know the exact tensor info yet */ result = 
gst_caps_from_string (CAPS_STRING); diff --git a/gst/nnstreamer/tensor_filter/tensor_filter.h b/gst/nnstreamer/tensor_filter/tensor_filter.h index 0c99374..b0b95ac 100644 --- a/gst/nnstreamer/tensor_filter/tensor_filter.h +++ b/gst/nnstreamer/tensor_filter/tensor_filter.h @@ -64,8 +64,8 @@ struct _GstTensorFilter GstTensorFilterProperties prop; /**< NNFW plugin's properties */ const GstTensorFilterFramework *fw; /**< The implementation core of the NNFW. NULL if not configured */ - /** internal properties for tensor-filter */ - int silent; /**< Verbose mode if FALSE. int instead of gboolean for non-glib custom plugins */ + /* internal properties for tensor-filter */ + gboolean silent; /**< Verbose mode if FALSE */ gboolean configured; /**< True if already successfully configured tensor metadata */ GstTensorsConfig in_config; /**< input tensor info */ GstTensorsConfig out_config; /**< output tensor info */ -- 2.7.4