From cb4848af9973d652f5d7812adc1eb47f2ef9c803 Mon Sep 17 00:00:00 2001
From: Yongjoo Ahn
Date: Mon, 19 Jun 2023 17:12:55 +0900
Subject: [PATCH] [tensor_filter] Let tensor_filter_common handle extra tensors

- Extend array length for layout and ranks
- Replace accessing tensor_info with array index with util func
  `gst_tensors_info_get_nth_info`

Signed-off-by: Yongjoo Ahn
---
 .../tensor_filter/tensor_filter_tensorflow_lite.cc |  2 +-
 .../include/nnstreamer_plugin_api_filter.h         |  6 +-
 .../tensor_filter/tensor_filter_common.c           | 68 +++++++++++++---------
 3 files changed, 45 insertions(+), 31 deletions(-)

diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite.cc b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite.cc
index 3e20f5b..ea531b7 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite.cc
@@ -730,7 +730,7 @@ TFLiteInterpreter::setInputTensorsInfo (const GstTensorsInfo *info)
     tensor_type tf_type;
     const GstTensorInfo *tensor_info;
 
-    tensor_info = &info->info[tensor_idx];
+    tensor_info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) info, tensor_idx);
 
     /** cannot change the type of input */
     tf_type = getTensorType (interpreter->tensor (input_idx_list[tensor_idx])->type);
diff --git a/gst/nnstreamer/include/nnstreamer_plugin_api_filter.h b/gst/nnstreamer/include/nnstreamer_plugin_api_filter.h
index a33fa6f..c97716c 100644
--- a/gst/nnstreamer/include/nnstreamer_plugin_api_filter.h
+++ b/gst/nnstreamer/include/nnstreamer_plugin_api_filter.h
@@ -128,7 +128,7 @@ typedef enum _nns_tensor_layout
   _NNS_LAYOUT_NONE, /**< NONE: none of the above defined layouts */
 } tensor_layout;
 
-typedef tensor_layout tensors_layout[NNS_TENSOR_SIZE_LIMIT];
+typedef tensor_layout tensors_layout[NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT];
 
 /**
  * @brief GstTensorFilter's properties for NN framework (internal data structure)
@@ -146,12 +146,12 @@ typedef struct _GstTensorFilterProperties
   int input_configured; /**< TRUE if input tensor is configured. Use int instead of gboolean because this is refered by custom plugins. */
   GstTensorsInfo input_meta; /**< configured input tensor info */
   tensors_layout input_layout; /**< data layout info provided as a property to tensor_filter for the input, defaults to _NNS_LAYOUT_ANY for all the tensors */
-  unsigned int input_ranks[NNS_TENSOR_SIZE_LIMIT]; /**< the rank list of input tensors, it is calculated based on the dimension string. */
+  unsigned int input_ranks[NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT]; /**< the rank list of input tensors, it is calculated based on the dimension string. */
 
   int output_configured; /**< TRUE if output tensor is configured. Use int instead of gboolean because this is refered by custom plugins. */
   GstTensorsInfo output_meta; /**< configured output tensor info */
   tensors_layout output_layout; /**< data layout info provided as a property to tensor_filter for the output, defaults to _NNS_LAYOUT_ANY for all the tensors */
-  unsigned int output_ranks[NNS_TENSOR_SIZE_LIMIT]; /**< the rank list of output tensors, it is calculated based on the dimension string. */
+  unsigned int output_ranks[NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT]; /**< the rank list of output tensors, it is calculated based on the dimension string. */
 
   const char *custom_properties; /**< sub-plugin specific custom property values in string */
   accl_hw *hw_list; /**< accelerators supported by framework intersected with user provided accelerator preference, use in GstTensorFilterFramework V1 only */
diff --git a/gst/nnstreamer/tensor_filter/tensor_filter_common.c b/gst/nnstreamer/tensor_filter/tensor_filter_common.c
index e4003e0..54608ad 100644
--- a/gst/nnstreamer/tensor_filter/tensor_filter_common.c
+++ b/gst/nnstreamer/tensor_filter/tensor_filter_common.c
@@ -140,7 +140,7 @@ gst_tensors_layout_init (tensors_layout layout)
 {
   int i;
 
-  for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) {
+  for (i = 0; i < NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT; i++) {
     layout[i] = _NNS_LAYOUT_ANY;
   }
 }
@@ -152,7 +152,7 @@ static void
 gst_tensors_rank_init (unsigned int ranks[])
 {
   int i;
-  for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; ++i) {
+  for (i = 0; i < NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT; ++i) {
     ranks[i] = 0;
   }
 }
@@ -215,11 +215,11 @@ gst_tensors_parse_layouts_string (tensors_layout layout,
   str_layouts = g_strsplit_set (layout_string, ",.", -1);
   num_layouts = g_strv_length (str_layouts);
 
-  if (num_layouts > NNS_TENSOR_SIZE_LIMIT) {
+  if (num_layouts > NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT) {
     nns_logw ("Invalid param, layouts (%d) max (%d)\n",
-        num_layouts, NNS_TENSOR_SIZE_LIMIT);
+        num_layouts, NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT);
 
-    num_layouts = NNS_TENSOR_SIZE_LIMIT;
+    num_layouts = NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT;
   }
 
   for (i = 0; i < num_layouts; i++) {
@@ -265,7 +265,8 @@ gst_tensor_filter_get_rank_string (const GstTensorFilterProperties * prop,
       g_string_append_printf (rank, "%u", _ranks[i]);
     else
       g_string_append_printf (rank, "%u",
-          gst_tensor_info_get_rank (&_meta->info[i]));
+          gst_tensor_info_get_rank (gst_tensors_info_get_nth_info (
+              (GstTensorsInfo *) _meta, i)));
 
     if (i < _meta->num_tensors - 1)
       g_string_append_printf (rank, ",");
@@ -307,8 +308,8 @@ gst_tensor_filter_get_dimension_string (const GstTensorFilterProperties * prop,
 
   for (i = 0; i < tinfo->num_tensors; ++i) {
     dim_str =
-        gst_tensor_get_rank_dimension_string (tinfo->info[i].dimension,
-        *(_rank + i));
+        gst_tensor_get_rank_dimension_string (gst_tensors_info_get_nth_info (
+            (GstTensorsInfo *) tinfo, i)->dimension, *(_rank + i));
     g_string_append (dimensions, dim_str);
 
     if (i < tinfo->num_tensors - 1) {
@@ -841,23 +842,29 @@ gst_tensorsinfo_compare_to_string (const GstTensorsInfo * info1,
 
   g_return_val_if_fail (info1 != NULL && info2 != NULL, NULL);
 
-  for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) {
+  for (i = 0; i < NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT; i++) {
     if (info1->num_tensors <= i && info2->num_tensors <= i)
       break;
 
     if (info1->num_tensors > i) {
-      tmp = gst_tensor_get_dimension_string (info1->info[i].dimension);
-      left = g_strdup_printf ("%s [%s]",
-          gst_tensor_get_type_string (info1->info[i].type), tmp);
+      GstTensorInfo *info1_i =
+          gst_tensors_info_get_nth_info ((GstTensorsInfo *) info1, i);
+      tmp = gst_tensor_get_dimension_string (info1_i->dimension);
+      left =
+          g_strdup_printf ("%s [%s]",
+          gst_tensor_get_type_string (info1_i->type), tmp);
       g_free (tmp);
     } else {
      left = g_strdup ("None");
    }
    if (info2->num_tensors > i) {
-      tmp = gst_tensor_get_dimension_string (info2->info[i].dimension);
-      right = g_strdup_printf ("%s [%s]",
-          gst_tensor_get_type_string (info2->info[i].type), tmp);
+      GstTensorInfo *info2_i =
+          gst_tensors_info_get_nth_info ((GstTensorsInfo *) info2, i);
+      tmp = gst_tensor_get_dimension_string (info2_i->dimension);
+      right =
+          g_strdup_printf ("%s [%s]",
+          gst_tensor_get_type_string (info2_i->type), tmp);
       g_free (tmp);
     } else {
       right = g_strdup ("None");
     }
@@ -1505,16 +1512,16 @@ _gtfc_setprop_DIMENSION (GstTensorFilterPrivate * priv,
     str_dims = g_strsplit_set (g_value_get_string (value), ",.", -1);
     num_dims = g_strv_length (str_dims);
 
-    if (num_dims > NNS_TENSOR_SIZE_LIMIT) {
+    if (num_dims > NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT) {
      nns_logw ("Invalid param, dimensions (%d) max (%d)\n",
-          num_dims, NNS_TENSOR_SIZE_LIMIT);
+          num_dims, NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT);
 
-      num_dims = NNS_TENSOR_SIZE_LIMIT;
+      num_dims = NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT;
     }
 
     for (i = 0; i < num_dims; ++i) {
       rank[i] = gst_tensor_parse_dimension (str_dims[i],
-          info->info[i].dimension);
+          gst_tensors_info_get_nth_info (info, i)->dimension);
     }
 
     g_strfreev (str_dims);
@@ -1770,7 +1777,8 @@ _gtfc_setprop_LAYOUT (GstTensorFilterPrivate * priv,
     if (priv->fw->eventHandler
         (priv->fw, prop, priv->privateData, evt, &data) == 0) {
       memcpy (*layout, data.layout,
-          sizeof (tensor_layout) * NNS_TENSOR_SIZE_LIMIT);
+          sizeof (tensor_layout) * (NNS_TENSOR_SIZE_LIMIT +
+              NNS_TENSOR_SIZE_EXTRA_LIMIT));
     } else {
       ml_logw ("Unable to update layout.");
     }
@@ -1840,7 +1848,8 @@ _gtfc_setprop_INPUTCOMBINATION (GstTensorFilterPrivate * priv,
 
   for (i = 0; i < num; i++) {
     val = g_ascii_strtoull (strv[i], NULL, 10);
-    if (errno == ERANGE || val >= NNS_TENSOR_SIZE_LIMIT) {
+    if (errno == ERANGE
+        || val >= NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT) {
       ml_loge ("Invalid value %s, cannot set combination option.", strv[i]);
       ret = ERANGE;
       break;
@@ -1887,7 +1896,8 @@ _gtfc_setprop_OUTPUTCOMBINATION (GstTensorFilterPrivate * priv,
       break;
     }
 
-    if (errno == ERANGE || val >= NNS_TENSOR_SIZE_LIMIT) {
+    if (errno == ERANGE
+        || val >= NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT) {
       ml_loge ("Invalid value %s, cannot set combination option.", strv[i]);
       ret = ERANGE;
       break;
@@ -2255,10 +2265,12 @@ gst_tensor_filter_common_get_combined_in_info (GstTensorFilterPrivate * priv,
         goto error;
       }
-      gst_tensor_info_copy (&combined->info[idx++], &in->info[i]);
+      gst_tensor_info_copy (gst_tensors_info_get_nth_info (combined, idx++),
+          gst_tensors_info_get_nth_info ((GstTensorsInfo *) in, i));
 
-      if (idx >= NNS_TENSOR_SIZE_LIMIT) {
-        nns_loge ("The max number of tensors is %d.", NNS_TENSOR_SIZE_LIMIT);
+      if (idx >= NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT) {
+        nns_loge ("The max number of tensors is %d.",
+            NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT);
         goto error;
       }
     }
   }
@@ -2302,7 +2314,8 @@ gst_tensor_filter_common_get_combined_out_info (GstTensorFilterPrivate * priv,
         goto error;
       }
 
-      gst_tensor_info_copy (&combined->info[idx++], &in->info[i]);
+      gst_tensor_info_copy (gst_tensors_info_get_nth_info (combined, idx++),
+          gst_tensors_info_get_nth_info ((GstTensorsInfo *) in, i));
     }
   }
 
@@ -2315,7 +2328,8 @@
         goto error;
       }
 
-      gst_tensor_info_copy (&combined->info[idx++], &out->info[i]);
+      gst_tensor_info_copy (gst_tensors_info_get_nth_info (combined, idx++),
+          gst_tensors_info_get_nth_info ((GstTensorsInfo *) out, i));
     }
   }
 
-- 
2.7.4
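
For reference, below is a minimal sketch (not part of the patch) of the access pattern this change moves toward: walking a GstTensorsInfo with gst_tensors_info_get_nth_info() instead of indexing info->info[i] directly, so entries beyond NNS_TENSOR_SIZE_LIMIT (the extra tensors, up to NNS_TENSOR_SIZE_LIMIT + NNS_TENSOR_SIZE_EXTRA_LIMIT) stay reachable. The API names are taken from the patch above; the include path, the helper function print_all_tensors, and the assumption about how extra entries are resolved are illustrative only.

/*
 * Illustrative sketch (assumed semantics): list every tensor in a
 * GstTensorsInfo through the nth-info accessor rather than info->info[i],
 * so indices past NNS_TENSOR_SIZE_LIMIT can still be reached.
 */
#include <glib.h>
#include <nnstreamer_plugin_api.h> /* assumed header for the plugin API */

static void
print_all_tensors (GstTensorsInfo * info)
{
  guint i;

  for (i = 0; i < info->num_tensors; i++) {
    GstTensorInfo *nth = gst_tensors_info_get_nth_info (info, i);
    gchar *dim_str = gst_tensor_get_dimension_string (nth->dimension);

    g_print ("tensor[%u]: %s [%s]\n", i,
        gst_tensor_get_type_string (nth->type), dim_str);
    g_free (dim_str); /* the dimension string is newly allocated */
  }
}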