bug fix to input and output tensor dimension
author    Inki Dae <inki.dae@samsung.com>
Tue, 18 Apr 2023 05:41:27 +0000 (14:41 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 18 Apr 2023 06:17:43 +0000 (15:17 +0900)
[Version] : 0.4.9
[Issue type] : bug fix

Fix reading of tensor dimension values from the ML Single API.

This issue has been present since the patch below was committed:
 892c8d2e9af9ce49e714a467063c45cd4ed28cba of the machine_learning repo.

This is a temporary workaround, so it should eventually be replaced
with ML_TENSOR_RANK_LIMIT. As of now, ML_TENSOR_RANK_LIMIT is a fixed
value of 16. Ideally, if we set an input or output tensor dimension to
the actual value using the ml_tensors_info_set_tensor_dimension
function, then the ml_tensors_info_get_tensor_dimension function should
return the same value. However, the current version of
ml_tensors_info_get_tensor_dimension always reports a fixed rank of 16.
A minimal sketch of this round trip follows the sign-off below.

Change-Id: Ie05f7be2b53a6d1c5a5dfcd36cbefc4320c05b5f
Signed-off-by: Inki Dae <inki.dae@samsung.com>
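
For reference, a minimal sketch of the set/get round trip described above. It
assumes the Tizen ML API headers (nnstreamer.h in older releases,
ml-api-common.h in newer ones) and reduces error handling to asserts for
brevity; it is an illustration of the reported behavior, not part of this patch.

#include <nnstreamer.h> // or <ml-api-common.h>, depending on the ML API version
#include <cassert>
#include <cstdio>

int main()
{
	ml_tensors_info_h info = nullptr;

	assert(ml_tensors_info_create(&info) == ML_ERROR_NONE);
	assert(ml_tensors_info_set_count(info, 1) == ML_ERROR_NONE);
	assert(ml_tensors_info_set_tensor_type(info, 0, ML_TENSOR_TYPE_FLOAT32) == ML_ERROR_NONE);

	// Actual model shape: a rank-4 tensor, e.g. 3:224:224:1 in NNStreamer order.
	ml_tensor_dimension in_dim = { 3, 224, 224, 1 };
	assert(ml_tensors_info_set_tensor_dimension(info, 0, in_dim) == ML_ERROR_NONE);

	// Ideally this returns exactly the four dimensions set above. With the
	// API version this commit works around, all entries up to
	// ML_TENSOR_RANK_LIMIT (16) are reported, so the rank always appears
	// to be 16 rather than the actual rank of 4.
	ml_tensor_dimension out_dim = { 0, };
	assert(ml_tensors_info_get_tensor_dimension(info, 0, out_dim) == ML_ERROR_NONE);

	for (unsigned int i = 0; i < ML_TENSOR_RANK_LIMIT; ++i)
		printf("dim[%u] = %u\n", i, out_dim[i]);

	ml_tensors_info_destroy(info);
	return 0;
}
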
packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index f7c28d5..ad18b74 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.4.8
+Version:    0.4.9
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index 3bb0f85..4f3adfc 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
 extern "C" int ml_single_invoke_fast(ml_single_h single, const ml_tensors_data_h input, ml_tensors_data_h output);
 #endif
 
+// TODO: this is a temporary workaround, so it should be replaced with ML_TENSOR_RANK_LIMIT.
+//       As of now, ML_TENSOR_RANK_LIMIT is a fixed value of 16. Ideally, if we set an input or
+//       output tensor dimension to the actual value using the ml_tensors_info_set_tensor_dimension
+//       function, then we should get the same value back from ml_tensors_info_get_tensor_dimension.
+//       However, as of now, ml_tensors_info_get_tensor_dimension always reports a fixed rank of 16.
+#define MAX_TENSOR_DIMENSION_SIZE      4
+
 namespace InferenceEngineImpl
 {
 namespace MLAPIImpl
@@ -574,7 +581,7 @@ namespace MLAPIImpl
                        }
 
                        LOGI("Input tensor dimension:");
-                       for (unsigned int shape_idx = 0; shape_idx < ML_TENSOR_RANK_LIMIT; ++shape_idx) {
+                       for (unsigned int shape_idx = 0; shape_idx < MAX_TENSOR_DIMENSION_SIZE; ++shape_idx) {
                                tensor_info.shape.push_back(in_dim[shape_idx]);
                                in_size *= static_cast<size_t>(in_dim[shape_idx]);
                                LOGI("%u", in_dim[shape_idx]);
@@ -609,7 +616,7 @@ namespace MLAPIImpl
                for (auto& output : mDesignated_outputs) {
                        inference_engine_tensor_info tensor_info;
                        ml_tensor_type_e out_type;
-                       unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
+                       unsigned int out_dim[MAX_TENSOR_DIMENSION_SIZE];
                        size_t out_size = 1;
 
                        ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
@@ -641,7 +648,7 @@ namespace MLAPIImpl
 
                        LOGI("Output tensor dimension:");
 
-                       for (unsigned int shape_idx = 0; shape_idx < ML_TENSOR_RANK_LIMIT; ++shape_idx) {
+                       for (unsigned int shape_idx = 0; shape_idx < MAX_TENSOR_DIMENSION_SIZE; ++shape_idx) {
                                out_size *= static_cast<size_t>(out_dim[shape_idx]);
 
                                if (out_dim[shape_idx] == 1 && shape_size == 0)
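
Distilled from the hunks above, a hedged sketch of what the capped loops
compute; collect_shape is a hypothetical helper for illustration, not part of
the patch.

#include <cstddef>
#include <vector>

// Temporary cap matching the MAX_TENSOR_DIMENSION_SIZE added by this patch.
constexpr unsigned int kMaxTensorRank = 4;

// Trust only the first four entries of the dimension array, since the current
// ml_tensors_info_get_tensor_dimension always reports a rank of 16. Appends
// the shape entries and returns the element count, as the patched loops do.
static size_t collect_shape(const unsigned int dim[], std::vector<size_t> &shape)
{
	size_t size = 1;

	for (unsigned int i = 0; i < kMaxTensorRank; ++i) {
		shape.push_back(dim[i]);
		size *= static_cast<size_t>(dim[i]);
	}

	return size;
}
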