From 68734c87eae257fc55db74458cf9e99d8a212d9d Mon Sep 17 00:00:00 2001
From: Inki Dae
Date: Tue, 18 Apr 2023 14:41:27 +0900
Subject: [PATCH] fix input and output tensor dimension handling

[Version] : 0.4.9
[Issue type] : bug fix

Retrieve tensor dimension values from the ML Single API correctly.

This issue has existed since the patch below was committed to the
machine_learning repo:
  892c8d2e9af9ce49e714a467063c45cd4ed28cba

This is a temporary workaround, so it should be replaced with a proper
fix that uses ML_TENSOR_RANK_LIMIT. As of now, ML_TENSOR_RANK_LIMIT is
a fixed value of 16. Ideally, if we set an input or output tensor
dimension to its actual value with the
ml_tensors_info_set_tensor_dimension function, we should get the same
value back from the ml_tensors_info_get_tensor_dimension function.
However, the current version of ml_tensors_info_get_tensor_dimension
always reports a fixed rank of 16.

Change-Id: Ie05f7be2b53a6d1c5a5dfcd36cbefc4320c05b5f
Signed-off-by: Inki Dae
---
 packaging/inference-engine-mlapi.spec |  2 +-
 src/inference_engine_mlapi.cpp        | 13 ++++++++++---
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index f7c28d5..ad18b74 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.4.8
+Version:    0.4.9
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index 3bb0f85..4f3adfc 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -32,6 +32,13 @@ extern "C" int ml_single_invoke_fast(ml_single_h single,
 				const ml_tensors_data_h input, ml_tensors_data_h output);
 #endif
 
+// TODO: this is a temporary workaround, so it should be replaced with a proper fix that uses
+// ML_TENSOR_RANK_LIMIT. As of now, ML_TENSOR_RANK_LIMIT is a fixed value of 16. Ideally, if we
+// set an input or output tensor dimension to its actual value with the
+// ml_tensors_info_set_tensor_dimension function, we should get the same value back from the
+// ml_tensors_info_get_tensor_dimension function; as of now, it always reports a fixed rank of 16.
+#define MAX_TENSOR_DIMENSION_SIZE 4
+
 namespace InferenceEngineImpl
 {
 namespace MLAPIImpl
@@ -574,7 +581,7 @@ namespace MLAPIImpl
 			}
 
 			LOGI("Input tensor dimension:");
-			for (unsigned int shape_idx = 0; shape_idx < ML_TENSOR_RANK_LIMIT; ++shape_idx) {
+			for (unsigned int shape_idx = 0; shape_idx < MAX_TENSOR_DIMENSION_SIZE; ++shape_idx) {
 				tensor_info.shape.push_back(in_dim[shape_idx]);
 				in_size *= static_cast<size_t>(in_dim[shape_idx]);
 				LOGI("%u", in_dim[shape_idx]);
@@ -609,7 +616,7 @@ namespace MLAPIImpl
 		for (auto& output : mDesignated_outputs) {
 			inference_engine_tensor_info tensor_info;
 			ml_tensor_type_e out_type;
-			unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
+			unsigned int out_dim[MAX_TENSOR_DIMENSION_SIZE];
 			size_t out_size = 1;
 
 			ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
@@ -641,7 +648,7 @@ namespace MLAPIImpl
 
 			LOGI("Output tensor dimension:");
 
-			for (unsigned int shape_idx = 0; shape_idx < ML_TENSOR_RANK_LIMIT; ++shape_idx) {
+			for (unsigned int shape_idx = 0; shape_idx < MAX_TENSOR_DIMENSION_SIZE; ++shape_idx) {
 				out_size *= static_cast<size_t>(out_dim[shape_idx]);
 
 				if (out_dim[shape_idx] == 1 && shape_size == 0)
-- 
2.34.1
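
Note: for reference, below is a minimal standalone sketch of the workaround this
patch applies: computing a tensor's element count from only the first
MAX_TENSOR_DIMENSION_SIZE dimension entries, because
ml_tensors_info_get_tensor_dimension() currently fills all ML_TENSOR_RANK_LIMIT
(16) slots rather than the tensor's actual rank. This is an illustrative sketch,
not code from the patch; it assumes the Tizen ML API header nnstreamer.h, and the
helper name GetTensorElementCount is hypothetical.

	// Minimal sketch (hypothetical helper, not part of the patch).
	#include <cstddef>
	#include <nnstreamer.h>

	// Mirror of the patch's temporary cap: trust only the first 4 dimension
	// entries instead of all ML_TENSOR_RANK_LIMIT (16) reported slots.
	constexpr unsigned int MAX_TENSOR_DIMENSION_SIZE = 4;

	static size_t GetTensorElementCount(ml_tensors_info_h info, unsigned int index)
	{
		// ml_tensor_dimension is unsigned int[ML_TENSOR_RANK_LIMIT].
		ml_tensor_dimension dim = { 0, };

		if (ml_tensors_info_get_tensor_dimension(info, index, dim) != ML_ERROR_NONE)
			return 0;

		size_t count = 1;

		// Multiplying across all 16 reported entries could zero out or inflate
		// the size because trailing slots are not guaranteed to be 1, so the
		// loop stops at the temporary cap, exactly as the patch does.
		for (unsigned int i = 0; i < MAX_TENSOR_DIMENSION_SIZE; ++i)
			count *= static_cast<size_t>(dim[i]);

		return count;
	}

Once ml_tensors_info_get_tensor_dimension reports the actual rank again, the cap
can simply be replaced with ML_TENSOR_RANK_LIMIT, as the TODO in the patch says.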