From: Inki Dae
Date: Mon, 2 Jan 2023 01:16:08 +0000 (+0900)
Subject: fix a bug with incorrect tensor info
X-Git-Tag: accepted/tizen/7.0/unified/20230920.022050~3
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5f06cc191f36ac3a14b84e64fe587228a4f91b1a;p=platform%2Fcore%2Fmultimedia%2Finference-engine-mlapi.git

fix a bug with incorrect tensor info

[Version] : 0.4.7
[Issue type] : bug fix

Fixed a bug where incorrect tensor info was used, by destroying the
previous mlapi tensor handle before creating a new one.

In the face recognition scenario, the output tensor info can be updated
at runtime because a new class can be added while training - adding a
new class changes the output tensor size.

Change-Id: Idb66bf213de8316b95f7e1697693bbf162e8c7a7
Signed-off-by: Inki Dae
---

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index a1cd8c1..b3aa670 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name:        inference-engine-mlapi
 Summary:     ML Single API backend of NNStreamer for MediaVision
-Version:     0.4.6
+Version:     0.4.7
 Release:     0
 Group:       Multimedia/Libraries
 License:     Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index 645a303..90ee0c8 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -501,13 +501,17 @@ namespace MLAPIImpl
 
 		int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-		// TODO. Below is test code, should we allocate new buffer for every inference?
-		if (mOutputDataHandle == NULL) {
-			ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
-			if (ret != ML_ERROR_NONE) {
-				LOGE("Failed to request ml_tensors_data_create(%d).", ret);
-				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-			}
+		// The output tensor info can be updated at runtime, e.g., by the face
+		// recognition framework, so make sure to destroy the previous handle first.
+		if (mOutputDataHandle) {
+			ml_tensors_data_destroy(mOutputDataHandle);
+			mOutputDataHandle = NULL;
+		}
+
+		ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
+		if (ret != ML_ERROR_NONE) {
+			LOGE("Failed to request ml_tensors_data_create(%d).", ret);
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
 
 		// TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
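
Editor's note: for context, below is a minimal standalone sketch of the
recreate-on-update pattern the patch applies. The member names mirror
src/inference_engine_mlapi.cpp and the ml_tensors_data_create()/
ml_tensors_data_destroy() calls are the real NNStreamer C API, but the
OutputBufferHolder class and RefreshOutputData() are illustrative
assumptions, not project code.

	// Sketch only: recreate the cached output data handle so its buffer
	// always matches the current output tensor info, which may have grown
	// at runtime (e.g., a new class was added during on-device training).
	#include <nnstreamer.h> // NNStreamer C API; include path may differ per platform

	class OutputBufferHolder
	{
	public:
		// The info handle is assumed to be owned and updated by the caller.
		explicit OutputBufferHolder(ml_tensors_info_h infoHandle)
				: mOutputInfoHandle(infoHandle), mOutputDataHandle(NULL)
		{}

		~OutputBufferHolder()
		{
			if (mOutputDataHandle)
				ml_tensors_data_destroy(mOutputDataHandle);
		}

		// Drop any stale data handle before allocating a fresh one, as the
		// patch does; returns an ML_ERROR_* code from the underlying API.
		int RefreshOutputData()
		{
			if (mOutputDataHandle) {
				ml_tensors_data_destroy(mOutputDataHandle);
				mOutputDataHandle = NULL;
			}

			return ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
		}

	private:
		ml_tensors_info_h mOutputInfoHandle;
		ml_tensors_data_h mOutputDataHandle;
	};

The cost is one destroy/create pair per inference, which the remaining
TODO about caching tensor info in UpdateTensorsInfo() suggests could be
reduced later if the tensor info is known not to have changed.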