Fix a bug where incorrect tensor info is used 16/286216/1
authorInki Dae <inki.dae@samsung.com>
Mon, 2 Jan 2023 01:16:08 +0000 (10:16 +0900)
committerInki Dae <inki.dae@samsung.com>
Mon, 2 Jan 2023 01:16:08 +0000 (10:16 +0900)
[Version] : 0.4.7
[Issue type] : bug fix

Fixed a bug where incorrect tensor info was used, by destroying the
previous tensor handle of mlapi before creating a new one.

In the face recognition scenario, the output tensor info can be
updated at runtime because a new class can be added while training -
adding a new class changes the output tensor size.

Change-Id: Idb66bf213de8316b95f7e1697693bbf162e8c7a7
Signed-off-by: Inki Dae <inki.dae@samsung.com>
packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp

index a1cd8c1..b3aa670 100644 (file)
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.4.6
+Version:    0.4.7
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
index f378bd1..2ca2a3d 100644 (file)
@@ -518,13 +518,17 @@ namespace MLAPIImpl
 
                int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-               // TODO. Below is test code, should we allocate new buffer for every inference?
-               if (mOutputDataHandle == NULL) {
-                       ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
-                       if (ret != ML_ERROR_NONE) {
-                               LOGE("Failed to request ml_tensors_data_create(%d).", ret);
-                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-                       }
+               // In runtime, output tensor info can be updated.
+               // So make sure to destroy previous handle. I.e., face recognition framework.
+               if (mOutputDataHandle) {
+                       ml_tensors_data_destroy(mOutputDataHandle);
+                       mOutputDataHandle = NULL;
+               }
+
+               ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
+               if (ret != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_data_create(%d).", ret);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
                // TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()