InferenceTFLite: fix memory leak 85/304685/3
author    Vibhav Aggarwal <v.aggarwal@samsung.com>
Tue, 23 Jan 2024 06:44:40 +0000 (15:44 +0900)
committer Vibhav Aggarwal <v.aggarwal@samsung.com>
Wed, 24 Jan 2024 06:24:29 +0000 (15:24 +0900)
[Issue type] bug fix

The GPU delegate created by TfLiteGpuDelegateV2Create()
must be deleted using TfLiteGpuDelegateV2Delete().
Previously the delegate pointer was kept only in a local
variable and never freed, so it leaked. Store it in a
member variable instead and release it in the destructor.

Change-Id: Iacdcb1c3e51181584fa3c42447bf008937f986ea
Signed-off-by: Vibhav Aggarwal <v.aggarwal@samsung.com>
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h
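
For context, the create/use/delete pairing this commit enforces, shown as a
minimal standalone sketch rather than the project's actual code (the helper
name SetUpGpuDelegate and the interpreter parameter are illustrative
assumptions; the API calls are the standard TFLite GPU delegate API):

    #include <memory>
    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/delegates/gpu/delegate.h"

    // ModifyGraphWithDelegate() does not take ownership of a raw delegate
    // pointer, so the caller must keep it and release it later.
    static bool SetUpGpuDelegate(std::unique_ptr<tflite::Interpreter> &interpreter,
                                 TfLiteDelegate *&delegate)
    {
            TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
            delegate = TfLiteGpuDelegateV2Create(&options);
            if (!delegate)
                    return false;

            if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
                    TfLiteGpuDelegateV2Delete(delegate);  // don't leak on failure
                    delegate = nullptr;
                    return false;
            }
            return true;
    }

    // Teardown follows the documented order for the GPU delegate: destroy
    // the interpreter first, then release the delegate.
    //   interpreter.reset();
    //   TfLiteGpuDelegateV2Delete(delegate);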

index f37454bd58e4caf54276e23f5dea908170d82eec..edb27c2fa7e8c939638d46465ac21e501617cd8a 100644
@@ -30,7 +30,7 @@ namespace InferenceEngineImpl
 {
 namespace TFLiteImpl
 {
-       InferenceTFLite::InferenceTFLite(void) : mTargetTypes(INFERENCE_TARGET_NONE)
+       InferenceTFLite::InferenceTFLite()
        {
                LOGI("ENTER");
                LOGI("LEAVE");
@@ -38,7 +38,8 @@ namespace TFLiteImpl
 
        InferenceTFLite::~InferenceTFLite()
        {
-               ;
+               if (mDelegate)
+                       TfLiteGpuDelegateV2Delete(mDelegate);
        }
 
        int InferenceTFLite::SetPrivateData(void *data)
@@ -117,13 +118,13 @@ namespace TFLiteImpl
 
                if (mTargetTypes == INFERENCE_TARGET_GPU) {
                        TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
-                       TfLiteDelegate *delegate = TfLiteGpuDelegateV2Create(&options);
-                       if (!delegate){
+                       mDelegate = TfLiteGpuDelegateV2Create(&options);
+                       if (!mDelegate){
                                LOGE("Failed to GPU delegate");
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                        }
 
-                       if (mInterpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk)
+                       if (mInterpreter->ModifyGraphWithDelegate(mDelegate) != kTfLiteOk)
                        {
                                LOGE("Failed to construct GPU delegate");
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -160,13 +161,14 @@ namespace TFLiteImpl
                        for (auto& dim : layer.second.shape)
                                size *= dim;
 
+                       void *pBuff;
                        switch (layer.second.data_type) {
                        case INFERENCE_TENSOR_DATA_TYPE_UINT8:
-                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
+                               pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
                                break;
                        case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
-                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
+                               pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
                                break;
                        default:
@@ -194,20 +196,21 @@ namespace TFLiteImpl
                        for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size; ++idx2)
                                size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2];
 
+                       void *pBuff;
                        switch (mInterpreter->tensor(mOutputLayerId[layer.first])->type) {
                        case kTfLiteUInt8:
                                LOGI("type is kTfLiteUInt8");
-                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
+                               pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
                                break;
                        case kTfLiteInt64:
                                LOGI("type is kTfLiteInt64");
-                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
+                               pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
                                buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
                                break;
                        case kTfLiteFloat32:
                                LOGI("type is kTfLiteFloat32");
-                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
+                               pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
                                break;
                        default:
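
A note on the pBuff change in the two hunks above: a declaration with an
initializer inside an unbraced case (the old `auto pBuff = ...`) is rejected
by conforming compilers, because later case labels would jump over the
initialization, and repeating the name across cases of one switch also
redeclares it in the same scope. Hoisting a single `void *pBuff;` above the
switch turns every case into a plain assignment. Schematically, with the
default branch elided (names taken from the patch):

    void *pBuff;                           // one declaration for all cases
    switch (layer.second.data_type) {
    case INFERENCE_TENSOR_DATA_TYPE_UINT8:
            pBuff = mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]);
            break;
    case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
            pBuff = mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]);
            break;
    default:
            break;                         // error handling elided in this sketch
    }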
index 82605f626d5b6065b1a7d42eb01a8239c95e71c2..800902201bfd436cb3518330fa52190e8d1cd08a 100644
@@ -102,7 +102,9 @@ namespace TFLiteImpl
 
                std::string mConfigFile;
                std::string mWeightFile;
-               int mTargetTypes;
+               int mTargetTypes { INFERENCE_TARGET_NONE };
+
+               TfLiteDelegate *mDelegate {};
        };
 
 } /* InferenceEngineImpl */
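
As a design note (a hypothetical alternative, not what this patch does), the
same lifetime could be managed with RAII: holding the delegate in a
std::unique_ptr whose deleter is TfLiteGpuDelegateV2Delete makes the release
automatic and leaves the destructor body empty:

    #include <memory>
    #include "tensorflow/lite/delegates/gpu/delegate.h"

    // Smart pointer that calls TfLiteGpuDelegateV2Delete() on destruction.
    using GpuDelegatePtr =
            std::unique_ptr<TfLiteDelegate, decltype(&TfLiteGpuDelegateV2Delete)>;

    GpuDelegatePtr mDelegate { nullptr, TfLiteGpuDelegateV2Delete };

    // The creation site would then become:
    //   mDelegate.reset(TfLiteGpuDelegateV2Create(&options));
    //   mInterpreter->ModifyGraphWithDelegate(mDelegate.get());

Either way, the in-class initializers added to the header above
(mTargetTypes { INFERENCE_TARGET_NONE } and the value-initialized
mDelegate {}) keep the destructor's null check well defined even when the
GPU path was never taken.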