InferenceTFLite: drop mInputData member
author    Vibhav Aggarwal <v.aggarwal@samsung.com>
Thu, 4 Jan 2024 07:37:30 +0000 (16:37 +0900)
committer Vibhav Aggarwal <v.aggarwal@samsung.com>
Fri, 5 Jan 2024 09:55:51 +0000 (18:55 +0900)
[Issue type] code cleanup

The mInputData member only cached the typed_tensor() pointers that
mInterpreter already owns, so drop it and bind the tensor buffers
directly. While at it, replace the if/else chains for tensor type
dispatch with switch statements, and key the output buffer map by the
layer name (layer.first) instead of re-reading the tensor name from
the interpreter.

Change-Id: Ifaff8ab6b1f1b16037b6249bd7918f8984282c35
Signed-off-by: Vibhav Aggarwal <v.aggarwal@samsung.com>
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

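Note: the refactored code below binds one scoped pBuff per switch case and keys the
buffer map by the layer name. A minimal sketch of that pattern follows for reference;
DataType, TensorBuffer and the stand-in layer map are simplified placeholders, not
the engine's actual types, and the braces around each case body are what make the
auto pBuff declarations legal C++:

// sketch.cpp -- simplified stand-in, not the engine code.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

enum DataType { DT_UINT8, DT_FLOAT32 };

struct TensorBuffer {
        void *addr;
        DataType type;
        size_t size;
        int owner;
};

int main()
{
        std::vector<uint8_t> u8(16); // stands in for typed_tensor<uint8_t>()
        std::vector<float> f32(16);  // stands in for typed_tensor<float>()
        std::map<std::string, DataType> layers = { { "input0", DT_UINT8 },
                                                   { "input1", DT_FLOAT32 } };
        std::map<std::string, TensorBuffer> buffers;

        for (auto& layer : layers) {
                TensorBuffer buffer;

                switch (layer.second) {
                case DT_UINT8: {
                        // pBuff lives only in this case; no member is needed to hold it.
                        auto pBuff = static_cast<void *>(u8.data());
                        buffer = { pBuff, DT_UINT8, u8.size(), 1 };
                        break;
                }
                case DT_FLOAT32: {
                        auto pBuff = static_cast<void *>(f32.data());
                        buffer = { pBuff, DT_FLOAT32, f32.size() * 4, 1 };
                        break;
                }
                default:
                        fprintf(stderr, "Not supported\n");
                        return -1;
                }

                // Key the map by the layer name, as the diff below does with layer.first.
                buffers.insert(std::make_pair(layer.first, buffer));
        }

        printf("bound %zu buffers\n", buffers.size());
        return 0;
}

The sketch builds with any C++11 compiler (e.g. g++ -std=c++11 sketch.cpp).
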
index a1c8517a5a920e5055d8e417c18bc1c0d8d3dba4..f37454bd58e4caf54276e23f5dea908170d82eec 100644 (file)
@@ -154,27 +154,24 @@ namespace TFLiteImpl
                        SetInterpreterInfo();
                }
 
-               mInputData.clear();
-
-               void *pBuff = NULL;
-
                for (auto& layer : mInputLayers) {
                        size_t size = 1;
                        inference_engine_tensor_buffer buffer;
                        for (auto& dim : layer.second.shape)
                                size *= dim;
 
-                       if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-                               mInputData.push_back(
-                                               mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
-                               pBuff = mInputData.back();
+                       switch (layer.second.data_type) {
+                       case INFERENCE_TENSOR_DATA_TYPE_UINT8: {
+                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-                       } else if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
-                               mInputData.push_back(
-                                               mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
-                               pBuff = mInputData.back();
+                               break;
+                       }
+                       case INFERENCE_TENSOR_DATA_TYPE_FLOAT32: {
+                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
-                       } else {
+                               break;
+                       }
+                       default:
                                LOGE("Not supported");
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
@@ -193,31 +188,37 @@ namespace TFLiteImpl
                        SetInterpreterInfo();
                }
 
-               void *pBuff = NULL;
                for (auto& layer : mOutputLayers) {
                        inference_engine_tensor_buffer buffer;
                        size_t size = 1;
                        for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size; ++idx2)
                                size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2];
 
-                       if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteUInt8) {
+                       switch (mInterpreter->tensor(mOutputLayerId[layer.first])->type) {
+                       case kTfLiteUInt8: {
                                LOGI("type is kTfLiteUInt8");
-                               pBuff = (void *) mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]);
+                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-                       } else if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteInt64) {
+                               break;
+                       }
+                       case kTfLiteInt64: {
                                LOGI("type is kTfLiteInt64");
-                               pBuff = (void*)mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]);
+                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
                                buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
-                       } else if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteFloat32) {
+                               break;
+                       }
+                       case kTfLiteFloat32: {
                                LOGI("type is kTfLiteFloat32");
-                               pBuff = (void *) mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]);
+                               auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
-                       } else {
+                               break;
+                       }
+                       default:
                                LOGE("Not supported");
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
 
-                       buffers.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layer.first])->name, buffer));
+                       buffers.insert(std::make_pair(layer.first, buffer));
                }
                LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
index d4915007a49a0cda4a869da8378a5e57930617a4..82605f626d5b6065b1a7d42eb01a8239c95e71c2 100644 (file)
@@ -93,7 +93,6 @@ namespace TFLiteImpl
 
                std::unique_ptr<tflite::Interpreter> mInterpreter;
                std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
-               std::vector<void *> mInputData;
 
                std::map<std::string, inference_engine_tensor_info> mInputLayers;
                std::map<std::string, inference_engine_tensor_info> mOutputLayers;
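
A note on the per-case braces in the switch statements above: without them, every
case shares a single scope, so a later auto pBuff would be a redefinition, and
reaching a later label would jump past the initialization of an earlier one. A
hypothetical minimal reproduction, not engine code:

// repro.cpp -- does not compile, for illustration only.
int main(int argc, char **argv)
{
        (void) argv;
        switch (argc) {
        case 1:
                auto pBuff = static_cast<void *>(&argc); // initialized here...
                return pBuff != nullptr;
        default: // ...yet this label can be reached without running that initialization
                return 0;
        }
}

GCC and Clang reject this with diagnostics along the lines of "crosses
initialization of 'void* pBuff'" or "jump bypasses variable initialization";
giving each case its own block, as the hunks above do, restores a well-formed
scope. And since mInterpreter owns the tensor memory that typed_tensor() points
into, nothing needs to cache these pointers across calls, which is what makes
dropping mInputData safe.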