InferenceTFLite: Remove code redundancy 17/280117/1
author heechul.jeon <heechul.jeon@samsung.com>
Thu, 30 Jun 2022 01:47:29 +0000 (10:47 +0900)
committer Hyunsoo Park <hance.park@samsung.com>
Wed, 24 Aug 2022 06:17:53 +0000 (15:17 +0900)
[Version] 0.0.3
[Issue type] code cleanup

Change-Id: Ifa72db34f18c9cd89ac8055a8dcc6c5529ec9a87
Signed-off-by: heechul.jeon <heechul.jeon@samsung.com>
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

index 286c0fd6b89a013b413bf699006369c9c5c6cddb..36e671d63a7fcfdf9c9480ca43afab7dea998b07 100644 (file)
@@ -134,49 +134,8 @@ namespace TFLiteImpl
                LOGI("mInterpreter->tensors_size() :[%zu]",
                         mInterpreter->tensors_size());
 
-               // input tensor
-               std::map<std::string, int>().swap(mInputLayerId);
-               const std::vector<int>& inputs = mInterpreter->inputs();
-               if (!inputs.empty()) {
-                       for (auto& input : inputs) {
-                               mInputLayerId.insert(std::make_pair(mInterpreter->tensor(input)->name, input));
-                       }
-               } else {
-                       for (auto& layer: mInputLayers) {
-                               LOGI("mInputLayer list [%s]", layer.first.c_str());
-                               for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
-                                        ++idx) {
-                                       if (mInterpreter->tensor(idx)->name == NULL)
-                                               continue;
-                                       if ((layer.first).compare(mInterpreter->tensor(idx)->name) == 0) {
-                                               mInputLayerId.insert(std::make_pair(layer.first, idx));
-                                               break;
-                                       }
-                               }
-                       }
-               }
-
-               // output tensor
-               std::map<std::string, int>().swap(mOutputLayerId);
-               const std::vector<int>& outputs = mInterpreter->outputs();
-               if (!outputs.empty()) {
-                       for (auto& output : outputs) {
-                               mOutputLayerId.insert(std::make_pair(mInterpreter->tensor(output)->name, output));
-                       }
-               } else {
-                       for (auto& layer : mOutputLayers) {
-                               LOGI("mOutputLayer list [%s]", layer.first.c_str());
-                               for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
-                                        ++idx) {
-                                       if (mInterpreter->tensor(idx)->name == NULL)
-                                               continue;
-                                       if ((layer.first).compare(mInterpreter->tensor(idx)->name) == 0) {
-                                               mOutputLayerId.insert(std::make_pair(layer.first, idx));
-                                               break;
-                                       }
-                               }
-                       }
-               }
+               FillLayerId(mInputLayerId, mInputLayers, mInterpreter->inputs());
+               FillLayerId(mOutputLayerId, mOutputLayers, mInterpreter->outputs());
 
                if (mInterpreter->AllocateTensors() != kTfLiteOk) {
                        LOGE("Fail to allocate tensor");
@@ -487,6 +446,31 @@ namespace TFLiteImpl
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
+       void InferenceTFLite::FillLayerId(std::map<std::string, int>& layerId,
+                       std::map<std::string, inference_engine_tensor_info>& layers,
+                       const std::vector<int>& buffer)
+       {
+               layerId.clear();
+
+               if (!buffer.empty()) {
+                       for (auto& idx : buffer)
+                               layerId.insert(std::make_pair(mInterpreter->tensor(idx)->name, idx));
+                       return;
+               }
+
+               for (auto& layer: layers) {
+                       LOGI("Layer list [%s]", layer.first.c_str());
+                       for (unsigned int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
+                               if (mInterpreter->tensor(idx)->name == NULL)
+                                       continue;
+                               if ((layer.first).compare(mInterpreter->tensor(idx)->name) == 0) {
+                                       layerId.insert(std::make_pair(layer.first, idx));
+                                       break;
+                               }
+                       }
+               }
+       }
+
        extern "C"
        {
                class IInferenceEngineCommon *EngineCommonInit(void)
index 8cf942c8cb0ba86e84ce9a7f37d313f9995cb2be..33dd1f4daa57a9c8ec5b74c3c91439e129293d04 100644 (file)
@@ -85,6 +85,9 @@ namespace TFLiteImpl
 
        private:
                int SetInterpreterInfo();
+               void FillLayerId(std::map<std::string, int>& layerId,
+                               std::map<std::string, inference_engine_tensor_info>& layers,
+                               const std::vector<int>& buffer);
 
                std::unique_ptr<tflite::Interpreter> mInterpreter;
                std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;