InferenceTFLite: Remove code redundancy 18/280118/1
author heechul.jeon <heechul.jeon@samsung.com>
Thu, 30 Jun 2022 07:15:52 +0000 (16:15 +0900)
committer Hyunsoo Park <hance.park@samsung.com>
Wed, 24 Aug 2022 06:17:59 +0000 (15:17 +0900)
[Version] 0.0.4
[Issue type] code cleanup

Change-Id: Ia6942cd730aedd74f5acbd98b75f6b4e1b7dabfa
Signed-off-by: heechul.jeon <heechul.jeon@samsung.com>
packaging/inference-engine-tflite.spec
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

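In short: the two near-identical per-tensor loops in SetInterpreterInfo() (one for input layers, one for output layers) are hoisted into a single FillLayer() helper, and the if/else chain on the tensor element type becomes a switch. Both call sites then reduce to the shape below (a sketch of the resulting flow, condensed from the diff that follows):

	int ret = INFERENCE_ENGINE_ERROR_NONE;
	if (mInputLayers.empty()) {
		ret = FillLayer(mInputLayers, mInputLayerId);    /* was ~30 duplicated lines */
		if (ret != INFERENCE_ENGINE_ERROR_NONE)
			return ret;
	}
	if (mOutputLayers.empty()) {
		ret = FillLayer(mOutputLayers, mOutputLayerId);  /* same helper, output maps */
		if (ret != INFERENCE_ENGINE_ERROR_NONE)
			return ret;
	}
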
packaging/inference-engine-tflite.spec
index fb56e8ce4ab053e692ed2bb28b3c15eaa4b27665..8277bda6dd6706ebf9964b99d2b1cf91bd503931 100644
@@ -1,6 +1,6 @@
 Name:       inference-engine-tflite
 Summary:    Tensorflow-Lite based implementation of inference-engine-interface
-Version:    0.0.3
+Version:    0.0.4
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
src/inference_engine_tflite.cpp
index 36e671d63a7fcfdf9c9480ca43afab7dea998b07..e78b971269409e239cc4e900170625a6c48af095 100644
@@ -366,81 +366,22 @@ namespace TFLiteImpl
 
        int InferenceTFLite::SetInterpreterInfo()
        {
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
                LOGI("ENTER");
+
                if (mInputLayers.empty()) {
                        LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
 
-                       mInputLayers.clear();
-                       for (auto& layer : mInputLayerId) {
-
-                               std::vector<size_t> shape_nhwc;
-
-                               for (int idx = 0;
-                                        idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
-                                       shape_nhwc.push_back(
-                                                       mInterpreter->tensor(layer.second)->dims->data[idx]);
-                               }
-
-                               inference_engine_tensor_info tensor_info {
-                                       shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
-                                       INFERENCE_TENSOR_DATA_TYPE_NONE, 1
-                               };
-
-                               if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
-                                       LOGI("type is kTfLiteUInt8");
-                                       tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                               } else if (mInterpreter->tensor(layer.second)->type ==
-                                                  kTfLiteFloat32) {
-                                       LOGI("type is kTfLiteFloat32");
-                                       tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                               } else {
-                                       LOGE("Not supported");
-                                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-                               }
-
-                               for (auto& dim : tensor_info.shape) {
-                                       tensor_info.size *= dim;
-                               }
-                               mInputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
-                       }
+                       ret = FillLayer(mInputLayers, mInputLayerId);
+                       if (ret != INFERENCE_ENGINE_ERROR_NONE)
+                               return ret;
                }
 
                if (mOutputLayers.empty()) {
                        LOGI("mOutputLayers is empty. layers and tensors that mInterpreter has will be returned.");
-
-                       mOutputLayers.clear();
-                       for (auto& layer : mOutputLayerId) {
-
-                               std::vector<size_t> shape_nhwc;
-
-                               for (int idx = 0;
-                                        idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
-                                       shape_nhwc.push_back(
-                                                       mInterpreter->tensor(layer.second)->dims->data[idx]);
-                               }
-
-                               inference_engine_tensor_info tensor_info {
-                                       shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
-                                       INFERENCE_TENSOR_DATA_TYPE_NONE, 1
-                               };
-
-                               if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
-                                       LOGI("type is kTfLiteUInt8");
-                                       tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                               } else if (mInterpreter->tensor(layer.second)->type ==
-                                                  kTfLiteFloat32) {
-                                       LOGI("type is kTfLiteFloat32");
-                                       tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                               } else {
-                                       LOGE("Not supported");
-                                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-                               }
-
-                               for (auto& dim : tensor_info.shape) {
-                                       tensor_info.size *= dim;
-                               }
-                               mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
-                       }
+                       ret = FillLayer(mOutputLayers, mOutputLayerId);
+                       if (ret != INFERENCE_ENGINE_ERROR_NONE)
+                               return ret;
                }
                LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -471,6 +412,49 @@ namespace TFLiteImpl
                }
        }
 
+       int InferenceTFLite::FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
+                       std::map<std::string, int>& layerId)
+       {
+               layers.clear();
+               for (auto& layer : layerId) {
+
+                       std::vector<size_t> shape_nhwc;
+
+                       for (int idx = 0;
+                                       idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
+                               shape_nhwc.push_back(
+                                               mInterpreter->tensor(layer.second)->dims->data[idx]);
+                       }
+
+                       inference_engine_tensor_info tensor_info {
+                               shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
+                               INFERENCE_TENSOR_DATA_TYPE_NONE, 1
+                       };
+
+                       switch (mInterpreter->tensor(layer.second)->type)
+                       {
+                       case kTfLiteUInt8:
+                               LOGI("type is kTfLiteUInt8");
+                               tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+                               break;
+                       case kTfLiteFloat32:
+                               LOGI("type is kTfLiteFloat32");
+                               tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+                               break;
+                       default:
+                               LOGE("Not supported");
+                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+                       }
+
+                       for (auto& dim : tensor_info.shape) {
+                               tensor_info.size *= dim;
+                       }
+                       layers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
+
+               }
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
        extern "C"
        {
                class IInferenceEngineCommon *EngineCommonInit(void)
src/inference_engine_tflite_private.h
index 33dd1f4daa57a9c8ec5b74c3c91439e129293d04..d4915007a49a0cda4a869da8378a5e57930617a4 100644
@@ -88,6 +88,8 @@ namespace TFLiteImpl
                void FillLayerId(std::map<std::string, int>& layerId,
                                std::map<std::string, inference_engine_tensor_info>& layers,
                                const std::vector<int>& buffer);
+               int FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
+                               std::map<std::string, int>& layerId);
 
                std::unique_ptr<tflite::Interpreter> mInterpreter;
                std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
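
For reference, a minimal standalone sketch of the pattern this cleanup applies: one helper walks a name-to-tensor-id map, reads each tensor's shape and element type, computes the total element count as the product of the dimensions, and fills the caller's layer map. Names and types here (FakeTensor, g_tensors, TYPE_UINT8, ...) are hypothetical stand-ins for illustration, not the real TFLite or inference-engine-interface API:

	#include <cstdio>
	#include <map>
	#include <string>
	#include <vector>

	/* Simplified stand-ins for the engine types (hypothetical). */
	enum TensorType { TYPE_UINT8, TYPE_FLOAT32, TYPE_OTHER };
	struct TensorInfo {
		std::vector<size_t> shape;
		TensorType data_type;
		size_t size; /* total element count: product of all dimensions */
	};
	enum { ERROR_NONE = 0, ERROR_NOT_SUPPORTED_FORMAT = -1 };

	/* Hypothetical tensor table playing the role of mInterpreter->tensor(id). */
	struct FakeTensor { std::vector<size_t> dims; TensorType type; };
	static std::map<int, FakeTensor> g_tensors = {
		{ 0, { { 1, 224, 224, 3 }, TYPE_FLOAT32 } }, /* a typical NHWC input */
		{ 1, { { 1, 1001 },        TYPE_UINT8   } }, /* a classification output */
	};

	/* The deduplicated helper: the same body serves input and output maps. */
	static int FillLayer(std::map<std::string, TensorInfo>& layers,
			const std::map<std::string, int>& layerId)
	{
		layers.clear();
		for (const auto& layer : layerId) {
			const FakeTensor& t = g_tensors.at(layer.second);
			TensorInfo info { t.dims, t.type, 1 };
			switch (t.type) {
			case TYPE_UINT8:
			case TYPE_FLOAT32:
				break;
			default:
				return ERROR_NOT_SUPPORTED_FORMAT; /* reject unknown types */
			}
			for (size_t dim : info.shape)
				info.size *= dim; /* e.g. 1*224*224*3 = 150528 elements */
			layers.insert(std::make_pair(layer.first, info));
		}
		return ERROR_NONE;
	}

	int main()
	{
		std::map<std::string, int> inputIds  = { { "input",  0 } };
		std::map<std::string, int> outputIds = { { "scores", 1 } };
		std::map<std::string, TensorInfo> inputs, outputs;

		/* Both call sites now share one helper, mirroring the commit. */
		if (FillLayer(inputs, inputIds) == ERROR_NONE &&
		    FillLayer(outputs, outputIds) == ERROR_NONE)
			std::printf("input elements: %zu, output elements: %zu\n",
					inputs["input"].size, outputs["scores"].size);
		return 0;
	}

Passing the destination map by reference is what lets one helper serve both directions, and the early return on an unsupported element type preserves the error behavior of the original duplicated loops.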