From: heechul.jeon
Date: Thu, 30 Jun 2022 07:15:52 +0000 (+0900)
Subject: InferenceTFLite: Remove code redundancy
X-Git-Tag: submit/tizen/20220824.063348~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=689f25360305adb5c00b0b78ac04477aefdda256;p=platform%2Fcore%2Fmultimedia%2Finference-engine-tflite.git

InferenceTFLite: Remove code redundancy

[Version] 0.0.4
[Issue type] code cleanup

Change-Id: Ia6942cd730aedd74f5acbd98b75f6b4e1b7dabfa
Signed-off-by: heechul.jeon
---

diff --git a/packaging/inference-engine-tflite.spec b/packaging/inference-engine-tflite.spec
index fb56e8c..8277bda 100644
--- a/packaging/inference-engine-tflite.spec
+++ b/packaging/inference-engine-tflite.spec
@@ -1,6 +1,6 @@
 Name:       inference-engine-tflite
 Summary:    Tensorflow-Lite based implementation of inference-engine-interface
-Version:    0.0.3
+Version:    0.0.4
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index 36e671d..e78b971 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -366,81 +366,22 @@ namespace TFLiteImpl
     int InferenceTFLite::SetInterpreterInfo()
     {
+        int ret = INFERENCE_ENGINE_ERROR_NONE;
         LOGI("ENTER");
+
         if (mInputLayers.empty()) {
             LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
-            mInputLayers.clear();
-            for (auto& layer : mInputLayerId) {
-
-                std::vector<size_t> shape_nhwc;
-
-                for (int idx = 0;
-                     idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
-                    shape_nhwc.push_back(
-                            mInterpreter->tensor(layer.second)->dims->data[idx]);
-                }
-
-                inference_engine_tensor_info tensor_info {
-                    shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
-                    INFERENCE_TENSOR_DATA_TYPE_NONE, 1
-                };
-
-                if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
-                    LOGI("type is kTfLiteUInt8");
-                    tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                } else if (mInterpreter->tensor(layer.second)->type ==
-                           kTfLiteFloat32) {
-                    LOGI("type is kTfLiteFloat32");
-                    tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                } else {
-                    LOGE("Not supported");
-                    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-                }
-
-                for (auto& dim : tensor_info.shape) {
-                    tensor_info.size *= dim;
-                }
-                mInputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
-            }
+            ret = FillLayer(mInputLayers, mInputLayerId);
+            if (ret != INFERENCE_ENGINE_ERROR_NONE)
+                return ret;
         }

         if (mOutputLayers.empty()) {
             LOGI("mOutputLayers is empty. layers and tensors that mInterpreter has will be returned.");
-
-            mOutputLayers.clear();
-            for (auto& layer : mOutputLayerId) {
-
-                std::vector<size_t> shape_nhwc;
-
-                for (int idx = 0;
-                     idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
-                    shape_nhwc.push_back(
-                            mInterpreter->tensor(layer.second)->dims->data[idx]);
-                }
-
-                inference_engine_tensor_info tensor_info {
-                    shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
-                    INFERENCE_TENSOR_DATA_TYPE_NONE, 1
-                };
-
-                if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
-                    LOGI("type is kTfLiteUInt8");
-                    tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                } else if (mInterpreter->tensor(layer.second)->type ==
-                           kTfLiteFloat32) {
-                    LOGI("type is kTfLiteFloat32");
-                    tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                } else {
-                    LOGE("Not supported");
-                    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-                }
-
-                for (auto& dim : tensor_info.shape) {
-                    tensor_info.size *= dim;
-                }
-                mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
-            }
+            ret = FillLayer(mOutputLayers, mOutputLayerId);
+            if (ret != INFERENCE_ENGINE_ERROR_NONE)
+                return ret;
         }
         LOGI("LEAVE");
         return INFERENCE_ENGINE_ERROR_NONE;
@@ -471,6 +412,49 @@ namespace TFLiteImpl
         }
     }

+    int InferenceTFLite::FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
+                                   std::map<std::string, int>& layerId)
+    {
+        layers.clear();
+        for (auto& layer : layerId) {
+
+            std::vector<size_t> shape_nhwc;
+
+            for (int idx = 0;
+                 idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
+                shape_nhwc.push_back(
+                        mInterpreter->tensor(layer.second)->dims->data[idx]);
+            }
+
+            inference_engine_tensor_info tensor_info {
+                shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
+                INFERENCE_TENSOR_DATA_TYPE_NONE, 1
+            };
+
+            switch (mInterpreter->tensor(layer.second)->type)
+            {
+            case kTfLiteUInt8:
+                LOGI("type is kTfLiteUInt8");
+                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+                break;
+            case kTfLiteFloat32:
+                LOGI("type is kTfLiteFloat32");
+                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+                break;
+            default:
+                LOGE("Not supported");
+                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+            }
+
+            for (auto& dim : tensor_info.shape) {
+                tensor_info.size *= dim;
+            }
+            layers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
+
+        }
+        return INFERENCE_ENGINE_ERROR_NONE;
+    }
+
     extern "C"
     {
         class IInferenceEngineCommon *EngineCommonInit(void)
diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 33dd1f4..d491500 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -88,6 +88,8 @@ namespace TFLiteImpl
         void FillLayerId(std::map<std::string, int>& layerId,
                          std::map<std::string, inference_engine_tensor_info>& layers,
                          const std::vector<int>& buffer);
+        int FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
+                      std::map<std::string, int>& layerId);

         std::unique_ptr<tflite::Interpreter> mInterpreter;
         std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
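
Note: for illustration only, below is a minimal standalone C++ sketch of the
deduplication pattern this patch applies: two near-identical per-layer loops
collapsed into a single helper that both the input and output call sites
reuse. TensorInfo, the shape table, and the hypothetical FillLayer signature
here are simplified stand-ins, not the real inference_engine_tensor_info or
TfLiteTensor APIs; the sketch compiles on its own.

// Hypothetical stand-ins for the Tizen types; not the real API.
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct TensorInfo {
    std::vector<std::size_t> shape;
    std::size_t size = 1;   // element count = product of all dims
};

// Analogue of InferenceTFLite::FillLayer(): rebuild `layers` from `layerId`,
// looking up each tensor's shape by its id in a plain shape table.
int FillLayer(std::map<std::string, TensorInfo>& layers,
              const std::map<std::string, int>& layerId,
              const std::vector<std::vector<std::size_t>>& shapes)
{
    layers.clear();
    for (const auto& layer : layerId) {
        TensorInfo info{ shapes[layer.second], 1 };
        for (std::size_t dim : info.shape)
            info.size *= dim;                // accumulate element count
        layers.insert({ layer.first, info });
    }
    return 0;   // stands in for INFERENCE_ENGINE_ERROR_NONE
}

int main()
{
    const std::vector<std::vector<std::size_t>> shapes = { { 1, 224, 224, 3 } };
    const std::map<std::string, int> inputIds  = { { "input",  0 } };
    const std::map<std::string, int> outputIds = { { "output", 0 } };
    std::map<std::string, TensorInfo> inputs, outputs;

    // Both call sites now share one helper, mirroring SetInterpreterInfo().
    FillLayer(inputs, inputIds, shapes);
    FillLayer(outputs, outputIds, shapes);
    std::cout << inputs["input"].size << '\n';   // 1*224*224*3 = 150528
}

Keeping the error-code return inside the shared helper preserves the
early-return behavior that each of the original duplicated blocks had for
unsupported tensor types.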