Applying coding rule 19/280119/1 accepted/tizen_7.0_unified accepted/tizen_7.0_unified_hotfix accepted/tizen_8.0_unified tizen_7.0 tizen_7.0_hotfix tizen_8.0 accepted/tizen/7.0/unified/20221110.060702 accepted/tizen/7.0/unified/hotfix/20221116.105401 accepted/tizen/8.0/unified/20231005.093425 accepted/tizen/unified/20220825.063540 submit/tizen/20220824.063348 tizen_7.0_m2_release tizen_8.0_m2_release
author: Hyunsoo Park <hance.park@samsung.com>
Thu, 7 Jul 2022 02:31:08 +0000 (11:31 +0900)
committer: Hyunsoo Park <hance.park@samsung.com>
Wed, 24 Aug 2022 06:18:04 +0000 (15:18 +0900)
Change-Id: Ieb020b720fa19cccd1826706d67ae7887332002b
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
packaging/inference-engine-tflite.spec
src/inference_engine_tflite.cpp

index 8277bda6dd6706ebf9964b99d2b1cf91bd503931..566eae8062a31f4dd01d0f722e8f18b403be5887 100644 (file)
@@ -1,6 +1,6 @@
 Name:       inference-engine-tflite
 Summary:    Tensorflow-Lite based implementation of inference-engine-interface
-Version:    0.0.4
+Version:    0.0.5
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
index e78b971269409e239cc4e900170625a6c48af095..8f22bd996a7272eff890f2e33c4240a6558cb583 100644 (file)
@@ -161,16 +161,15 @@ namespace TFLiteImpl
                for (auto& layer : mInputLayers) {
                        size_t size = 1;
                        inference_engine_tensor_buffer buffer;
-                       for (auto& dim : layer.second.shape) {
+                       for (auto& dim : layer.second.shape)
                                size *= dim;
-                       }
 
-                       if ( (layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+                       if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
                                mInputData.push_back(
                                                mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
                                pBuff = mInputData.back();
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-                       } else if ( (layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+                       } else if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
                                mInputData.push_back(
                                                mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
                                pBuff = mInputData.back();
@@ -198,11 +197,8 @@ namespace TFLiteImpl
                for (auto& layer : mOutputLayers) {
                        inference_engine_tensor_buffer buffer;
                        size_t size = 1;
-                       for (int idx2 = 0;
-                                idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size;
-                                ++idx2) {
+                       for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size; ++idx2)
                                size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2];
-                       }
 
                        if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteUInt8) {
                                LOGI("type is kTfLiteUInt8");
@@ -245,30 +241,25 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+               mOutputLayers.clear();
 
                for (auto& layer :mOutputLayerId) {
-                       LOGI("output layer ID: %d", layer.second);
-                       if ( layer.second < 0) {
-                               LOGE("Invalid output layer");
+                       if (layer.second < 0) {
+                               LOGE("Invalid output layer ID [%d]", layer.second);
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                        }
 
                        inference_engine_tensor_info tensor_info;
 
-                       LOGI("mInterpreter->tensor(%d)->dims name[%s]",
-                                layer.second, mInterpreter->tensor(layer.second)->name);
-                       LOGI("mInterpreter->tensor(%d)->dims size[%d]",
-                                layer.second, mInterpreter->tensor(layer.second)->dims->size);
-                       LOGI("mInterpreter->tensor(%d)->dims type[%d]",
-                                layer.second, mInterpreter->tensor(layer.second)->type);
+                       LOGI("mInterpreter->tensor(%d)->dims name[%s] size[%d] type[%d]",
+                                layer.second,
+                                mInterpreter->tensor(layer.second)->name,
+                                mInterpreter->tensor(layer.second)->dims->size,
+                                mInterpreter->tensor(layer.second)->type);
 
                        std::vector<size_t> shape_nhwc;
-                       for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size;
-                                idx++) {
-                               shape_nhwc.push_back(
-                                               mInterpreter->tensor(layer.second)->dims->data[idx]);
-                       }
+                       for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size; idx++)
+                               shape_nhwc.push_back(mInterpreter->tensor(layer.second)->dims->data[idx]);
 
                        //tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
                        tensor_info.shape = shape_nhwc;
@@ -287,9 +278,10 @@ namespace TFLiteImpl
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
                        tensor_info.size = 1;
-                       for (auto & dim : tensor_info.shape) {
+
+                       for (auto & dim : tensor_info.shape)
                                tensor_info.size *= dim;
-                       }
+
                        mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
                }
 
@@ -304,10 +296,10 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               for (auto& layer : property.layers) {
+               for (auto& layer : property.layers)
                        LOGI("input layer name = %s", layer.first.c_str());
-               }
-               std::map<std::string, inference_engine_tensor_info>().swap(mInputLayers);
+
+               mInputLayers.clear();
                mInputLayers = property.layers;
 
                LOGI("LEAVE");
@@ -320,10 +312,10 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               for (auto& layer : property.layers) {
+               for (auto& layer : property.layers)
                        LOGI("input layer name = %s", layer.first.c_str());
-               }
-               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+
+               mOutputLayers.clear();
                mOutputLayers = property.layers;
 
                LOGI("LEAVE");
@@ -420,19 +412,15 @@ namespace TFLiteImpl
 
                        std::vector<size_t> shape_nhwc;
 
-                       for (int idx = 0;
-                                       idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
-                               shape_nhwc.push_back(
-                                               mInterpreter->tensor(layer.second)->dims->data[idx]);
-                       }
+                       for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size; idx++)
+                               shape_nhwc.push_back(mInterpreter->tensor(layer.second)->dims->data[idx]);
 
                        inference_engine_tensor_info tensor_info {
                                shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
                                INFERENCE_TENSOR_DATA_TYPE_NONE, 1
                        };
 
-                       switch (mInterpreter->tensor(layer.second)->type)
-                       {
+                       switch (mInterpreter->tensor(layer.second)->type) {
                        case kTfLiteUInt8:
                                LOGI("type is kTfLiteUInt8");
                                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
@@ -446,9 +434,9 @@ namespace TFLiteImpl
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
 
-                       for (auto& dim : tensor_info.shape) {
+                       for (auto& dim : tensor_info.shape)
                                tensor_info.size *= dim;
-                       }
+
                        layers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
 
                }