fix a regression bug
author     Inki Dae <inki.dae@samsung.com>
           Wed, 27 Nov 2024 02:17:16 +0000 (11:17 +0900)
committer  Inki Dae <inki.dae@samsung.com>
           Wed, 27 Nov 2024 02:21:36 +0000 (11:21 +0900)
[Issue type] : bug fix

Fix a regression bug which was introduced by the patch below,
commit-id : bd031c5165ef11d110fc3b50f8a6db4538121180

The output layer information isn't filled by SetInterpreterInfo(), so
restore the code that the patch dropped, which queries the output tensor
information directly from the TFLite interpreter.

Change-Id: I707e06022eca44695c507b6c5e3a8535baa3fe0f
Signed-off-by: Inki Dae <inki.dae@samsung.com>
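
For context, the restored path reads each output tensor's metadata straight
from the TFLite interpreter. Below is a minimal standalone sketch of that
lookup, assuming the stock TensorFlow Lite C++ API (tflite::Interpreter);
describe_outputs() is a hypothetical helper added here for illustration
only, not part of this module:

    #include <cstdio>
    #include "tensorflow/lite/interpreter.h"

    // Log the name, rank and type of every output tensor -- the same
    // metadata the restored code below collects into mOutputLayers.
    static void describe_outputs(const tflite::Interpreter& interpreter)
    {
            for (int idx : interpreter.outputs()) {
                    const TfLiteTensor* tensor = interpreter.tensor(idx);
                    std::printf("output %d: name(%s) rank(%d) type(%d)\n",
                                idx, tensor->name, tensor->dims->size,
                                static_cast<int>(tensor->type));
            }
    }
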
src/inference_engine_tflite.cpp

index a4e83899190cd5fa1f4e498d25bd0e099aeed4ce..948a1c441e3d948c7369dab60d08c1d5cc5b8b5e 100644
@@ -138,6 +138,12 @@ namespace TFLiteImpl
                FillLayerId(mInputLayerId, mInputLayers, mInterpreter->inputs());
                FillLayerId(mOutputLayerId, mOutputLayers, mInterpreter->outputs());
 
+               for (auto iter = mInputLayerId.begin(); iter != mInputLayerId.end(); ++iter)
+                       LOGI("Input layer : name(%s) -> idx(%d)", iter->first.c_str(), iter->second);
+
+               for (auto iter = mOutputLayerId.begin(); iter != mOutputLayerId.end(); ++iter)
+                       LOGI("Output layer : name(%s) -> idx(%d)", iter->first.c_str(), iter->second);
+
                if (mInterpreter->AllocateTensors() != kTfLiteOk) {
                        LOGE("Fail to allocate tensor");
                        return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
@@ -267,7 +273,53 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               SetInterpreterInfo();
+               mOutputLayers.clear();
+
+               for (auto& layer : mOutputLayerId) {
+                       if (layer.second < 0) {
+                               LOGE("Invalid output layer ID [%d]", layer.second);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       inference_engine_tensor_info tensor_info;
+
+                       LOGI("mInterpreter->tensor(%d)->dims name[%s] size[%d] type[%d]",
+                                       layer.second,
+                                       mInterpreter->tensor(layer.second)->name,
+                                       mInterpreter->tensor(layer.second)->dims->size,
+                                       mInterpreter->tensor(layer.second)->type);
+
+                       std::vector<size_t> shape_nhwc;
+                       for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size; idx++)
+                               shape_nhwc.push_back(mInterpreter->tensor(layer.second)->dims->data[idx]);
+
+                       // TFLite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
+                       tensor_info.shape = shape_nhwc;
+                       tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
+                       if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
+                               LOGI("type is kTfLiteUInt8");
+                               tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+                       } else if (mInterpreter->tensor(layer.second)->type == kTfLiteInt8) {
+                               LOGI("type is kTfLiteInt8");
+                               tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT8;
+                       } else if (mInterpreter->tensor(layer.second)->type == kTfLiteInt64) {
+                               LOGI("type is kTfLiteInt64");
+                               tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
+                       } else if (mInterpreter->tensor(layer.second)->type == kTfLiteFloat32) {
+                               LOGI("type is kTfLiteFloat32");
+                               tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+                       } else {
+                               LOGE("Not supported");
+                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+                       }
+                       tensor_info.size = 1;
+
+                       for (auto& dim : tensor_info.shape)
+                               tensor_info.size *= dim;
+
+                       mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
+               }
+
                property.layers = mOutputLayers;
 
                LOGI("LEAVE");
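
The restored block above sizes each output buffer as the product of its
NHWC dimensions, e.g. a {1, 224, 224, 3} output holds 1*224*224*3 = 150528
elements. A minimal sketch of that computation, using the hypothetical
name element_count():

    #include <cstddef>
    #include <vector>

    // The element count of a tensor is the product of its dimensions,
    // mirroring the tensor_info.size loop in the hunk above.
    static size_t element_count(const std::vector<size_t>& shape)
    {
            size_t count = 1;
            for (size_t dim : shape)
                    count *= dim;
            return count;
    }
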
@@ -296,7 +348,7 @@ namespace TFLiteImpl
                LOGI("ENTER");
 
                for (auto& layer : property.layers)
-                       LOGI("input layer name = %s", layer.first.c_str());
+                       LOGI("output layer name = %s", layer.first.c_str());
 
                mOutputLayers.clear();
                mOutputLayers = property.layers;
@@ -422,11 +474,16 @@ namespace TFLiteImpl
                        std::map<std::string, inference_engine_tensor_info>& layers,
                        const std::vector<int>& buffer)
        {
+               LOGI("ENTER");
+
                layerId.clear();
 
                if (!buffer.empty()) {
-                       for (auto& idx : buffer)
+                       for (auto& idx : buffer) {
+                               LOGI("%s : %d", mInterpreter->tensor(idx)->name, idx);
                                layerId[mInterpreter->tensor(idx)->name] = idx;
+                       }
+
                        return;
                }
 
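FillLayerId() above keys each layer by its tensor name so later lookups can
resolve name -> tensor index. A minimal sketch of that fast path, assuming
the stock tflite::Interpreter API; map_output_ids() is a hypothetical name:

    #include <map>
    #include <string>
    #include "tensorflow/lite/interpreter.h"

    // Build a name -> tensor-index map from the interpreter's output list,
    // as FillLayerId() does when the index buffer is non-empty.
    static std::map<std::string, int>
    map_output_ids(const tflite::Interpreter& interpreter)
    {
            std::map<std::string, int> ids;
            for (int idx : interpreter.outputs())
                    ids[interpreter.tensor(idx)->name] = idx;
            return ids;
    }
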
@@ -443,6 +500,8 @@ namespace TFLiteImpl
                        if (layerId.size() == layers.size())
                                break;
                }
+
+               LOGI("LEAVE");
        }
 
        int InferenceTFLite::FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,