Set OutputLayers when it is an empty vector 14/258214/2 submit/tizen/20210512.091922 submit/tizen/20210512.095706 submit/tizen/20210513.024033 submit/tizen/20210513.024256 submit/tizen/20210513.034723
authorHyunsoo Park <hance.park@samsung.com>
Wed, 12 May 2021 08:40:50 +0000 (17:40 +0900)
committerHyunsoo Park <hance.park@samsung.com>
Wed, 12 May 2021 09:00:32 +0000 (18:00 +0900)
Change-Id: I035159ce30cdc0edafdce72324a235693493d802
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
src/inference_engine_tflite.cpp

index 731e7e8e7897ee6af0b9d1aab442d464e343a561..bb4f66cc0c6f14edd1980629f25b7c3a051c013c 100644 (file)
@@ -228,8 +228,14 @@ namespace TFLiteImpl
        int InferenceTFLite::GetOutputTensorBuffers(
                        std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
-               void *pBuff = NULL;
+               LOGI("ENTER");
+
+               if (mOutputLayers.empty()) {
+                       SetInterpreterInfo();
+               }
 
+               void *pBuff = NULL;
+               LOGI("mOutputLayers size [%d]", mOutputLayers.size());
                for (auto& layer : mOutputLayers) {
                        inference_engine_tensor_buffer buffer;
                        size_t size = 1;
@@ -258,6 +264,7 @@ namespace TFLiteImpl
 
                        buffers.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layer.first])->name, buffer));
                }
+               LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
@@ -438,6 +445,44 @@ namespace TFLiteImpl
                        }
                }
 
+               if (mOutputLayers.empty()) {
+                       LOGI("mOutputLayers is empty. layers and tensors that mInterpreter has will be returned.");
+
+                       mOutputLayers.clear();
+                       for (auto& layer : mOutputLayerId) {
+
+                               std::vector<size_t> shape_nhwc;
+
+                               for (int idx = 0;
+                                        idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
+                                       shape_nhwc.push_back(
+                                                       mInterpreter->tensor(layer.second)->dims->data[idx]);
+                               }
+
+                               inference_engine_tensor_info tensor_info {
+                                       shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
+                                       INFERENCE_TENSOR_DATA_TYPE_NONE, 1
+                               };
+
+                               if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
+                                       LOGI("type is kTfLiteUInt8");
+                                       tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+                               } else if (mInterpreter->tensor(layer.second)->type ==
+                                                  kTfLiteFloat32) {
+                                       LOGI("type is kTfLiteFloat32");
+                                       tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+                               } else {
+                                       LOGE("Not supported");
+                                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+                               }
+
+                               for (auto& dim : tensor_info.shape) {
+                                       tensor_info.size *= dim;
+                               }
+                               mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
+                       }
+               }
+
                return INFERENCE_ENGINE_ERROR_NONE;
        }