Change layerId and inference_engine_tensor_info to be accessed by layer name (branch: sandbox/tyzeroy/dev)
author: Tae-Young Chung <ty83.chung@samsung.com>
Mon, 11 Jan 2021 06:47:57 +0000 (15:47 +0900)
committer: Tae-Young Chung <ty83.chung@samsung.com>
Mon, 11 Jan 2021 07:10:53 +0000 (16:10 +0900)
Refer to inference-engine-interface commit: "50a24f739d9131e63592b643398ffb6291cac521"
Change-Id: If93d27fc2e8034c260896dd087c975a7eac7d7f1
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

index d9d9f50d48215ecf268a2d24c3abd541a93f6a5d..18838c6439d240d258d354db5c39b76886036f1d 100644 (file)
@@ -108,20 +108,23 @@ namespace TFLiteImpl
                         mInterpreter->tensors_size());
 
                // input tensor
+               std::map<std::string, int>().swap(mInputLayerId);
                if (mInterpreter->inputs().size()) {
-                       mInputLayerId = mInterpreter->inputs();
+                       for (auto iter = mInterpreter->inputs().begin();
+                               iter != mInterpreter->inputs().end(); ++iter) {
+                               mInputLayerId.insert(std::make_pair(mInterpreter->tensor((*iter))->name, (*iter)));
+                       }
                } else {
-                       std::vector<std::string>::iterator iter;
                        mInputLayerId.clear();
-                       for (iter = mInputLayer.begin(); iter != mInputLayer.end();
+                       for (auto iter = mInputLayers.begin(); iter != mInputLayers.end();
                                 ++iter) {
-                               LOGI("mInputLayer list [%s]", (*iter).c_str());
+                               LOGI("mInputLayer list [%s]", (iter->first).c_str());
                                for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
                                         ++idx) {
                                        if (mInterpreter->tensor(idx)->name == NULL)
                                                continue;
-                                       if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
-                                               mInputLayerId.push_back(idx);
+                                       if ((iter->first).compare(mInterpreter->tensor(idx)->name) == 0) {
+                                               mInputLayerId.insert(std::make_pair(mInterpreter->tensor(idx)->name, idx));
                                                break;
                                        }
                                }
@@ -129,20 +132,23 @@ namespace TFLiteImpl
                }
 
                // output tensor
+               std::map<std::string, int>().swap(mOutputLayerId);
                if (mInterpreter->outputs().size()) {
-                       mOutputLayerId = mInterpreter->outputs();
+                       for (auto iter = mInterpreter->outputs().begin();
+                               iter != mInterpreter->outputs().end(); ++iter) {
+                               mOutputLayerId.insert(std::make_pair(mInterpreter->tensor((*iter))->name, (*iter)));
+                       }
                } else {
-                       std::vector<std::string>::iterator iter;
                        mOutputLayerId.clear();
-                       for (iter = mOutputLayer.begin(); iter != mOutputLayer.end();
+                       for (auto iter = mOutputLayers.begin(); iter != mOutputLayers.end();
                                 ++iter) {
-                               LOGI("mOutputLayer list [%s]", (*iter).c_str());
+                               LOGI("mOutputLayer list [%s]", iter->first.c_str());
                                for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
                                         ++idx) {
                                        if (mInterpreter->tensor(idx)->name == NULL)
                                                continue;
-                                       if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
-                                               mOutputLayerId.push_back(idx);
+                                       if ((iter->first).compare(mInterpreter->tensor(idx)->name) == 0) {
+                                               mOutputLayerId.insert(std::make_pair(mInterpreter->tensor(idx)->name, idx));
                                                break;
                                        }
                                }
@@ -154,11 +160,6 @@ namespace TFLiteImpl
                        return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
                }
 
-               for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
-                       mInputAttrType.push_back(
-                                       mInterpreter->tensor(mInputLayerId[idx])->type);
-               }
-
                return ret;
        }
 
@@ -167,7 +168,7 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               if (mInputTensorInfo.empty()) {
+               if (mInputLayers.empty()) {
                        SetInterpreterInfo();
                }
 
@@ -175,22 +176,26 @@ namespace TFLiteImpl
 
                void *pBuff = NULL;
 
-               for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
+               for (auto iter_layer = mInputLayers.begin(); iter_layer != mInputLayers.end(); ++iter_layer/*unsigned int idx = 0; idx < mInputLayerId.size(); ++idx*/) {
                        size_t size = 1;
                        inference_engine_tensor_buffer buffer;
-                       for (std::vector<size_t>::iterator iter =
+                       for (auto iter = (iter_layer->second).shape.begin();
+                               iter != (iter_layer->second).shape.end();
+                               ++iter
+                               /*std::vector<size_t>::iterator iter =
                                                 mInputTensorInfo[idx].shape.begin();
-                                iter != mInputTensorInfo[idx].shape.end(); ++iter) {
+                                iter != mInputTensorInfo[idx].shape.end(); ++iter*/) {
                                size *= (*iter);
                        }
-                       if (mInputAttrType[idx] == kTfLiteUInt8) {
+
+                       if ( (iter_layer->second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
                                mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(
-                                               mInputLayerId[idx]));
+                                               mInputLayerId.find(iter_layer->first)->second));
                                pBuff = mInputData.back();
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-                       } else if (mInputAttrType[idx] == kTfLiteFloat32) {
+                       } else if ( (iter_layer->second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
                                mInputData.push_back(
-                                               mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
+                                               mInterpreter->typed_tensor<float>(mInputLayerId.find(iter_layer->first)->second));
                                pBuff = mInputData.back();
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4,
                                                   1 };
@@ -198,7 +203,7 @@ namespace TFLiteImpl
                                LOGE("Not supported");
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
-                       buffers.insert(std::make_pair(mInputLayer[idx], buffer));
+                       buffers.insert(std::make_pair(iter_layer->first, buffer));
                }
 
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -209,31 +214,31 @@ namespace TFLiteImpl
        {
                void *pBuff = NULL;
 
-               for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) {
+               // TODO: work on this part next (translated leftover Korean dev note)
+               for (auto iter_layer = mOutputLayers.begin(); iter_layer != mOutputLayers.end(); ++iter_layer) {
                        inference_engine_tensor_buffer buffer;
                        size_t size = 1;
                        for (int idx2 = 0;
-                                idx2 < mInterpreter->tensor(mOutputLayerId[idx])->dims->size;
+                                idx2 < mInterpreter->tensor(mOutputLayerId.find(iter_layer->first)->second)->dims->size;
                                 ++idx2) {
-                               size *= mInterpreter->tensor(mOutputLayerId[idx])
-                                                               ->dims->data[idx2];
+                               size *= mInterpreter->tensor(mOutputLayerId.find(iter_layer->first)->second)->dims->data[idx2];
                        }
 
-                       if (mInterpreter->tensor(mOutputLayerId[idx])->type ==
+                       if (mInterpreter->tensor(mOutputLayerId.find(iter_layer->first)->second)->type ==
                                kTfLiteUInt8) {
                                LOGI("type is kTfLiteUInt8");
                                pBuff = (void *) mInterpreter->typed_tensor<uint8_t>(
-                                               mOutputLayerId[idx]);
+                                               mOutputLayerId.find(iter_layer->first)->second);
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-                       } else if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteInt64) {
+                       } else if (mInterpreter->tensor(mOutputLayerId.find(iter_layer->first)->second)->type == kTfLiteInt64) {
                                LOGI("type is kTfLiteInt64");
-                               pBuff = (void*)mInterpreter->typed_tensor<int64_t>(mOutputLayerId[idx]);
+                               pBuff = (void*)mInterpreter->typed_tensor<int64_t>(mOutputLayerId.find(iter_layer->first)->second);
                                buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
-                       } else if (mInterpreter->tensor(mOutputLayerId[idx])->type ==
+                       } else if (mInterpreter->tensor(mOutputLayerId.find(iter_layer->first)->second)->type ==
                                           kTfLiteFloat32) {
                                LOGI("type is kTfLiteFloat32");
                                pBuff = (void *) mInterpreter->typed_tensor<float>(
-                                               mOutputLayerId[idx]);
+                                               mOutputLayerId.find(iter_layer->first)->second);
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4,
                                                   1 };
                        } else {
@@ -241,7 +246,7 @@ namespace TFLiteImpl
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
 
-                       buffers.insert(std::make_pair(mOutputLayer[idx], buffer));
+                       buffers.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId.find(iter_layer->first)->second)->name, buffer));
                }
                return INFERENCE_ENGINE_ERROR_NONE;
        }
@@ -252,8 +257,7 @@ namespace TFLiteImpl
                LOGI("ENTER");
 
                SetInterpreterInfo();
-               property.layer_names = mInputLayer;
-               property.tensor_infos = mInputTensorInfo;
+               property.layers = mInputLayers;
 
                LOGI("LEAVE");
 
@@ -265,44 +269,42 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
 
-               for (std::vector<int>::iterator iter = mOutputLayerId.begin();
+               for (auto iter = mOutputLayerId.begin();
                         iter != mOutputLayerId.end(); ++iter) {
-                       LOGI("output layer ID: %d", (*iter));
-                       if ((*iter) < 0) {
+                       LOGI("output layer ID: %d", iter->second);
+                       if ( iter->second < 0) {
                                LOGE("Invalid output layer");
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                        }
 
-                       mOutputLayer.push_back(mInterpreter->tensor((*iter))->name);
-
                        inference_engine_tensor_info tensor_info;
 
                        LOGI("mInterpreter->tensor((*iter))->dims name[%s]",
-                                mInterpreter->tensor((*iter))->name);
+                                mInterpreter->tensor(iter->second)->name);
                        LOGI("mInterpreter->tensor((*iter))->dims size[%d]",
-                                mInterpreter->tensor((*iter))->dims->size);
+                                mInterpreter->tensor(iter->second)->dims->size);
                        LOGI("mInterpreter->tensor((*iter))->dims type[%d]",
-                                mInterpreter->tensor((*iter))->type);
+                                mInterpreter->tensor(iter->second)->type);
 
                        std::vector<size_t> shape_nhwc;
-                       for (int idx = 0; idx < mInterpreter->tensor((*iter))->dims->size;
+                       for (int idx = 0; idx < mInterpreter->tensor(iter->second)->dims->size;
                                 idx++) {
                                shape_nhwc.push_back(
-                                               mInterpreter->tensor((*iter))->dims->data[idx]);
+                                               mInterpreter->tensor(iter->second)->dims->data[idx]);
                        }
 
                        //tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
                        tensor_info.shape = shape_nhwc;
                        tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
-                       if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+                       if (mInterpreter->tensor(iter->second)->type == kTfLiteUInt8) {
                                LOGI("type is kTfLiteUInt8");
                                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                       } else if (mInterpreter->tensor((*iter))->type == kTfLiteInt64) {
+                       } else if (mInterpreter->tensor(iter->second)->type == kTfLiteInt64) {
                                LOGI("type is kTfLiteInt64");
                                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
-                       } else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
+                       } else if (mInterpreter->tensor(iter->second)->type == kTfLiteFloat32) {
                                LOGI("type is kTfLiteFloat32");
                                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
                        } else {
@@ -315,11 +317,10 @@ namespace TFLiteImpl
                                 iter2 != tensor_info.shape.end(); ++iter2) {
                                tensor_info.size *= (*iter2);
                        }
-                       mOutputTensorInfo.push_back(tensor_info);
+                       mOutputLayers.insert(std::make_pair(mInterpreter->tensor( iter->second /*(*iter)*/)->name, tensor_info));
                }
 
-               property.layer_names = mOutputLayer;
-               property.tensor_infos = mOutputTensorInfo;
+               property.layers = mOutputLayers;
 
                LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -329,22 +330,11 @@ namespace TFLiteImpl
                        inference_engine_layer_property &property)
        {
                LOGI("ENTER");
-
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("input layer name = %s", name.c_str());
+               for (auto iter = property.layers.begin(); iter != property.layers.end(); ++iter) {
+                       LOGI("input layer name = %s", (iter->first).c_str());
                }
-
-               mInputLayer.clear();
-               std::vector<std::string>().swap(mInputLayer);
-
-               mInputTensorInfo.clear();
-               std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
-               mInputLayer = property.layer_names;
-               mInputTensorInfo = property.tensor_infos;
+               std::map<std::string, inference_engine_tensor_info>().swap(mInputLayers);
+               mInputLayers = property.layers;
 
                LOGI("LEAVE");
 
@@ -356,17 +346,11 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("output layer name = %s", name.c_str());
+               for (auto iter = property.layers.begin(); iter != property.layers.end(); ++iter) {
+                       LOGI("input layer name = %s", (iter->first).c_str());
                }
-
-               mOutputLayer.clear();
-               std::vector<std::string>().swap(mOutputLayer);
-
-               mOutputLayer = property.layer_names;
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+               mOutputLayers = property.layers;
 
                LOGI("LEAVE");
 
@@ -407,25 +391,19 @@ namespace TFLiteImpl
 
        int InferenceTFLite::SetInterpreterInfo()
        {
-               if (mInputLayer.empty() || mInputTensorInfo.empty()) {
+               if (mInputLayers.empty()) {
                        LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
 
-                       mInputLayer.clear();
-                       std::vector<std::string>().swap(mInputLayer);
-
-                       mInputTensorInfo.clear();
-                       std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
+                       mInputLayers.clear();
                        for (auto iter = mInputLayerId.begin(); iter != mInputLayerId.end();
                                 ++iter) {
-                               mInputLayer.push_back(mInterpreter->tensor((*iter))->name);
 
                                std::vector<size_t> shape_nhwc;
 
                                for (int idx = 0;
-                                        idx < mInterpreter->tensor((*iter))->dims->size; idx++) {
+                                        idx < mInterpreter->tensor(iter->second)->dims->size; idx++) {
                                        shape_nhwc.push_back(
-                                                       mInterpreter->tensor((*iter))->dims->data[idx]);
+                                                       mInterpreter->tensor(iter->second)->dims->data[idx]);
                                }
 
                                inference_engine_tensor_info tensor_info {
@@ -433,10 +411,10 @@ namespace TFLiteImpl
                                        INFERENCE_TENSOR_DATA_TYPE_NONE, 1
                                };
 
-                               if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+                               if (mInterpreter->tensor(iter->second)->type == kTfLiteUInt8) {
                                        LOGI("type is kTfLiteUInt8");
                                        tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                               } else if (mInterpreter->tensor((*iter))->type ==
+                               } else if (mInterpreter->tensor(iter->second)->type ==
                                                   kTfLiteFloat32) {
                                        LOGI("type is kTfLiteFloat32");
                                        tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
@@ -448,7 +426,7 @@ namespace TFLiteImpl
                                for (auto iter2 : tensor_info.shape) {
                                        tensor_info.size *= iter2;
                                }
-                               mInputTensorInfo.push_back(tensor_info);
+                               mInputLayers.insert(std::make_pair(mInterpreter->tensor(iter->second)->name, tensor_info));
                        }
                }
 
index e184bdf94e76fdfc051882dce1ef29d1d6266854..c3f20905e575d459430fd4a6af95b831289edd10 100644 (file)
@@ -88,17 +88,11 @@ namespace TFLiteImpl
                std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
                std::vector<void *> mInputData;
 
-               std::vector<std::string> mInputLayer; /**< Input layer name */
-               std::vector<std::string> mOutputLayer; /**< Output layer name */
+               std::map<std::string, inference_engine_tensor_info> mInputLayers;
+               std::map<std::string, inference_engine_tensor_info> mOutputLayers;
 
-               std::vector<inference_engine_tensor_info> mInputTensorInfo;
-               std::vector<inference_engine_tensor_info> mOutputTensorInfo;
-
-               std::vector<int> mInputLayerId;
-               std::vector<int> mOutputLayerId;
-
-               std::vector<TfLiteType> mInputAttrType;
-               std::vector<TfLiteType> mOutputAttrType;
+               std::map<std::string, int> mInputLayerId;
+               std::map<std::string, int> mOutputLayerId;
 
                std::string mConfigFile;
                std::string mWeightFile;