Change members of inference_engine_layer_property structure, 94/254894/3
authorTae-Young Chung <ty83.chung@samsung.com>
Wed, 10 Mar 2021 09:09:52 +0000 (18:09 +0900)
committerTae-Young Chung <ty83.chung@samsung.com>
Tue, 16 Mar 2021 05:35:40 +0000 (14:35 +0900)
and change std::vector<inference_engine_tensor_buffer> to std::map<std::string, inference_engine_tensor_buffer>

This is based on
https://review.tizen.org/gerrit/#/c/platform/core/multimedia/inference-engine-interface/+/254892/
https://review.tizen.org/gerrit/#/c/platform/core/api/mediavision/+/254953/

Change-Id: I451e44dc31aeafe4a92f86baef34204daeed70a5
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
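
For context, a minimal sketch of the map-based layer property this backend now consumes. The authoritative definitions live in the inference-engine-interface change referenced above (254892); the member and enum names below are inferred from how this diff uses them (property.layers, tensor_info.shape / data_type / size), so treat them as assumptions rather than the exact header contents.

// Sketch only: the real types come from inference-engine-interface headers.
#include <cstddef>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

typedef enum {
	INFERENCE_TENSOR_DATA_TYPE_NONE,
	INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
	INFERENCE_TENSOR_DATA_TYPE_UINT8,
	INFERENCE_TENSOR_DATA_TYPE_INT64,
} inference_tensor_data_type_e;

typedef struct _inference_engine_tensor_info {
	std::vector<size_t> shape;               // tflite reports NHWC dims
	int shape_type;                          // e.g. INFERENCE_TENSOR_SHAPE_NHWC
	inference_tensor_data_type_e data_type;
	size_t size;                             // element count (product of shape)
} inference_engine_tensor_info;

// Before this change: parallel vectors layer_names + tensor_infos.
// After this change: one map keyed by layer name.
typedef struct _inference_engine_layer_property {
	std::map<std::string, inference_engine_tensor_info> layers;
} inference_engine_layer_property;

// Example consumer of what GetInputLayerProperty()/GetOutputLayerProperty()
// now fill in.
static void DumpLayers(const inference_engine_layer_property &property)
{
	for (const auto &layer : property.layers)
		printf("layer [%s]: %zu elements\n",
		       layer.first.c_str(), layer.second.size);
}

Keying by layer name removes the need to keep the old name and tensor-info vectors index-aligned, which is exactly what the removed mInputLayer/mInputTensorInfo and mOutputLayer/mOutputTensorInfo pairs were doing.
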
packaging/inference-engine-tflite.spec
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

index cb3fd4468546d7762220af3689bf8655d9b13256..773772849f46aeaf4618687744a7d5f45804b7e5 100644 (file)
@@ -1,7 +1,7 @@
 Name:       inference-engine-tflite
 Summary:    Tensorflow-Lite based implementation of inference-engine-interface
 Version:    0.0.1
-Release:    12
+Release:    13
 Group:      Multimedia/Libraries
 License:    Apache-2.0
 Source0:    %{name}-%{version}.tar.gz
index b4b810c13b741c98eb1f6cc014cb787edf779ac7..90b8a2ff49c20bc54d843119b7e85bd1a64b313c 100644 (file)
@@ -134,20 +134,22 @@ namespace TFLiteImpl
                         mInterpreter->tensors_size());
 
                // input tensor
-               if (mInterpreter->inputs().size()) {
-                       mInputLayerId = mInterpreter->inputs();
+               std::map<std::string, int>().swap(mInputLayerId);
+               const std::vector<int>& inputs = mInterpreter->inputs();
+               if (!inputs.empty()) {
+                       for (auto& input : inputs) {
+                               mInputLayerId.insert(std::make_pair(mInterpreter->tensor(input)->name, input));
+                       }
                } else {
-                       std::vector<std::string>::iterator iter;
                        mInputLayerId.clear();
-                       for (iter = mInputLayer.begin(); iter != mInputLayer.end();
-                                ++iter) {
-                               LOGI("mInputLayer list [%s]", (*iter).c_str());
+                       for (auto& layer : mInputLayers) {
+                               LOGI("mInputLayer list [%s]", layer.first.c_str());
                                for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
                                         ++idx) {
                                        if (mInterpreter->tensor(idx)->name == NULL)
                                                continue;
-                                       if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
-                                               mInputLayerId.push_back(idx);
+                                       if ((layer.first).compare(mInterpreter->tensor(idx)->name) == 0) {
+                                               mInputLayerId.insert(std::make_pair(layer.first, idx));
                                                break;
                                        }
                                }
@@ -155,20 +157,22 @@ namespace TFLiteImpl
                }
 
                // output tensor
-               if (mInterpreter->outputs().size()) {
-                       mOutputLayerId = mInterpreter->outputs();
+               std::map<std::string, int>().swap(mOutputLayerId);
+               const std::vector<int>& outputs = mInterpreter->outputs();
+               if (!outputs.empty()) {
+                       for (auto& output : outputs) {
+                               mOutputLayerId.insert(std::make_pair(mInterpreter->tensor(output)->name, output));
+                       }
                } else {
-                       std::vector<std::string>::iterator iter;
                        mOutputLayerId.clear();
-                       for (iter = mOutputLayer.begin(); iter != mOutputLayer.end();
-                                ++iter) {
-                               LOGI("mOutputLayer list [%s]", (*iter).c_str());
+                       for (auto& layer : mOutputLayers) {
+                               LOGI("mOutputLayer list [%s]", layer.first.c_str());
                                for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
                                         ++idx) {
                                        if (mInterpreter->tensor(idx)->name == NULL)
                                                continue;
-                                       if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
-                                               mOutputLayerId.push_back(idx);
+                                       if ((layer.first).compare(mInterpreter->tensor(idx)->name) == 0) {
+                                               mOutputLayerId.insert(std::make_pair(layer.first, idx));
                                                break;
                                        }
                                }
@@ -180,20 +184,15 @@ namespace TFLiteImpl
                        return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
                }
 
-               for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
-                       mInputAttrType.push_back(
-                                       mInterpreter->tensor(mInputLayerId[idx])->type);
-               }
-
                return ret;
        }
 
        int InferenceTFLite::GetInputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                LOGI("ENTER");
 
-               if (mInputTensorInfo.empty()) {
+               if (mInputLayers.empty()) {
                        SetInterpreterInfo();
                }
 
@@ -201,73 +200,65 @@ namespace TFLiteImpl
 
                void *pBuff = NULL;
 
-               for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
+               for (auto& layer : mInputLayers) {
                        size_t size = 1;
                        inference_engine_tensor_buffer buffer;
-                       for (std::vector<size_t>::iterator iter =
-                                                mInputTensorInfo[idx].shape.begin();
-                                iter != mInputTensorInfo[idx].shape.end(); ++iter) {
-                               size *= (*iter);
+                       for (auto& dim : layer.second.shape) {
+                               size *= dim;
                        }
-                       if (mInputAttrType[idx] == kTfLiteUInt8) {
-                               mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(
-                                               mInputLayerId[idx]));
+
+                       if (layer.second.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+                               mInputData.push_back(
+                                               mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
                                pBuff = mInputData.back();
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-                       } else if (mInputAttrType[idx] == kTfLiteFloat32) {
+                       } else if (layer.second.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
                                mInputData.push_back(
-                                               mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
+                                               mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
                                pBuff = mInputData.back();
-                               buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4,
-                                                  1 };
+                               buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
                        } else {
                                LOGE("Not supported");
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
-                       buffers.push_back(buffer);
+                       buffers.insert(std::make_pair(layer.first, buffer));
                }
 
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
        int InferenceTFLite::GetOutputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                void *pBuff = NULL;
 
-               for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) {
+               for (auto& layer : mOutputLayers) {
                        inference_engine_tensor_buffer buffer;
                        size_t size = 1;
                        for (int idx2 = 0;
-                                idx2 < mInterpreter->tensor(mOutputLayerId[idx])->dims->size;
+                                idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size;
                                 ++idx2) {
-                               size *= mInterpreter->tensor(mOutputLayerId[idx])
-                                                               ->dims->data[idx2];
+                               size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2];
                        }
 
-                       if (mInterpreter->tensor(mOutputLayerId[idx])->type ==
-                               kTfLiteUInt8) {
+                       if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteUInt8) {
                                LOGI("type is kTfLiteUInt8");
-                               pBuff = (void *) mInterpreter->typed_tensor<uint8_t>(
-                                               mOutputLayerId[idx]);
+                               pBuff = (void *) mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]);
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-                       } else if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteInt64) {
+                       } else if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteInt64) {
                                LOGI("type is kTfLiteInt64");
-                               pBuff = (void*)mInterpreter->typed_tensor<int64_t>(mOutputLayerId[idx]);
+                               pBuff = (void*)mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]);
                                buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
-                       } else if (mInterpreter->tensor(mOutputLayerId[idx])->type ==
-                                          kTfLiteFloat32) {
+                       } else if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteFloat32) {
                                LOGI("type is kTfLiteFloat32");
-                               pBuff = (void *) mInterpreter->typed_tensor<float>(
-                                               mOutputLayerId[idx]);
-                               buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4,
-                                                  1 };
+                               pBuff = (void *) mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]);
+                               buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
                        } else {
                                LOGE("Not supported");
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
 
-                       buffers.push_back(buffer);
+                       buffers.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layer.first])->name, buffer));
                }
                return INFERENCE_ENGINE_ERROR_NONE;
        }
@@ -278,8 +269,7 @@ namespace TFLiteImpl
                LOGI("ENTER");
 
                SetInterpreterInfo();
-               property.layer_names = mInputLayer;
-               property.tensor_infos = mInputTensorInfo;
+               property.layers = mInputLayers;
 
                LOGI("LEAVE");
 
@@ -291,44 +281,41 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
 
-               for (std::vector<int>::iterator iter = mOutputLayerId.begin();
-                        iter != mOutputLayerId.end(); ++iter) {
-                       LOGI("output layer ID: %d", (*iter));
-                       if ((*iter) < 0) {
+               for (auto& layer : mOutputLayerId) {
+                       LOGI("output layer ID: %d", layer.second);
+                       if (layer.second < 0) {
                                LOGE("Invalid output layer");
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                        }
 
-                       mOutputLayer.push_back(mInterpreter->tensor((*iter))->name);
-
                        inference_engine_tensor_info tensor_info;
 
-                       LOGI("mInterpreter->tensor((*iter))->dims name[%s]",
-                                mInterpreter->tensor((*iter))->name);
-                       LOGI("mInterpreter->tensor((*iter))->dims size[%d]",
-                                mInterpreter->tensor((*iter))->dims->size);
-                       LOGI("mInterpreter->tensor((*iter))->dims type[%d]",
-                                mInterpreter->tensor((*iter))->type);
+                       LOGI("mInterpreter->tensor(%d)->dims name[%s]",
+                                layer.second, mInterpreter->tensor(layer.second)->name);
+                       LOGI("mInterpreter->tensor(%d)->dims size[%d]",
+                                layer.second, mInterpreter->tensor(layer.second)->dims->size);
+                       LOGI("mInterpreter->tensor(%d)->dims type[%d]",
+                                layer.second, mInterpreter->tensor(layer.second)->type);
 
                        std::vector<size_t> shape_nhwc;
-                       for (int idx = 0; idx < mInterpreter->tensor((*iter))->dims->size;
+                       for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size;
                                 idx++) {
                                shape_nhwc.push_back(
-                                               mInterpreter->tensor((*iter))->dims->data[idx]);
+                                               mInterpreter->tensor(layer.second)->dims->data[idx]);
                        }
 
                        //tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
                        tensor_info.shape = shape_nhwc;
                        tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
-                       if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+                       if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
                                LOGI("type is kTfLiteUInt8");
                                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                       } else if (mInterpreter->tensor((*iter))->type == kTfLiteInt64) {
+                       } else if (mInterpreter->tensor(layer.second)->type == kTfLiteInt64) {
                                LOGI("type is kTfLiteInt64");
                                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
-                       } else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
+                       } else if (mInterpreter->tensor(layer.second)->type == kTfLiteFloat32) {
                                LOGI("type is kTfLiteFloat32");
                                tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
                        } else {
@@ -336,16 +323,13 @@ namespace TFLiteImpl
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
                        tensor_info.size = 1;
-                       for (std::vector<size_t>::iterator iter2 =
-                                                tensor_info.shape.begin();
-                                iter2 != tensor_info.shape.end(); ++iter2) {
-                               tensor_info.size *= (*iter2);
+                       for (auto& dim : tensor_info.shape) {
+                               tensor_info.size *= dim;
                        }
-                       mOutputTensorInfo.push_back(tensor_info);
+                       mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
                }
 
-               property.layer_names = mOutputLayer;
-               property.tensor_infos = mOutputTensorInfo;
+               property.layers = mOutputLayers;
 
                LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -356,21 +340,11 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("input layer name = %s", name.c_str());
+               for (auto& layer : property.layers) {
+                       LOGI("input layer name = %s", layer.first.c_str());
                }
-
-               mInputLayer.clear();
-               std::vector<std::string>().swap(mInputLayer);
-
-               mInputTensorInfo.clear();
-               std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
-               mInputLayer = property.layer_names;
-               mInputTensorInfo = property.tensor_infos;
+               std::map<std::string, inference_engine_tensor_info>().swap(mInputLayers);
+               mInputLayers = property.layers;
 
                LOGI("LEAVE");
 
@@ -382,17 +356,11 @@ namespace TFLiteImpl
        {
                LOGI("ENTER");
 
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("output layer name = %s", name.c_str());
+               for (auto& layer : property.layers) {
+                       LOGI("output layer name = %s", layer.first.c_str());
                }
-
-               mOutputLayer.clear();
-               std::vector<std::string>().swap(mOutputLayer);
-
-               mOutputLayer = property.layer_names;
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+               mOutputLayers = property.layers;
 
                LOGI("LEAVE");
 
@@ -417,8 +385,8 @@ namespace TFLiteImpl
        }
 
        int InferenceTFLite::Run(
-                       std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                       std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
        {
                LOGI("ENTER");
                TfLiteStatus status = mInterpreter->Invoke();
@@ -434,25 +402,18 @@ namespace TFLiteImpl
 
        int InferenceTFLite::SetInterpreterInfo()
        {
-               if (mInputLayer.empty() || mInputTensorInfo.empty()) {
+               if (mInputLayers.empty()) {
                        LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
 
-                       mInputLayer.clear();
-                       std::vector<std::string>().swap(mInputLayer);
-
-                       mInputTensorInfo.clear();
-                       std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
-                       for (auto iter = mInputLayerId.begin(); iter != mInputLayerId.end();
-                                ++iter) {
-                               mInputLayer.push_back(mInterpreter->tensor((*iter))->name);
+                       mInputLayers.clear();
+                       for (auto& layer : mInputLayerId) {
 
                                std::vector<size_t> shape_nhwc;
 
                                for (int idx = 0;
-                                        idx < mInterpreter->tensor((*iter))->dims->size; idx++) {
+                                        idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
                                        shape_nhwc.push_back(
-                                                       mInterpreter->tensor((*iter))->dims->data[idx]);
+                                                       mInterpreter->tensor(layer.second)->dims->data[idx]);
                                }
 
                                inference_engine_tensor_info tensor_info {
@@ -460,10 +421,10 @@ namespace TFLiteImpl
                                        INFERENCE_TENSOR_DATA_TYPE_NONE, 1
                                };
 
-                               if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+                               if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
                                        LOGI("type is kTfLiteUInt8");
                                        tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                               } else if (mInterpreter->tensor((*iter))->type ==
+                               } else if (mInterpreter->tensor(layer.second)->type ==
                                                   kTfLiteFloat32) {
                                        LOGI("type is kTfLiteFloat32");
                                        tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
@@ -472,10 +433,10 @@ namespace TFLiteImpl
                                        return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                                }
 
-                               for (auto iter2 : tensor_info.shape) {
-                                       tensor_info.size *= iter2;
+                               for (auto& dim : tensor_info.shape) {
+                                       tensor_info.size *= dim;
                                }
-                               mInputTensorInfo.push_back(tensor_info);
+                               mInputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
                        }
                }
 
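The central change in inference_engine_tflite.cpp is replacing index-aligned vectors with name-keyed maps built from the interpreter. A condensed, standalone sketch of that mapping step follows; it uses only public tflite::Interpreter calls (tensor(), inputs(), outputs()), while the helper name and the skipping of unnamed tensors are illustrative assumptions, not part of this patch.

// Sketch: derive name-keyed tensor ids the way the patched Load() does.
#include <map>
#include <string>
#include <utility>
#include <vector>

#include <tensorflow/lite/interpreter.h>

static std::map<std::string, int>
MapTensorNames(const tflite::Interpreter &interpreter, const std::vector<int> &ids)
{
	std::map<std::string, int> layer_ids;

	for (int id : ids) {
		const TfLiteTensor *tensor = interpreter.tensor(id);
		if (tensor == nullptr || tensor->name == nullptr)
			continue; // unnamed tensors cannot be keyed by name
		layer_ids.insert(std::make_pair(tensor->name, id));
	}
	return layer_ids;
}

// Usage (hypothetical): mInputLayerId  = MapTensorNames(*mInterpreter, mInterpreter->inputs());
//                       mOutputLayerId = MapTensorNames(*mInterpreter, mInterpreter->outputs());
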
index 4ebfd4305e18464176f14c2005891e1e7b2c4e3d..8cf942c8cb0ba86e84ce9a7f37d313f9995cb2be 100644 (file)
@@ -60,10 +60,10 @@ namespace TFLiteImpl
                                 inference_model_format_e model_format) override;
 
                int GetInputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+                               std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
                int GetOutputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+                               std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
                int GetInputLayerProperty(
                                inference_engine_layer_property &property) override;
@@ -79,8 +79,8 @@ namespace TFLiteImpl
 
                int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
-               int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                               std::vector<inference_engine_tensor_buffer> &output_buffers)
+               int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                               std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
                                override;
 
        private:
@@ -90,17 +90,11 @@ namespace TFLiteImpl
                std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
                std::vector<void *> mInputData;
 
-               std::vector<std::string> mInputLayer; /**< Input layer name */
-               std::vector<std::string> mOutputLayer; /**< Output layer name */
+               std::map<std::string, inference_engine_tensor_info> mInputLayers;
+               std::map<std::string, inference_engine_tensor_info> mOutputLayers;
 
-               std::vector<inference_engine_tensor_info> mInputTensorInfo;
-               std::vector<inference_engine_tensor_info> mOutputTensorInfo;
-
-               std::vector<int> mInputLayerId;
-               std::vector<int> mOutputLayerId;
-
-               std::vector<TfLiteType> mInputAttrType;
-               std::vector<TfLiteType> mOutputAttrType;
+               std::map<std::string, int> mInputLayerId;
+               std::map<std::string, int> mOutputLayerId;
 
                std::string mConfigFile;
                std::string mWeightFile;
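
As a usage note, a minimal caller-side sketch against the revised API with map-keyed buffers. It assumes an already loaded TFLiteImpl::InferenceTFLite instance (Load() and property setup not shown), the tensor names in the comments are placeholders, and the .buffer member name is inferred from this diff's brace initializers, so treat the flow as illustrative rather than the canonical mediavision call sequence.

// Sketch only: error handling simplified, model loading omitted.
#include <map>
#include <string>

#include "inference_engine_tflite_private.h"

int RunOnce(TFLiteImpl::InferenceTFLite &engine)
{
	std::map<std::string, inference_engine_tensor_buffer> inputs;
	std::map<std::string, inference_engine_tensor_buffer> outputs;

	// Buffers are now keyed by tensor name instead of positional index.
	int ret = engine.GetInputTensorBuffers(inputs);
	if (ret != INFERENCE_ENGINE_ERROR_NONE)
		return ret;

	ret = engine.GetOutputTensorBuffers(outputs);
	if (ret != INFERENCE_ENGINE_ERROR_NONE)
		return ret;

	// Fill input data by name, e.g. copy preprocessed pixels into
	// inputs["normalized_input_image_tensor"].buffer (model-specific name,
	// member name assumed).

	ret = engine.Run(inputs, outputs);
	if (ret != INFERENCE_ENGINE_ERROR_NONE)
		return ret;

	// Results are likewise read back by output tensor name from 'outputs'.
	return INFERENCE_ENGINE_ERROR_NONE;
}

Compared with the old index-based vectors, the name-keyed map keeps inputs and outputs unambiguous when the model's tensor order differs from the order in which the caller configured the layers.
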