Change members of inference_engine_layer_property structure
author    Tae-Young Chung <ty83.chung@samsung.com>
Wed, 10 Mar 2021 09:15:19 +0000 (18:15 +0900)
committer Tae-Young Chung <ty83.chung@samsung.com>
Tue, 16 Mar 2021 08:37:34 +0000 (17:37 +0900)
Change the members of the inference_engine_layer_property structure, and change std::vector<inference_engine_tensor_buffer> to std::map<std::string, inference_engine_tensor_buffer>.

This is based on:
https://review.tizen.org/gerrit/#/c/platform/core/multimedia/inference-engine-interface/+/254892/
https://review.tizen.org/gerrit/#/c/platform/core/api/mediavision/+/254953/

Change-Id: Iba73fa67f586287f0efe90ac2750fab9c6927e45
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
packaging/inference-engine-armnn.spec
src/inference_engine_armnn.cpp
src/inference_engine_armnn_private.h

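For context, the interface types this commit migrates between can be sketched as below. This is a hedged reconstruction from the diff and the linked inference-engine-interface review, not the authoritative headers; the exact field sets and the enumerator name are assumptions.

// Sketch of the interface types assumed by this commit (reconstructed from
// the diff; the authoritative definitions live in inference-engine-interface).
#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Assumed enumerator; the real enum carries more data types.
typedef enum { INFERENCE_TENSOR_DATA_TYPE_FLOAT32 } inference_tensor_data_type_e;

struct inference_engine_tensor_info {
	std::vector<size_t> shape;               // tensor dimensions
	inference_tensor_data_type_e data_type;  // element type
	size_t size;                             // element count (see the getters below)
};

// Before: separate layer_names / tensor_infos vectors.
// After this commit: a single map keyed by layer name.
struct inference_engine_layer_property {
	std::map<std::string, inference_engine_tensor_info> layers;
};

struct inference_engine_tensor_buffer {
	void *buffer;                            // caller-owned memory for one tensor
	inference_tensor_data_type_e data_type;
	size_t size;                             // buffer size
};

int main() {
	inference_engine_layer_property prop;
	prop.layers["input"] = {};               // one designated layer, info not yet filled
	return prop.layers.size() == 1 ? 0 : 1;
}
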
diff --git a/packaging/inference-engine-armnn.spec b/packaging/inference-engine-armnn.spec
index cfcc70a18ecff728db9a41e4bfee6cf1db8c7522..d529f4d0339a8b0d7d3524c14d729626b2711011 100644
@@ -1,7 +1,7 @@
 Name:       inference-engine-armnn
 Summary:    ARM Neural Network Runtime based implementation of inference-engine-interface
 Version:    0.0.1
-Release:    1
+Release:    2
 Group:      Multimedia/Libraries
 License:    Apache-2.0
 ExclusiveArch: %{arm} aarch64
diff --git a/src/inference_engine_armnn.cpp b/src/inference_engine_armnn.cpp
index 96a141fb905292c410ad1ccd0198fdc47a098fd2..baf96ad6e73ec19759a23868c12fc3eeaebadfe1 100644
@@ -52,16 +52,22 @@ namespace ARMNNImpl
        InferenceARMNN::~InferenceARMNN()
        {
                mDesignated_inputs.clear();
-               std::vector<std::string>().swap(mDesignated_inputs);
+               std::map<std::string, int>().swap(mDesignated_inputs);
 
                mDesignated_outputs.clear();
-               std::vector<std::string>().swap(mDesignated_outputs);
+               std::map<std::string, int>().swap(mDesignated_outputs);
 
                mInputBindingInfo.clear();
-               std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
+               std::map<std::string, armnn::BindingPointInfo>().swap(mInputBindingInfo);
 
                mOutputBindingInfo.clear();
-               std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+               std::map<std::string, armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+
+               mInputProperty.layers.clear();
+               std::map<std::string, inference_engine_tensor_info>().swap(mInputProperty.layers);
+
+               mOutputProperty.layers.clear();
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputProperty.layers);
 
                armnn::IRuntime::Destroy(sRuntime);
                sRuntime = nullptr;
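
A note on the clear-and-swap idiom the destructor keeps after the type change: for std::vector, swapping with an empty temporary is what actually releases the allocated capacity, whereas for std::map, clear() already frees every node, so the swap is redundant but harmless. A minimal stand-alone illustration:

// Illustration of clear-and-swap for the two container kinds used above.
#include <map>
#include <string>
#include <vector>

int main() {
	std::vector<std::string> v = { "a", "b" };
	v.clear();                                  // size 0, capacity retained
	std::vector<std::string>().swap(v);         // capacity actually released

	std::map<std::string, int> m = { { "a", 1 } };
	m.clear();                                  // nodes already freed here
	std::map<std::string, int>().swap(m);       // no additional effect
	return 0;
}
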
@@ -168,49 +174,43 @@ namespace ARMNNImpl
 
                // If there is any input layer designated by user then it is set as input layer.
                // Otherwise, layer from armnn runtime will be set as input.
-               if (mDesignated_inputs.empty()) {
-                       std::vector<std::string> in_names =
+               if (mInputProperty.layers.empty()) {
+                       const std::vector<std::string>& in_names =
                                        parser->GetSubgraphInputTensorNames(0);
-                       for (auto const &name : in_names) {
-                               mInputBindingInfo.push_back(
-                                               parser->GetNetworkInputBindingInfo(0, name));
+                       for (const auto &name : in_names) {
+                               mInputBindingInfo.insert(
+                                               std::make_pair(name, parser->GetNetworkInputBindingInfo(0,name)));
                                LOGI("%s layer has been designated as input.", name.c_str());
                        }
                } else {
                        mInputBindingInfo.clear();
-                       std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
-
-                       std::vector<std::string>::iterator iter;
-                       for (iter = mDesignated_inputs.begin();
-                                iter != mDesignated_inputs.end(); iter++) {
-                               std::string name = *iter;
-                               mInputBindingInfo.push_back(
-                                               parser->GetNetworkInputBindingInfo(0, name));
-                               LOGI("%s layer has been designated as input.", name.c_str());
+                       std::map<std::string, armnn::BindingPointInfo>().swap(mInputBindingInfo);
+
+                       for (auto& layer : mInputProperty.layers) {
+                               mInputBindingInfo.insert(
+                                       std::make_pair(layer.first, parser->GetNetworkInputBindingInfo(0, layer.first)));
+                               LOGI("%s layer has been designated as input.", layer.first.c_str());
                        }
                }
 
                // If there is any output layer designated by user then it is set as output layer.
                // Otherwise, layer from armnn runtime will be set as output.
-               if (mDesignated_outputs.empty()) {
-                       std::vector<std::string> out_names =
+               if (mOutputProperty.layers.empty()) {
+                       const std::vector<std::string>& out_names =
                                        parser->GetSubgraphOutputTensorNames(0);
-                       for (auto const &name : out_names) {
-                               mOutputBindingInfo.push_back(
-                                               parser->GetNetworkOutputBindingInfo(0, name));
+                       for (const auto &name : out_names) {
+                               mOutputBindingInfo.insert(
+                                       std::make_pair(name, parser->GetNetworkOutputBindingInfo(0, name)));
                                LOGI("%s layer has been designated as output.", name.c_str());
                        }
                } else {
                        mOutputBindingInfo.clear();
-                       std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
-
-                       std::vector<std::string>::iterator iter;
-                       for (iter = mDesignated_outputs.begin();
-                                iter != mDesignated_outputs.end(); iter++) {
-                               std::string name = *iter;
-                               mOutputBindingInfo.push_back(
-                                               parser->GetNetworkOutputBindingInfo(0, name));
-                               LOGI("%s layer has been designated as output.", name.c_str());
+                       std::map<std::string, armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+
+                       for (auto& layer : mOutputProperty.layers) {
+                               mOutputBindingInfo.insert(
+                                       std::make_pair(layer.first, parser->GetNetworkOutputBindingInfo(0, layer.first)));
+                               LOGI("%s layer has been designated as output.", layer.first.c_str());
                        }
                }
 
@@ -387,7 +387,7 @@ namespace ARMNNImpl
        }
 
        int InferenceARMNN::GetInputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                LOGI("ENTER");
 
@@ -399,7 +399,7 @@ namespace ARMNNImpl
        }
 
        int InferenceARMNN::GetOutputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                LOGI("ENTER");
 
@@ -417,13 +417,11 @@ namespace ARMNNImpl
 
                // TODO. Need to check if model file loading is done.
 
-               std::vector<armnn::BindingPointInfo>::iterator iter;
-               for (iter = mInputBindingInfo.begin(); iter != mInputBindingInfo.end();
-                        iter++) {
+               for (auto& info : mInputBindingInfo) {
                        inference_engine_tensor_info out_info = {};
-                       armnn::BindingPointInfo bindingInfo = *iter;
-                       armnn::TensorInfo tensorInfo = bindingInfo.second;
-                       armnn::TensorShape shape = tensorInfo.GetShape();
+                       armnn::BindingPointInfo& bindingInfo = info.second;
+                       armnn::TensorInfo& tensorInfo = bindingInfo.second;
+                       armnn::TensorShape& shape = tensorInfo.GetShape();
                        size_t tensor_size = 1;
 
                        for (int i = 0; i < (int) tensorInfo.GetNumDimensions(); i++) {
@@ -434,7 +432,7 @@ namespace ARMNNImpl
                        out_info.data_type =
                                        ConvertDataType((armnn::DataType) tensorInfo.GetDataType());
                        out_info.size = tensor_size;
-                       property.tensor_infos.push_back(out_info);
+                       property.layers.insert(std::make_pair(info.first, out_info));
                }
 
                LOGI("LEAVE");
@@ -449,13 +447,11 @@ namespace ARMNNImpl
 
                // TODO. Need to check if model file loading is done.
 
-               std::vector<armnn::BindingPointInfo>::iterator iter;
-               for (iter = mOutputBindingInfo.begin();
-                        iter != mOutputBindingInfo.end(); iter++) {
+               for (auto& info : mOutputBindingInfo) {
                        inference_engine_tensor_info out_info = {};
-                       armnn::BindingPointInfo bindingInfo = *iter;
-                       armnn::TensorInfo tensorInfo = bindingInfo.second;
-                       armnn::TensorShape shape = tensorInfo.GetShape();
+                       armnn::BindingPointInfo& bindingInfo = info.second;
+                       armnn::TensorInfo& tensorInfo = bindingInfo.second;
+                       armnn::TensorShape& shape = tensorInfo.GetShape();
                        size_t tensor_size = 1;
 
                        for (int i = 0; i < (int) tensorInfo.GetNumDimensions(); i++) {
@@ -466,7 +462,7 @@ namespace ARMNNImpl
                        out_info.data_type =
                                        ConvertDataType((armnn::DataType) tensorInfo.GetDataType());
                        out_info.size = tensor_size;
-                       property.tensor_infos.push_back(out_info);
+                       property.layers.insert(std::make_pair(info.first, out_info));
                }
 
                LOGI("LEAVE");
@@ -479,17 +475,14 @@ namespace ARMNNImpl
        {
                LOGI("ENTER");
 
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("input layer name = %s", name.c_str());
+               for (auto& layer : property.layers) {
+                       LOGI("input layer name = %s", layer.first.c_str());
                }
 
                mDesignated_inputs.clear();
-               std::vector<std::string>().swap(mDesignated_inputs);
+               std::map<std::string, int>().swap(mDesignated_inputs);
 
-               mDesignated_inputs = property.layer_names;
+               mInputProperty = property;
 
                LOGI("LEAVE");
 
@@ -501,17 +494,14 @@ namespace ARMNNImpl
        {
                LOGI("ENTER");
 
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("output layer name = %s", name.c_str());
+               for (auto& layer : property.layers) {
+                       LOGI("output layer name = %s", layer.first.c_str());
                }
 
                mDesignated_outputs.clear();
-               std::vector<std::string>().swap(mDesignated_outputs);
+               std::map<std::string, int>().swap(mDesignated_outputs);
 
-               mDesignated_outputs = property.layer_names;
+               mOutputProperty = property;
 
                LOGI("LEAVE");
 
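From the caller's side, designating layers now means filling the map in inference_engine_layer_property rather than a name vector. A hypothetical, self-contained sketch (types repeated minimally from the sketch near the top; the stub stands in for InferenceARMNN::SetInputLayerProperty):

// Hypothetical caller-side sketch of the new map-based property API.
#include <cstddef>
#include <map>
#include <string>

struct inference_engine_tensor_info { size_t size; };
struct inference_engine_layer_property {
	std::map<std::string, inference_engine_tensor_info> layers;
};

// Stub standing in for InferenceARMNN::SetInputLayerProperty: the engine now
// stores the whole property (mInputProperty); Load() later resolves bindings
// from the map keys (layer names).
static int SetInputLayerProperty(const inference_engine_layer_property &property) {
	return property.layers.empty() ? -1 : 0;
}

int main() {
	inference_engine_layer_property prop;
	prop.layers["input_tensor"] = { 150528 };   // layer name -> tensor info
	return SetInputLayerProperty(prop);
}
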
@@ -537,8 +527,8 @@ namespace ARMNNImpl
        }
 
        int InferenceARMNN::CheckTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                       std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
        {
                int ret = INFERENCE_ENGINE_ERROR_NONE;
 
@@ -598,8 +588,8 @@ namespace ARMNNImpl
        }
 
        int InferenceARMNN::Run(
-                       std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                       std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
        {
                LOGI("ENTER");
 
@@ -609,24 +599,20 @@ namespace ARMNNImpl
                        return err;
                }
 
-               std::vector<armnn::BindingPointInfo>::iterator binding_iter;
-               std::vector<inference_engine_tensor_buffer>::iterator buffer_iter;
-
+               auto binding_iter =  mInputBindingInfo.begin();
+               auto buffer_iter = input_buffers.begin();
                // Setup input layer.
                armnn::InputTensors input_tensors;
-
-               for (binding_iter = mInputBindingInfo.begin(),
-                       buffer_iter = input_buffers.begin();
-                        binding_iter != mInputBindingInfo.end();
+               for (; binding_iter != mInputBindingInfo.end();
                         binding_iter++, buffer_iter++) {
-                       armnn::BindingPointInfo inBindingInfo = *binding_iter;
-                       armnn::TensorInfo inputTensorInfo = inBindingInfo.second;
-                       inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
+                       armnn::BindingPointInfo& inBindingInfo = binding_iter->second;
+                       armnn::TensorInfo& inputTensorInfo = inBindingInfo.second;
+                       inference_engine_tensor_buffer& tensor_buffer = buffer_iter->second;
 
                        armnn::Tensor input_tensor(inputTensorInfo, tensor_buffer.buffer);
                        input_tensors.push_back({ inBindingInfo.first, input_tensor });
 
-                       armnn::TensorShape shape = inputTensorInfo.GetShape();
+                       armnn::TensorShape& shape = inputTensorInfo.GetShape();
                        unsigned int tensor_size = 1;
                        for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions();
                                 i++)
@@ -636,21 +622,21 @@ namespace ARMNNImpl
                                 inputTensorInfo.GetNumDimensions(), tensor_size);
                }
 
+               binding_iter =  mOutputBindingInfo.begin();
+               buffer_iter = output_buffers.begin();
                // Setup output layer.
                armnn::OutputTensors output_tensors;
 
-               for (binding_iter = mOutputBindingInfo.begin(),
-                       buffer_iter = output_buffers.begin();
-                        binding_iter != mOutputBindingInfo.end();
+               for (; binding_iter != mOutputBindingInfo.end();
                         binding_iter++, buffer_iter++) {
-                       armnn::BindingPointInfo outBindingInfo = *binding_iter;
-                       armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
-                       inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
+                       armnn::BindingPointInfo& outBindingInfo = binding_iter->second;
+                       armnn::TensorInfo& outputTensorInfo = outBindingInfo.second;
+                       inference_engine_tensor_buffer& tensor_buffer = buffer_iter->second;
 
                        armnn::Tensor output_tensor(outputTensorInfo, tensor_buffer.buffer);
                        output_tensors.push_back({ outBindingInfo.first, output_tensor });
 
-                       armnn::TensorShape shape = outputTensorInfo.GetShape();
+                       armnn::TensorShape& shape = outputTensorInfo.GetShape();
                        unsigned int tensor_size = 1;
                        for (unsigned int i = 0; i < outputTensorInfo.GetNumDimensions();
                                 i++)
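
The rewritten Run() walks mInputBindingInfo and input_buffers in lock-step. That works because both are std::map keyed by layer name and std::map iterates in ascending key order, so positions line up whenever the two maps hold exactly the same names. A stand-alone illustration, including a lookup-by-name alternative that would make the invariant explicit:

// Why zip-style iteration over two maps works, and a keyed alternative.
#include <cassert>
#include <map>
#include <string>

int main() {
	std::map<std::string, int> bindings = { { "in0", 10 }, { "in1", 11 } };
	std::map<std::string, int> buffers  = { { "in0", 20 }, { "in1", 21 } };

	// Zip-style pairing, as in Run(): relies on identical key sets.
	auto b = bindings.begin();
	auto u = buffers.begin();
	for (; b != bindings.end(); ++b, ++u)
		assert(b->first == u->first);       // holds: same keys, same sort order

	// Keyed alternative: pair by name instead of by position.
	for (const auto &binding : bindings) {
		auto it = buffers.find(binding.first);
		assert(it != buffers.end());        // a buffer must exist per binding
	}
	return 0;
}
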
diff --git a/src/inference_engine_armnn_private.h b/src/inference_engine_armnn_private.h
index 69bd93f822e4b810dc88f029ce04ca0f9022713e..8e36cb2ae89d8872b9489d98c1979980bb83e790 100644
@@ -59,10 +59,10 @@ namespace ARMNNImpl
                                 inference_model_format_e model_format) override;
 
                int GetInputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+                               std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
                int GetOutputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+                               std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
                int GetInputLayerProperty(
                                inference_engine_layer_property &property) override;
@@ -78,8 +78,8 @@ namespace ARMNNImpl
 
                int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
-               int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                               std::vector<inference_engine_tensor_buffer> &output_buffers)
+               int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                               std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
                                override;
 
        private:
@@ -88,8 +88,8 @@ namespace ARMNNImpl
                                                  inference_model_format_e model_format);
                inference_tensor_data_type_e ConvertDataType(armnn::DataType type);
                int CheckTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &input_buffers,
-                               std::vector<inference_engine_tensor_buffer> &output_buffers);
+                               std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                               std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
                bool UseGpuAcc(void);
                armnn::IGpuAccTunedParameters::TuningLevel ConvertTuningType(inference_engine_cltuner_mode_e tuning_mode);
 
@@ -99,10 +99,12 @@ namespace ARMNNImpl
                armnn::INetworkPtr mNetwork;
                armnn::NetworkId mNetworkIdentifier;
 
-               std::vector<armnn::BindingPointInfo> mInputBindingInfo;
-               std::vector<armnn::BindingPointInfo> mOutputBindingInfo;
-               std::vector<std::string> mDesignated_inputs;
-               std::vector<std::string> mDesignated_outputs;
+               std::map<std::string, armnn::BindingPointInfo> mInputBindingInfo;
+               std::map<std::string, armnn::BindingPointInfo> mOutputBindingInfo;
+               std::map<std::string, int> mDesignated_inputs;
+               std::map<std::string, int> mDesignated_outputs;
+               inference_engine_layer_property mInputProperty;
+               inference_engine_layer_property mOutputProperty;
                inference_engine_cltuner mCLTuner;
        };