Change members of inference_engine_layer_property structure,
and change vector<inference_engine_tensor_buffer> to map<string, inference_engine_tensor_buffer>

author    Tae-Young Chung <ty83.chung@samsung.com>
          Wed, 10 Mar 2021 09:05:36 +0000 (18:05 +0900)
committer Inki Dae <inki.dae@samsung.com>
          Mon, 29 Mar 2021 05:11:48 +0000 (14:11 +0900)

This patch keys inference_engine_tensor_info and inference_engine_tensor_buffer
by input/output layer name instead of by index.

Change-Id: I18d3e7ae80a8c2a1e6236938571b8f22b12b2e1e
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
include/inference_engine_common.h
include/inference_engine_common_impl.h
include/inference_engine_type.h
packaging/inference-engine-interface.spec
src/inference_engine_common_impl.cpp
test/src/inference_engine_profiler.cpp
test/src/inference_engine_tc.cpp
test/src/inference_engine_test_common.cpp
test/src/inference_engine_test_common.h

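The core of the change is the name-keyed layer property. As a rough usage sketch (the layer name, shape, and data type below are illustrative, not taken from this patch):

    inference_engine_layer_property input_property;

    inference_engine_tensor_info info = {
            { 1, 3, 224, 224 },                    // NCHW shape (illustrative)
            INFERENCE_TENSOR_SHAPE_NCHW,
            INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
            (size_t)(1 * 3 * 224 * 224)            // tensor size
    };

    // One entry per layer; tensor info is found by name, not by index.
    input_property.layers.insert(std::make_pair("input_tensor", info));
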
index d7db0d51908e08a8ca96d9f04d5eb62a82617921..47695ec5568b9b30162b3c32cdd0d7456527cf2d 100644 (file)
 #ifndef __INFERENCE_ENGINE_COMMON_H__
 #define __INFERENCE_ENGINE_COMMON_H__
 
+#include <map>
 #include <vector>
 #include <string>
 
 #include "inference_engine_type.h"
 
+using IETensorBuffer = std::map<std::string, inference_engine_tensor_buffer>;
 namespace InferenceEngineInterface
 {
 namespace Common
@@ -88,8 +90,7 @@ namespace Common
                 * @param[out] buffers A backend engine should add input tensor buffers allocated itself to buffers vector.
                 *              Otherwise, it should put buffers to be empty.
                 */
-               virtual int GetInputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+               virtual int GetInputTensorBuffers(IETensorBuffer &buffers) = 0;
 
                /**
                 * @brief Get output tensor buffers from a given backend engine.
@@ -105,8 +106,7 @@ namespace Common
                 * @param[out] buffers A backend engine should add output tensor buffers allocated itself to buffers vector.
                 *              Otherwise, it should put buffers to be empty.
                 */
-               virtual int GetOutputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+               virtual int GetOutputTensorBuffers(IETensorBuffer &buffers) = 0;
 
                /**
                 * @brief Get input layer property information from a given backend engine.
@@ -185,8 +185,7 @@ namespace Common
                 * @param[in] output_buffers It contains tensor buffers to be used as output layer.
                 */
                virtual int
-               Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers) = 0;
+               Run(IETensorBuffer &input_buffers, IETensorBuffer &output_buffers) = 0;
        };
 
        typedef void destroy_t(IInferenceEngineCommon *);
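A backend implementing the updated interface fills the name-keyed map itself. A minimal sketch, assuming a hypothetical backend class MyBackend with an internal mInputLayers table (both are assumptions, not part of this patch):

    int MyBackend::GetInputTensorBuffers(IETensorBuffer &buffers)
    {
            // Publish each internally allocated input buffer under its
            // layer's name (mInputLayers and its fields are hypothetical).
            for (auto &layer : mInputLayers) {
                    inference_engine_tensor_buffer buffer = {};
                    buffer.buffer = layer.second.data;       // backend-owned memory
                    buffer.size = layer.second.byte_size;
                    buffer.data_type = layer.second.data_type;
                    buffer.owner_is_backend = 1;             // caller must not free it
                    buffers.insert(std::make_pair(layer.first, buffer));
            }
            return INFERENCE_ENGINE_ERROR_NONE;
    }
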
index 5966557c0c0693ec86838581ca5d03a28281b750..e3fad9184cb0b1f1976eea7532cbf7142b5495c9 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef __INFERENCE_ENGINE_COMMON_IMPL_H__
 #define __INFERENCE_ENGINE_COMMON_IMPL_H__
 
+#include <map>
 #include <vector>
 #include <string>
 
@@ -118,8 +119,7 @@ namespace Common
                 * @param[out] buffers A backend engine should add input tensor buffers allocated itself to buffers vector.
                 *              Otherwise, it should put buffers to be empty.
                 */
-               int GetInputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers);
+               int GetInputTensorBuffers(IETensorBuffer &buffers);
 
                /**
                 * @brief Get output tensor buffers from a given backend engine.
@@ -135,8 +135,7 @@ namespace Common
                 * @param[out] buffers A backend engine should add output tensor buffers allocated itself to buffers vector.
                 *              Otherwise, it should put buffers to be empty.
                 */
-               int GetOutputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers);
+               int GetOutputTensorBuffers(IETensorBuffer &buffers);
 
                /**
                 * @brief Get input layer property information from a given backend engine.
@@ -210,8 +209,7 @@ namespace Common
                 * @param[in] input_buffers It contains tensor buffers to be used as input layer.
                 * @param[in] output_buffers It contains tensor buffers to be used as output layer.
                 */
-               int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                               std::vector<inference_engine_tensor_buffer> &output_buffers);
+               int Run(IETensorBuffer &input_buffers, IETensorBuffer &output_buffers);
 
                /**
                 * @brief Enable or disable Inference engine profiler.
@@ -247,8 +245,7 @@ namespace Common
                int GetNpuBackendType(dictionary *dict, const char *section_name);
                int InitBackendEngine(const std::string &backend_path,
                                                          int backend_type, int device_type);
-               int CheckTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers);
+               int CheckTensorBuffers(IETensorBuffer &buffers);
                int CheckLayerProperty(inference_engine_layer_property &property);
 
                inference_backend_type_e mSelectedBackendEngine;
index 3e8a46f0332c5f3e311d7a89769b33a883339d52..f854452e39f42dd5fef14201ca7ed6c598070207 100644 (file)
@@ -17,6 +17,9 @@
 #ifndef __INFERENCE_ENGINE_TYPE_H__
 #define __INFERENCE_ENGINE_TYPE_H__
 
+#include <map>
+#include <vector>
+
 #ifdef __cplusplus
 extern "C"
 {
@@ -212,8 +215,7 @@ extern "C"
         * @since_tizen 6.0
         */
        typedef struct _inference_engine_layer_property {
-               std::vector<std::string> layer_names; /**< names of layers. */
-               std::vector<inference_engine_tensor_info> tensor_infos; /**< information of tensors. */
+               std::map<std::string, inference_engine_tensor_info> layers;
                // TODO.
        } inference_engine_layer_property;
 
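Reading a property back follows the same pattern. A sketch of iterating the new layers map (the engine pointer and logging mirror the surrounding code, but the snippet itself is illustrative):

    inference_engine_layer_property property;

    // 'engine' is assumed to be an InferenceEngineCommon instance with a
    // model already loaded.
    int ret = engine->GetInputLayerProperty(property);
    if (ret == INFERENCE_ENGINE_ERROR_NONE) {
            for (auto &layer : property.layers) {
                    const std::string &name = layer.first;
                    const inference_engine_tensor_info &info = layer.second;
                    LOGI("layer %s: size %zu", name.c_str(), info.size);
            }
    }
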
index 0cab44f4e20001f1bce63f05c35ae37cc10246f9..47fc83a407c0fa3609f480aeddf6ee650b1abbaa 100644 (file)
@@ -1,7 +1,7 @@
 Name:        inference-engine-interface
 Summary:     Interface of inference engines
 Version:     0.0.2
-Release:     13
+Release:     14
 Group:       Multimedia/Framework
 License:     Apache-2.0
 Source0:     %{name}-%{version}.tar.gz
index 4bc82223a3824fdb329ed4c38338f2cc8f5f0b12..3835469803a9b74d6b56b8a073e069758f1f63d7 100644 (file)
@@ -188,18 +188,15 @@ namespace Common
                return ret;
        }
 
-       int InferenceEngineCommon::CheckTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+       int InferenceEngineCommon::CheckTensorBuffers(IETensorBuffer &buffers)
        {
-               if (buffers.size() == 0) {
+               if (buffers.empty()) {
                        LOGE("tensor buffer vector is empty.");
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                }
 
-               for (std::vector<inference_engine_tensor_buffer>::const_iterator iter =
-                                        buffers.begin();
-                        iter != buffers.end(); ++iter) {
-                       inference_engine_tensor_buffer tensor_buffer = *iter;
+               for (auto& buffer : buffers) {
+                       const inference_engine_tensor_buffer& tensor_buffer = buffer.second;
                        if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
                                LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
                                return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
@@ -219,11 +216,16 @@ namespace Common
                        inference_engine_layer_property &property)
        {
                // Verify tensor info values.
-               std::vector<inference_engine_tensor_info>::const_iterator info_iter;
-               for (info_iter = property.tensor_infos.begin();
-                        info_iter != property.tensor_infos.end(); ++info_iter) {
-                       inference_engine_tensor_info tensor_info = *info_iter;
-                       if (tensor_info.shape.size() == 0 || tensor_info.size == 0) {
+               for (auto& layer : property.layers) {
+                       const std::string& name = layer.first;
+
+                       if (name.empty()) {
+                               LOGE("layer name is invalid.");
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+
+                       const inference_engine_tensor_info& tensor_info = layer.second;
+                       if (tensor_info.shape.empty() || tensor_info.size == 0) {
                                LOGE("shape size of tensor info or size of it is 0.");
                                return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                        }
@@ -237,18 +239,6 @@ namespace Common
                        // TODO. we may need to check shape type also.
                }
 
-               // Verity layer names.
-               std::vector<std::string>::const_iterator name_iter;
-               for (name_iter = property.layer_names.begin();
-                        name_iter != property.layer_names.end(); ++name_iter) {
-                       std::string name = *name_iter;
-
-                       if (name.length() == 0) {
-                               LOGE("layer name is invalid.");
-                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-                       }
-               }
-
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
@@ -393,7 +383,7 @@ namespace Common
                        BackendTable.insert(std::make_pair("mlapi",INFERENCE_BACKEND_MLAPI));
                        BackendTable.insert(std::make_pair("one",INFERENCE_BACKEND_ONE));
 
-                       config->backend_type = BackendTable.find(config->backend_name)->second;
+                       config->backend_type = BackendTable[config->backend_name];
                }
 
                std::string backendLibName;
@@ -518,8 +508,7 @@ namespace Common
                return ret;
        }
 
-       int InferenceEngineCommon::GetInputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+       int InferenceEngineCommon::GetInputTensorBuffers(IETensorBuffer &buffers)
        {
                CHECK_ENGINE_INSTANCE(mBackendHandle);
 
@@ -531,15 +520,14 @@ namespace Common
 
                // If backend engine doesn't provide tensor buffers then just return.
                // In this case, InferenceEngineCommon framework will allocate the tensor buffers.
-               if (buffers.size() == 0) {
+               if (buffers.empty()) {
                        return ret;
                }
 
                return CheckTensorBuffers(buffers);
        }
 
-       int InferenceEngineCommon::GetOutputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+       int InferenceEngineCommon::GetOutputTensorBuffers(IETensorBuffer &buffers)
        {
                CHECK_ENGINE_INSTANCE(mBackendHandle);
 
@@ -551,7 +539,7 @@ namespace Common
 
                // If backend engine doesn't provide tensor buffers then just return.
                // In this case, InferenceEngineCommon framework will allocate the tensor buffers.
-               if (buffers.size() == 0) {
+               if (buffers.empty()) {
                        return ret;
                }
 
@@ -571,8 +559,7 @@ namespace Common
 
                // If backend engine doesn't provide input layer property information then just return.
                // In this case, user has to provide the information manually.
-               if (property.layer_names.size() == 0 &&
-                       property.tensor_infos.size() == 0) {
+               if (property.layers.empty()) {
                        LOGI("backend doesn't provide input layer property.");
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                }
@@ -593,8 +580,7 @@ namespace Common
 
                // If backend engine doesn't provide output layer property information then just return.
                // In this case, user has to provide the information manually.
-               if (property.layer_names.size() == 0 &&
-                       property.tensor_infos.size() == 0) {
+               if (property.layers.empty()) {
                        LOGI("backend doesn't provide output layer property.");
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                }
@@ -607,8 +593,8 @@ namespace Common
        {
                CHECK_ENGINE_INSTANCE(mBackendHandle);
 
-               if (property.layer_names.empty() || property.tensor_infos.empty()) {
-                       LOGE("layer_names or tensor_infos vector of a given property is empty.");
+               if (property.layers.empty()) {
+                       LOGE("property is empty.");
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                }
 
@@ -626,8 +612,8 @@ namespace Common
        {
                CHECK_ENGINE_INSTANCE(mBackendHandle);
 
-               if (property.layer_names.empty()) {
-                       LOGE("layer_names vector of a given property is empty.");
+               if (property.layers.empty()) {
+                       LOGE("property is empty.");
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                }
 
@@ -653,9 +639,8 @@ namespace Common
                return mBackendHandle->GetBackendCapacity(capacity);
        }
 
-       int InferenceEngineCommon::Run(
-                       std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+       int InferenceEngineCommon::Run(IETensorBuffer &input_buffers,
+                       IETensorBuffer &output_buffers)
        {
                CHECK_ENGINE_INSTANCE(mBackendHandle);
 
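Taken together, a caller's flow with the reworked API might look like this sketch (error handling abbreviated; as the comments above note, the framework allocates buffers itself when the backend returns empty maps):

    IETensorBuffer inputs, outputs;

    int ret = engine->GetInputTensorBuffers(inputs);
    if (ret != INFERENCE_ENGINE_ERROR_NONE)
            return ret;

    ret = engine->GetOutputTensorBuffers(outputs);
    if (ret != INFERENCE_ENGINE_ERROR_NONE)
            return ret;

    // Input and output buffers travel as name-keyed maps, so Run() can
    // associate each buffer with its layer regardless of ordering.
    ret = engine->Run(inputs, outputs);
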
index 394bd376572738860035f7f1bfcb02ebdd556656..373339a11aaab98048f1404d4c78c42c17b8db57 100644 (file)
@@ -250,18 +250,16 @@ TEST_P(InferenceEngineTfliteTest, Inference)
        ASSERT_NE(model_type, -1);
 
        inference_engine_layer_property input_property;
-       std::vector<std::string>::iterator iter;
-
-       for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-               inference_engine_tensor_info tensor_info = {
-                       { 1, ch, height, width },
-                       (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
-                       (inference_tensor_data_type_e) tensor_type,
-                       (size_t)(1 * ch * height * width)
-               };
-
-               input_property.layer_names.push_back(*iter);
-               input_property.tensor_infos.push_back(tensor_info);
+
+       inference_engine_tensor_info input_tensor_info = {
+               { 1, ch, height, width },
+               (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+               (inference_tensor_data_type_e) tensor_type,
+               (size_t)(1 * ch * height * width)
+       };
+
+       for (auto& layer : input_layers) {
+               input_property.layers.insert(std::make_pair(layer, input_tensor_info));
        }
 
        ret = engine->SetInputLayerProperty(input_property);
@@ -269,8 +267,15 @@ TEST_P(InferenceEngineTfliteTest, Inference)
 
        inference_engine_layer_property output_property;
 
-       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-               output_property.layer_names.push_back(*iter);
+       inference_engine_tensor_info output_tensor_info = {
+               std::vector<size_t>{1},
+               INFERENCE_TENSOR_SHAPE_NCHW,
+               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+               1
+       };
+
+       for (auto& layer : output_layers) {
+               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
        }
 
        ret = engine->SetOutputLayerProperty(output_property);
@@ -279,13 +284,15 @@ TEST_P(InferenceEngineTfliteTest, Inference)
        ret = engine->Load(models, (inference_model_format_e) model_type);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-       std::vector<inference_engine_tensor_buffer> inputs, outputs;
+       IETensorBuffer inputs, outputs;
        ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        // Copy input image tensor data from a given file to input tensor buffer.
-       for (int i = 0; i < (int) image_paths.size(); ++i) {
-               CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+       ASSERT_EQ(image_paths.size(), inputs.size());
+       int imageIndex = 0;
+       for (auto& input : inputs) {
+               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
        }
 
        for (int repeat = 0; repeat < iteration; ++repeat) {
@@ -439,26 +446,34 @@ TEST_P(InferenceEngineTfliteCLTunerTest, Inference)
        int model_type = GetModelInfo(model_paths, models);
        ASSERT_NE(model_type, -1);
 
-       std::vector<std::string>::iterator iter;
+       inference_engine_layer_property input_property;
 
-       inference_engine_tensor_info tensor_info = {
+       inference_engine_tensor_info input_tensor_info = {
                { 1, ch, height, width },
                INFERENCE_TENSOR_SHAPE_NCHW,
                static_cast<inference_tensor_data_type_e>(tensor_type),
                static_cast<size_t>(1 * ch * height * width)
        };
 
-       inference_engine_layer_property input_property;
-
-       for (auto &input : input_layers) {
-               input_property.layer_names.push_back(input);
-               input_property.tensor_infos.push_back(tensor_info);
+       for (auto& input : input_layers) {
+               input_property.layers.insert(std::make_pair(input, input_tensor_info));
        }
 
        ret = engine->SetInputLayerProperty(input_property);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-       inference_engine_layer_property output_property = { output_layers, {} };
+       inference_engine_layer_property output_property;
+
+       inference_engine_tensor_info output_tensor_info = {
+               std::vector<size_t>{1},
+               INFERENCE_TENSOR_SHAPE_NCHW,
+               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+               1
+       };
+
+       for (auto& layer : output_layers) {
+               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
+       }
 
        ret = engine->SetOutputLayerProperty(output_property);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -466,13 +481,15 @@ TEST_P(InferenceEngineTfliteCLTunerTest, Inference)
        ret = engine->Load(models, (inference_model_format_e) model_type);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-       std::vector<inference_engine_tensor_buffer> inputs, outputs;
+       IETensorBuffer inputs, outputs;
        ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        // Copy input image tensor data from a given file to input tensor buffer.
-       for (int i = 0; i < (int) image_paths.size(); ++i) {
-               CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+       ASSERT_EQ(image_paths.size(), inputs.size());
+       int imageIndex = 0;
+       for (auto& input : inputs) {
+               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
        }
 
        for (int repeat = 0; repeat < iteration; ++repeat) {
@@ -601,18 +618,16 @@ TEST_P(InferenceEngineCaffeTest, Inference)
        ASSERT_NE(model_type, -1);
 
        inference_engine_layer_property input_property;
-       std::vector<std::string>::iterator iter;
-
-       for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-               inference_engine_tensor_info tensor_info = {
-                       { 1, ch, height, width },
-                       (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
-                       (inference_tensor_data_type_e) tensor_type,
-                       (size_t)(1 * ch * height * width)
-               };
-
-               input_property.layer_names.push_back(*iter);
-               input_property.tensor_infos.push_back(tensor_info);
+
+       inference_engine_tensor_info input_tensor_info = {
+               { 1, ch, height, width },
+               INFERENCE_TENSOR_SHAPE_NCHW,
+               (inference_tensor_data_type_e) tensor_type,
+               (size_t)(1 * ch * height * width)
+       };
+
+       for (auto& layer : input_layers) {
+               input_property.layers.insert(std::make_pair(layer, input_tensor_info));
        }
 
        ret = engine->SetInputLayerProperty(input_property);
@@ -620,8 +635,15 @@ TEST_P(InferenceEngineCaffeTest, Inference)
 
        inference_engine_layer_property output_property;
 
-       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-               output_property.layer_names.push_back(*iter);
+       inference_engine_tensor_info output_tensor_info = {
+               std::vector<size_t>{1},
+               INFERENCE_TENSOR_SHAPE_NCHW,
+               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+               1
+       };
+
+       for (auto& layer : output_layers) {
+               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
        }
 
        ret = engine->SetOutputLayerProperty(output_property);
@@ -630,13 +652,15 @@ TEST_P(InferenceEngineCaffeTest, Inference)
        ret = engine->Load(models, (inference_model_format_e) model_type);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-       std::vector<inference_engine_tensor_buffer> inputs, outputs;
+       IETensorBuffer inputs, outputs;
        ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        // Copy input image tensor data from a given file to input tensor buffer.
-       for (int i = 0; i < (int) image_paths.size(); ++i) {
-               CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+       ASSERT_EQ(image_paths.size(), inputs.size());
+       int imageIndex = 0;
+       for (auto& input : inputs) {
+               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
        }
 
        for (int repeat = 0; repeat < iteration; ++repeat) {
@@ -758,18 +782,16 @@ TEST_P(InferenceEngineDldtTest, Inference)
        ASSERT_NE(model_type, -1);
 
        inference_engine_layer_property input_property;
-       std::vector<std::string>::iterator iter;
-
-       for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-               inference_engine_tensor_info tensor_info = {
-                       { 1, ch, height, width },
-                       (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
-                       (inference_tensor_data_type_e) tensor_type,
-                       (size_t)(1 * ch * height * width)
-               };
-
-               input_property.layer_names.push_back(*iter);
-               input_property.tensor_infos.push_back(tensor_info);
+
+       inference_engine_tensor_info input_tensor_info = {
+               { 1, ch, height, width },
+               INFERENCE_TENSOR_SHAPE_NCHW,
+               (inference_tensor_data_type_e) tensor_type,
+               (size_t)(1 * ch * height * width)
+       };
+
+       for (auto& layer : input_layers) {
+               input_property.layers.insert(std::make_pair(layer, input_tensor_info));
        }
 
        ret = engine->SetInputLayerProperty(input_property);
@@ -777,8 +799,15 @@ TEST_P(InferenceEngineDldtTest, Inference)
 
        inference_engine_layer_property output_property;
 
-       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-               output_property.layer_names.push_back(*iter);
+       inference_engine_tensor_info output_tensor_info = {
+               { 1, ch, height, width },
+               INFERENCE_TENSOR_SHAPE_NCHW,
+               (inference_tensor_data_type_e) tensor_type,
+               (size_t)(1 * ch * height * width)
+       };
+
+       for (auto& layer : output_layers) {
+               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
        }
 
        ret = engine->SetOutputLayerProperty(output_property);
@@ -787,13 +816,15 @@ TEST_P(InferenceEngineDldtTest, Inference)
        ret = engine->Load(models, (inference_model_format_e) model_type);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-       std::vector<inference_engine_tensor_buffer> inputs, outputs;
+       IETensorBuffer inputs, outputs;
        ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        // Copy input image tensor data from a given file to input tensor buffer.
-       for (int i = 0; i < (int) image_paths.size(); ++i) {
-               CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+       ASSERT_EQ(image_paths.size(), inputs.size());
+       int imageIndex = 0;
+       for (auto& input : inputs) {
+               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
        }
 
        for (int repeat = 0; repeat < iteration; ++repeat) {
index 88f75d18d2fde4a599f1cca3f50912c1590170fe..6a430b7a695764cabe1b86fd0e32c44c9e3fbaaf 100644 (file)
@@ -321,9 +321,8 @@ TEST_P(InferenceEngineTestCase_G4, SetInputLayer_P)
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        inference_engine_layer_property input_property;
-       std::vector<std::string>::iterator iter;
 
-       for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+       for (auto& layer : input_layers) {
                inference_engine_tensor_info tensor_info = {
                        { 1, ch, height, width },
                        (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
@@ -331,8 +330,7 @@ TEST_P(InferenceEngineTestCase_G4, SetInputLayer_P)
                        (size_t)(1 * ch * height * width)
                };
 
-               input_property.layer_names.push_back(*iter);
-               input_property.tensor_infos.push_back(tensor_info);
+               input_property.layers.insert(std::make_pair(layer, tensor_info));
        }
        ret = engine->SetInputLayerProperty(input_property);
        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -359,9 +357,8 @@ TEST_P(InferenceEngineTestCase_G5, SetInputLayer_N1)
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        inference_engine_layer_property output_property;
-       std::vector<std::string>::iterator iter;
 
-       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+       for (auto& layer : output_layers) {
                inference_engine_tensor_info tensor_info = {
                        { 1, ch, height, width },
                        (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
@@ -369,8 +366,7 @@ TEST_P(InferenceEngineTestCase_G5, SetInputLayer_N1)
                        (size_t)(1 * ch * height * width)
                };
 
-               output_property.layer_names.push_back(*iter);
-               output_property.tensor_infos.push_back(tensor_info);
+               output_property.layers.insert(std::make_pair(layer, tensor_info));
        }
        ret = engine->SetInputLayerProperty(output_property);
        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
@@ -417,9 +413,8 @@ TEST_P(InferenceEngineTestCase_G4, SetOutputLayer_P)
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        inference_engine_layer_property output_property;
-       std::vector<std::string>::iterator iter;
 
-       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+       for (auto& layer : output_layers) {
                inference_engine_tensor_info tensor_info = {
                        { 1, ch, height, width },
                        (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
@@ -427,8 +422,7 @@ TEST_P(InferenceEngineTestCase_G4, SetOutputLayer_P)
                        (size_t)(1 * ch * height * width)
                };
 
-               output_property.layer_names.push_back(*iter);
-               output_property.tensor_infos.push_back(tensor_info);
+               output_property.layers.insert(std::make_pair(layer, tensor_info));
        }
        ret = engine->SetOutputLayerProperty(output_property);
        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -455,9 +449,8 @@ TEST_P(InferenceEngineTestCase_G5, SetOutputLayer_N1)
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        inference_engine_layer_property output_property;
-       std::vector<std::string>::iterator iter;
 
-       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+       for (auto& layer : output_layers) {
                inference_engine_tensor_info tensor_info = {
                        { 1, ch, height, width },
                        (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
@@ -465,8 +458,7 @@ TEST_P(InferenceEngineTestCase_G5, SetOutputLayer_N1)
                        (size_t)(1 * ch * height * width)
                };
 
-               output_property.layer_names.push_back(*iter);
-               output_property.tensor_infos.push_back(tensor_info);
+               output_property.layers.insert(std::make_pair(layer, tensor_info));
        }
        ret = engine->SetOutputLayerProperty(output_property);
        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
@@ -571,18 +563,16 @@ TEST_P(InferenceEngineTestCase_G6, Inference_P)
        ASSERT_NE(model_type, -1);
 
        inference_engine_layer_property input_property;
-       std::vector<std::string>::iterator iter;
 
-       for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-               inference_engine_tensor_info tensor_info = {
-                       { 1, ch, height, width },
-                       (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
-                       (inference_tensor_data_type_e) tensor_type,
-                       (size_t)(1 * ch * height * width)
-               };
+       inference_engine_tensor_info input_tensor_info = {
+               { 1, ch, height, width },
+               (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+               (inference_tensor_data_type_e) tensor_type,
+               (size_t)(1 * ch * height * width)
+       };
 
-               input_property.layer_names.push_back(*iter);
-               input_property.tensor_infos.push_back(tensor_info);
+       for (auto& layer : input_layers) {
+               input_property.layers.insert(std::make_pair(layer, input_tensor_info));
        }
 
        ret = engine->SetInputLayerProperty(input_property);
@@ -590,8 +580,12 @@ TEST_P(InferenceEngineTestCase_G6, Inference_P)
 
        inference_engine_layer_property output_property;
 
-       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-               output_property.layer_names.push_back(*iter);
+       inference_engine_tensor_info output_tensor_info = { std::vector<size_t>{1},
+                                                       INFERENCE_TENSOR_SHAPE_NCHW,
+                                                       INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                                       1};
+       for (auto& layer : output_layers) {
+               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
        }
 
        ret = engine->SetOutputLayerProperty(output_property);
@@ -600,13 +594,15 @@ TEST_P(InferenceEngineTestCase_G6, Inference_P)
        ret = engine->Load(models, (inference_model_format_e) model_type);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-       std::vector<inference_engine_tensor_buffer> inputs, outputs;
+       IETensorBuffer inputs, outputs;
        ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        // Copy input image tensor data from a given file to input tensor buffer.
-       for (int i = 0; i < (int) image_paths.size(); ++i) {
-               CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+       ASSERT_EQ(image_paths.size(), inputs.size());
+       int imageIndex = 0;
+       for (auto& input : inputs) {
+               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
        }
 
        for (int repeat = 0; repeat < iteration; ++repeat) {
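The image-copy loops above depend on std::map iteration order (sorted by layer name) lining up with the order of image_paths. A sketch of pairing files to layers explicitly instead, with illustrative names and paths:

    // Pair each image file with a named input layer rather than relying on
    // map iteration order (layer name and file path are illustrative).
    std::map<std::string, std::string> imageForLayer = {
            { "input_tensor", "/opt/usr/images/image_classification.bin" }
    };

    for (auto &entry : imageForLayer) {
            auto iter = inputs.find(entry.first);
            if (iter != inputs.end())
                    CopyFileToMemory(entry.second.c_str(), iter->second,
                                     iter->second.size);
    }
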
index de97a748cdebbfc501ec64da3b4b67200fbf53b9..3033bc62e73a80b05bb4a4b42fa5c24799ead419 100644 (file)
@@ -142,8 +142,8 @@ std::string GetModelString(const int model_type)
 }
 
 int PrepareTensorBuffers(InferenceEngineCommon *engine,
-                                                std::vector<inference_engine_tensor_buffer> &inputs,
-                                                std::vector<inference_engine_tensor_buffer> &outputs)
+                                                IETensorBuffer &inputs,
+                                                IETensorBuffer &outputs)
 {
        int ret = engine->GetInputTensorBuffers(inputs);
        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -154,13 +154,13 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine,
                EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
                // If backend is OpenCV then the buffers will be allocated out of this function.
-               if (input_property.tensor_infos.empty()) {
+               if (input_property.layers.empty()) {
                        return INFERENCE_ENGINE_ERROR_NONE;
                }
 
-               for (int i = 0; i < (int) input_property.tensor_infos.size(); ++i) {
-                       inference_engine_tensor_info tensor_info =
-                                       input_property.tensor_infos[i];
+               for (auto iter = input_property.layers.begin(); iter != input_property.layers.end(); ++iter) {
+                       inference_engine_tensor_info tensor_info = iter->second;
+
                        inference_engine_tensor_buffer tensor_buffer;
                        if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
                                tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
@@ -175,7 +175,7 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine,
                        EXPECT_TRUE(tensor_buffer.buffer);
                        tensor_buffer.owner_is_backend = 0;
                        tensor_buffer.data_type = tensor_info.data_type;
-                       inputs.push_back(tensor_buffer);
+                       inputs.insert(std::make_pair(iter->first, tensor_buffer));
                }
        }
 
@@ -188,13 +188,13 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine,
                EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
                // If backend is OpenCV then the buffers will be allocated out of this function.
-               if (output_property.tensor_infos.empty()) {
+               if (output_property.layers.empty()) {
                        return INFERENCE_ENGINE_ERROR_NONE;
                }
 
-               for (int i = 0; i < (int) output_property.tensor_infos.size(); ++i) {
-                       inference_engine_tensor_info tensor_info =
-                                       output_property.tensor_infos[i];
+               for (auto iter = output_property.layers.begin(); iter != output_property.layers.end(); ++iter) {
+                       inference_engine_tensor_info tensor_info = iter->second;
+
                        inference_engine_tensor_buffer tensor_buffer;
                        if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
                                tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
@@ -215,20 +215,19 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine,
                        EXPECT_TRUE(tensor_buffer.buffer);
                        tensor_buffer.owner_is_backend = 0;
                        tensor_buffer.data_type = tensor_info.data_type;
-                       outputs.push_back(tensor_buffer);
+                       outputs.insert(std::make_pair(iter->first, tensor_buffer));
                }
        }
 
        return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
-                                                 std::vector<inference_engine_tensor_buffer> &outputs)
+void CleanupTensorBuffers(IETensorBuffer &inputs,
+                                                 IETensorBuffer &outputs)
 {
        if (!inputs.empty()) {
-               std::vector<inference_engine_tensor_buffer>::iterator iter;
-               for (iter = inputs.begin(); iter != inputs.end(); iter++) {
-                       inference_engine_tensor_buffer tensor_buffer = *iter;
+               for (auto iter = inputs.begin(); iter != inputs.end(); iter++) {
+                       inference_engine_tensor_buffer tensor_buffer = iter->second;
 
                        // If tensor buffer owner is a backend then skip to release the tensor buffer.
                        // This tensor buffer will be released by the backend.
@@ -241,13 +240,12 @@ void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
                        else
                                delete[](unsigned char *) tensor_buffer.buffer;
                }
-               std::vector<inference_engine_tensor_buffer>().swap(inputs);
+               IETensorBuffer().swap(inputs);
        }
 
        if (!outputs.empty()) {
-               std::vector<inference_engine_tensor_buffer>::iterator iter;
-               for (iter = outputs.begin(); iter != outputs.end(); iter++) {
-                       inference_engine_tensor_buffer tensor_buffer = *iter;
+               for (auto iter = outputs.begin(); iter != outputs.end(); iter++) {
+                       inference_engine_tensor_buffer tensor_buffer = iter->second;
 
                        // If tensor buffer owner is a backend then skip to release the tensor buffer.
                        // This tensor buffer will be released by the backend.
@@ -260,7 +258,7 @@ void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
                        else
                                delete[](unsigned char *) tensor_buffer.buffer;
                }
-               std::vector<inference_engine_tensor_buffer>().swap(outputs);
+               IETensorBuffer().swap(outputs);
        }
 }
 
@@ -284,26 +282,27 @@ void CopyFileToMemory(const char *file_name,
 }
 
 void FillOutputResult(InferenceEngineCommon *engine,
-                                         std::vector<inference_engine_tensor_buffer> &outputs,
+                                         IETensorBuffer &outputs,
                                          tensor_t &outputData)
 {
        inference_engine_layer_property property;
        int ret = engine->GetOutputLayerProperty(property);
        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-       for (int i = 0; i < (int) property.tensor_infos.size(); ++i) {
-               inference_engine_tensor_info tensor_info = property.tensor_infos[i];
+       for (auto& layer : property.layers) {
+               const inference_engine_tensor_info& tensor_info = layer.second;
 
                std::vector<int> tmpDimInfo;
-               for (int i = 0; i < (int) tensor_info.shape.size(); i++) {
-                       tmpDimInfo.push_back(tensor_info.shape[i]);
+               for (auto& dim : tensor_info.shape) {
+                       tmpDimInfo.push_back(dim);
                }
 
                outputData.dimInfo.push_back(tmpDimInfo);
 
                // Normalize output tensor data converting it to float type in case of quantized model.
                if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-                       unsigned char *ori_buf = (unsigned char *) outputs[i].buffer;
+                       auto *ori_buf = static_cast<unsigned char *>(outputs[layer.first].buffer);
+
                        float *new_buf = new float[tensor_info.size];
                        ASSERT_TRUE(new_buf);
 
@@ -312,13 +311,13 @@ void FillOutputResult(InferenceEngineCommon *engine,
                        }
 
                        // replace original buffer with new one, and release origin one.
-                       outputs[i].buffer = new_buf;
-                       if (!outputs[i].owner_is_backend) {
+                       outputs[layer.first].buffer = new_buf;
+                       if (!outputs[layer.first].owner_is_backend) {
                                delete[] ori_buf;
                        }
                }
 
-               outputData.data.push_back((void *) outputs[i].buffer);
+               outputData.data.push_back(static_cast<void *>(outputs[layer.first].buffer));
        }
 }
 
@@ -494,28 +493,29 @@ int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers,
        return ret;
 }
 
-int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &output)
+int VerifyAICHandGesture1Results(IETensorBuffer &output)
 {
        // ### output[0] ###
        // output name : "mobilenetv2/boundingbox"
        // data type   : int64
        // tensor shape : 1 * 56 * 56
+       std::string outputNamebbox("mobilenetv2/boundingbox");
        std::ifstream fin("/opt/usr/images/boundingbox.answer", std::ios_base::in | std::ios_base::binary);
-       char *o_buffer = new (std::nothrow) char[output[0].size];
+       char *o_buffer = new (std::nothrow) char[output[outputNamebbox].size];
        if (!o_buffer) {
                std::cout << "failed to alloc o_buffer." << std::endl;
                return 0;
        }
 
-       fin.read(o_buffer, output[0].size);
+       fin.read(o_buffer, output[outputNamebbox].size);
        fin.close();
 
        const int *f_answer = (const int *)o_buffer;
-       const unsigned int output_size = output[0].size / 8;
+       const unsigned int output_size = output[outputNamebbox].size / 8;
        for (unsigned int i = 0; i < output_size; ++i) {
-               if (static_cast<int *>(output[0].buffer)[i] != f_answer[i]) {
+               if (static_cast<int *>(output[outputNamebbox].buffer)[i] != f_answer[i]) {
                        std::cout << "boundingbox wrong answer at index[" << i << "]" << std::endl;
-                       std::cout << static_cast<int *>(output[0].buffer)[i] << " vs " << f_answer[i] << std::endl;
+                       std::cout << static_cast<int *>(output[outputNamebbox].buffer)[i] << " vs " << f_answer[i] << std::endl;
                        delete[] o_buffer;
                        return 0;
                }
@@ -527,21 +527,22 @@ int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &ou
        // output name : "mobilenetv2/heatmap"
        // data type   : float
        // tensor shape : 1 * 56 * 56 *21
+       std::string outputNameHeatMap("mobilenetv2/heatmap");
        std::ifstream fin_2("/opt/usr/images/heatmap.answer", std::ios_base::in | std::ios_base::binary);
-       char *o_buffer_2 = new (std::nothrow) char[output[1].size];
+       char *o_buffer_2 = new (std::nothrow) char[output[outputNameHeatMap].size];
        if (!o_buffer_2) {
                std::cout << "failed to alloc o_buffer_2." << std::endl;
                return 0;
        }
 
-       fin_2.read(o_buffer_2, output[1].size);
+       fin_2.read(o_buffer_2, output[outputNameHeatMap].size);
        fin_2.close();
 
        const float *f_answer_2 = (const float *)o_buffer_2;
-       const unsigned int output_size_2 = output[1].size / 8;
+       const unsigned int output_size_2 = output[outputNameHeatMap].size / 8;
        const int margin = 2;
        for (unsigned int i = 0; i < output_size_2; ++i) {
-               const int value_left = static_cast<int>((static_cast<float *>(output[1].buffer)[i]));
+               const int value_left = static_cast<int>((static_cast<float *>(output[outputNameHeatMap].buffer)[i]));
                const int value_right = static_cast<int>(f_answer_2[i]);
                int diff = value_left - value_right;
                diff = diff < 0 ? diff * -1 : diff;
@@ -558,16 +559,17 @@ int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &ou
        return 1;
 }
 
-int VerifyAICHandGesture2Results(std::vector<inference_engine_tensor_buffer> &output,
+int VerifyAICHandGesture2Results(IETensorBuffer &output,
                                                                 std::vector<int> &answers)
 {
        // ### output[0] ###
        // output name : "mobilenetv2/coord_refine"
        // data type   : float
        // tensor shape : 1 * 21 * 2
-       unsigned int size = output[0].size / 4;
+       std::string outputNameCoord("mobilenetv2/coord_refine");
+       unsigned int size = output[outputNameCoord].size / 4;
        for (unsigned int i = 0; i < size; ++i) {
-               unsigned int value = static_cast<unsigned int>(static_cast<float *>(output[0].buffer)[i] * 100.0f);
+               unsigned int value = static_cast<unsigned int>(static_cast<float *>(output[outputNameCoord].buffer)[i] * 100.0f);
                if (value != static_cast<unsigned int>(answers[i])) {
                        std::cout << "coord_refine wrong answer at index[" << i << "]" << std::endl;
                        std::cout << value << " vs " << answers[i] << std::endl;
@@ -579,7 +581,8 @@ int VerifyAICHandGesture2Results(std::vector<inference_engine_tensor_buffer> &ou
        // output name : "mobilenetv2/gesture"
        // data type   : int64
        // tensor shape : 1 * 1 * 1
-       unsigned int value = static_cast<unsigned int>(static_cast<long long *>(output[1].buffer)[0]);
+       std::string outputNameGesture("mobilenetv2/gesture");
+       unsigned int value = static_cast<unsigned int>(static_cast<long long *>(output[outputNameGesture].buffer)[0]);
        if (value != static_cast<unsigned int>(answers[answers.size() - 1])) {
                        std::cout << "gesture wrong answer at index[0]" << std::endl;
                        std::cout << value << " vs " << answers[0] << std::endl;
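One subtlety of the name-keyed lookups used here: std::map::operator[] silently default-constructs a zeroed tensor buffer when a layer name is absent, whereas at() throws. A sketch of the stricter lookup:

    try {
            // at() fails loudly on an unknown layer name instead of silently
            // inserting an empty inference_engine_tensor_buffer.
            inference_engine_tensor_buffer &buf = output.at("mobilenetv2/boundingbox");
            std::cout << "boundingbox buffer: " << buf.size << " bytes" << std::endl;
    } catch (const std::out_of_range &) {
            std::cout << "no such output layer" << std::endl;
    }
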
index 8bc133d7946fef2e94037d254dc62a2a08b4bc4c..3904a229804acd2cb8ee53cb85bd288d557e2295 100644 (file)
@@ -65,18 +65,18 @@ int GetModelInfo(std::vector<std::string> &model_paths,
 std::string GetModelString(const int model_type);
 
 int PrepareTensorBuffers(InferenceEngineCommon *engine,
-                                                std::vector<inference_engine_tensor_buffer> &inputs,
-                                                std::vector<inference_engine_tensor_buffer> &outputs);
+                                                IETensorBuffer &inputs,
+                                                IETensorBuffer &outputs);
 
-void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
-                                                 std::vector<inference_engine_tensor_buffer> &outputs);
+void CleanupTensorBuffers(IETensorBuffer &inputs,
+                                                 IETensorBuffer &outputs);
 
 void CopyFileToMemory(const char *file_name,
                                          inference_engine_tensor_buffer &buffer,
                                          unsigned int size);
 
 void FillOutputResult(InferenceEngineCommon *engine,
-                                         std::vector<inference_engine_tensor_buffer> &outputs,
+                                         IETensorBuffer &outputs,
                                          tensor_t &outputData);
 
 int VerifyImageClassificationResults(tensor_t &outputData, int answer);
@@ -92,7 +92,7 @@ int VerifyFacialLandmarkDetectionResults(tensor_t &outputData,
 int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers,
                                                                int height, int width);
 
-int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &output);
+int VerifyAICHandGesture1Results(IETensorBuffer &output);
 
-int VerifyAICHandGesture2Results(std::vector<inference_engine_tensor_buffer> &output,
+int VerifyAICHandGesture2Results(IETensorBuffer &output,
                                                                 std::vector<int> &answers);