From 8aeb3939ad9c151e1e5ccbe45235589e43372f3d Mon Sep 17 00:00:00 2001
From: Tae-Young Chung
Date: Wed, 10 Mar 2021 18:05:36 +0900
Subject: [PATCH] Change members of inference_engine_layer_property structure,
 and change vector to map

This patch is to use inference_engine_tensor_info and
inference_engine_tensor_buffer based on input/output layers' names.

Change-Id: I18d3e7ae80a8c2a1e6236938571b8f22b12b2e1e
Signed-off-by: Tae-Young Chung
---
 include/inference_engine_common.h         |  11 +--
 include/inference_engine_common_impl.h    |  13 +--
 include/inference_engine_type.h           |   6 +-
 packaging/inference-engine-interface.spec |   2 +-
 src/inference_engine_common_impl.cpp      |  69 ++++++-------
 test/src/inference_engine_profiler.cpp    | 155 ++++++++++++++++++------
 test/src/inference_engine_tc.cpp          |  58 ++++++-----
 test/src/inference_engine_test_common.cpp |  93 +++++++++---------
 test/src/inference_engine_test_common.h   |  14 +--
 9 files changed, 217 insertions(+), 204 deletions(-)

diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h
index d7db0d5..47695ec 100644
--- a/include/inference_engine_common.h
+++ b/include/inference_engine_common.h
@@ -17,11 +17,13 @@
 #ifndef __INFERENCE_ENGINE_COMMON_H__
 #define __INFERENCE_ENGINE_COMMON_H__
 
+#include <map>
 #include <vector>
 #include <string>
 
 #include "inference_engine_type.h"
 
+using IETensorBuffer = std::map<std::string, inference_engine_tensor_buffer>;
 namespace InferenceEngineInterface
 {
 namespace Common
@@ -88,8 +90,7 @@ namespace Common
	 * @param[out] buffers A backend engine should add input tensor buffers allocated itself to buffers vector.
	 *                     Otherwise, it should put buffers to be empty.
	 */
-	virtual int GetInputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+	virtual int GetInputTensorBuffers(IETensorBuffer &buffers) = 0;
 
	/**
	 * @brief Get output tensor buffers from a given backend engine.
@@ -105,8 +106,7 @@ namespace Common
	 * @param[out] buffers A backend engine should add output tensor buffers allocated itself to buffers vector.
	 *                     Otherwise, it should put buffers to be empty.
	 */
-	virtual int GetOutputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+	virtual int GetOutputTensorBuffers(IETensorBuffer &buffers) = 0;
 
	/**
	 * @brief Get input layer property information from a given backend engine.
@@ -185,8 +185,7 @@ namespace Common
	 * @param[in] output_buffers It contains tensor buffers to be used as output layer.
	 */
	virtual int
-	Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-		std::vector<inference_engine_tensor_buffer> &output_buffers) = 0;
+	Run(IETensorBuffer &input_buffers, IETensorBuffer &output_buffers) = 0;
 };
 
 typedef void destroy_t(IInferenceEngineCommon *);
diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h
index 5966557..e3fad91 100644
--- a/include/inference_engine_common_impl.h
+++ b/include/inference_engine_common_impl.h
@@ -17,6 +17,7 @@
 #ifndef __INFERENCE_ENGINE_COMMON_IMPL_H__
 #define __INFERENCE_ENGINE_COMMON_IMPL_H__
 
+#include <map>
 #include <vector>
 #include <string>
 
@@ -118,8 +119,7 @@ namespace Common
	 * @param[out] buffers A backend engine should add input tensor buffers allocated itself to buffers vector.
	 *                     Otherwise, it should put buffers to be empty.
	 */
-	int GetInputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers);
+	int GetInputTensorBuffers(IETensorBuffer &buffers);
 
	/**
	 * @brief Get output tensor buffers from a given backend engine.
@@ -135,8 +135,7 @@ namespace Common
	 * @param[out] buffers A backend engine should add output tensor buffers allocated itself to buffers vector.
	 *                     Otherwise, it should put buffers to be empty.
	 */
-	int GetOutputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers);
+	int GetOutputTensorBuffers(IETensorBuffer &buffers);
 
	/**
	 * @brief Get input layer property information from a given backend engine.
@@ -210,8 +209,7 @@ namespace Common
	 * @param[in] input_buffers It contains tensor buffers to be used as input layer.
	 * @param[in] output_buffers It contains tensor buffers to be used as output layer.
	 */
-	int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-			std::vector<inference_engine_tensor_buffer> &output_buffers);
+	int Run(IETensorBuffer &input_buffers, IETensorBuffer &output_buffers);
 
	/**
	 * @brief Enable or disable Inference engine profiler.
@@ -247,8 +245,7 @@ namespace Common
		int GetNpuBackendType(dictionary *dict, const char *section_name);
		int InitBackendEngine(const std::string &backend_path,
							  int backend_type, int device_type);
-		int CheckTensorBuffers(
-				std::vector<inference_engine_tensor_buffer> &buffers);
+		int CheckTensorBuffers(IETensorBuffer &buffers);
		int CheckLayerProperty(inference_engine_layer_property &property);
 
		inference_backend_type_e mSelectedBackendEngine;
diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h
index 3e8a46f..f854452 100644
--- a/include/inference_engine_type.h
+++ b/include/inference_engine_type.h
@@ -17,6 +17,9 @@
 #ifndef __INFERENCE_ENGINE_TYPE_H__
 #define __INFERENCE_ENGINE_TYPE_H__
 
+#include <map>
+#include <string>
+
 #ifdef __cplusplus
 extern "C"
 {
@@ -212,8 +215,7 @@ extern "C"
	 * @since_tizen 6.0
	 */
	typedef struct _inference_engine_layer_property {
-		std::vector<std::string> layer_names; /**< names of layers. */
-		std::vector<inference_engine_tensor_info> tensor_infos; /**< information of tensors. */
+		std::map<std::string, inference_engine_tensor_info> layers;
		// TODO.
	} inference_engine_layer_property;
 
diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index 0cab44f..47fc83a 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -1,7 +1,7 @@
 Name:        inference-engine-interface
 Summary:     Interface of inference engines
 Version:     0.0.2
-Release:     13
+Release:     14
 Group:       Multimedia/Framework
 License:     Apache-2.0
 Source0:     %{name}-%{version}.tar.gz
diff --git a/src/inference_engine_common_impl.cpp b/src/inference_engine_common_impl.cpp
index 4bc8222..3835469 100644
--- a/src/inference_engine_common_impl.cpp
+++ b/src/inference_engine_common_impl.cpp
@@ -188,18 +188,15 @@ namespace Common
		return ret;
	}
 
-	int InferenceEngineCommon::CheckTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers)
+	int InferenceEngineCommon::CheckTensorBuffers(IETensorBuffer &buffers)
	{
-		if (buffers.size() == 0) {
+		if (buffers.empty()) {
			LOGE("tensor buffer vector is empty.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}
 
-		for (std::vector<inference_engine_tensor_buffer>::const_iterator iter =
-					 buffers.begin();
-			 iter != buffers.end(); ++iter) {
-			inference_engine_tensor_buffer tensor_buffer = *iter;
+		for (auto& buffer : buffers) {
+			const inference_engine_tensor_buffer& tensor_buffer = buffer.second;
			if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
				LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
@@ -219,11 +216,16 @@ namespace Common
			inference_engine_layer_property &property)
	{
		// Verity tensor info values.
-		std::vector<inference_engine_tensor_info>::const_iterator info_iter;
-		for (info_iter = property.tensor_infos.begin();
-			 info_iter != property.tensor_infos.end(); ++info_iter) {
-			inference_engine_tensor_info tensor_info = *info_iter;
-			if (tensor_info.shape.size() == 0 || tensor_info.size == 0) {
+		for (auto& layer : property.layers) {
+			const std::string& name = layer.first;
+
+			if (name.empty()) {
+				LOGE("layer name is invalid.");
+				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+			}
+
+			const inference_engine_tensor_info& tensor_info = layer.second;
+			if (tensor_info.shape.empty() || tensor_info.size == 0) {
				LOGE("shape size of tensor info or size of it is 0.");
				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
			}
@@ -237,18 +239,6 @@ namespace Common
			// TODO. we may need to check shape type also.
		}
 
-		// Verity layer names.
-		std::vector<std::string>::const_iterator name_iter;
-		for (name_iter = property.layer_names.begin();
-			 name_iter != property.layer_names.end(); ++name_iter) {
-			std::string name = *name_iter;
-
-			if (name.length() == 0) {
-				LOGE("layer name is invalid.");
-				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-			}
-		}
-
		return INFERENCE_ENGINE_ERROR_NONE;
	}
 
@@ -393,7 +383,7 @@ namespace Common
			BackendTable.insert(std::make_pair("mlapi",INFERENCE_BACKEND_MLAPI));
			BackendTable.insert(std::make_pair("one",INFERENCE_BACKEND_ONE));
 
-			config->backend_type = BackendTable.find(config->backend_name)->second;
+			config->backend_type = BackendTable[config->backend_name];
		}
 
		std::string backendLibName;
@@ -518,8 +508,7 @@ namespace Common
		return ret;
	}
 
-	int InferenceEngineCommon::GetInputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers)
+	int InferenceEngineCommon::GetInputTensorBuffers(IETensorBuffer &buffers)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);
 
@@ -531,15 +520,14 @@ namespace Common
 
		// If backend engine doesn't provide tensor buffers then just return.
		// In this case, InferenceEngineCommon framework will allocate the tensor buffers.
-		if (buffers.size() == 0) {
+		if (buffers.empty()) {
			return ret;
		}
 
		return CheckTensorBuffers(buffers);
	}
 
-	int InferenceEngineCommon::GetOutputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers)
+	int InferenceEngineCommon::GetOutputTensorBuffers(IETensorBuffer &buffers)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);
 
@@ -551,7 +539,7 @@ namespace Common
 
		// If backend engine doesn't provide tensor buffers then just return.
		// In this case, InferenceEngineCommon framework will allocate the tensor buffers.
-		if (buffers.size() == 0) {
+		if (buffers.empty()) {
			return ret;
		}
 
@@ -571,8 +559,7 @@ namespace Common
 
		// If backend engine doesn't provide input layer property information then just return.
		// In this case, user has to provide the information manually.
-		if (property.layer_names.size() == 0 &&
-			property.tensor_infos.size() == 0) {
+		if (property.layers.empty()) {
			LOGI("backend doesn't provide input layer property.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}
@@ -593,8 +580,7 @@ namespace Common
 
		// If backend engine doesn't provide output layer property information then just return.
		// In this case, user has to provide the information manually.
-		if (property.layer_names.size() == 0 &&
-			property.tensor_infos.size() == 0) {
+		if (property.layers.empty()) {
			LOGI("backend doesn't provide output layer property.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}
@@ -607,8 +593,8 @@ namespace Common
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);
 
-		if (property.layer_names.empty() || property.tensor_infos.empty()) {
-			LOGE("layer_names or tensor_infos vector of a given property is empty.");
+		if (property.layers.empty()) {
+			LOGE("property is empty.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}
 
@@ -626,8 +612,8 @@ namespace Common
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);
 
-		if (property.layer_names.empty()) {
-			LOGE("layer_names vector of a given property is empty.");
+		if (property.layers.empty()) {
+			LOGE("property is empty.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}
 
@@ -653,9 +639,8 @@ namespace Common
		return mBackendHandle->GetBackendCapacity(capacity);
	}
 
-	int InferenceEngineCommon::Run(
-			std::vector<inference_engine_tensor_buffer> &input_buffers,
-			std::vector<inference_engine_tensor_buffer> &output_buffers)
+	int InferenceEngineCommon::Run(IETensorBuffer &input_buffers,
+								   IETensorBuffer &output_buffers)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);
 
diff --git a/test/src/inference_engine_profiler.cpp b/test/src/inference_engine_profiler.cpp
index 394bd37..373339a 100644
--- a/test/src/inference_engine_profiler.cpp
+++ b/test/src/inference_engine_profiler.cpp
@@ -250,18 +250,16 @@ TEST_P(InferenceEngineTfliteTest, Inference)
	ASSERT_NE(model_type, -1);
 
	inference_engine_layer_property input_property;
-	std::vector<std::string>::iterator iter;
-
-	for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-		inference_engine_tensor_info tensor_info = {
-			{ 1, ch, height, width },
-			(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
-			(inference_tensor_data_type_e) tensor_type,
-			(size_t)(1 * ch * height * width)
-		};
-
-		input_property.layer_names.push_back(*iter);
-		input_property.tensor_infos.push_back(tensor_info);
+
+	inference_engine_tensor_info input_tensor_info = {
+		{ 1, ch, height, width },
+		(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+		(inference_tensor_data_type_e) tensor_type,
+		(size_t)(1 * ch * height * width)
+	};
+
+	for (auto& layer : input_layers) {
+		input_property.layers.insert(std::make_pair(layer, input_tensor_info));
	}
 
	ret = engine->SetInputLayerProperty(input_property);
@@ -269,8 +267,15 @@ TEST_P(InferenceEngineTfliteTest, Inference)
 
	inference_engine_layer_property output_property;
 
-	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-		output_property.layer_names.push_back(*iter);
+	inference_engine_tensor_info output_tensor_info = {
+		std::vector<size_t>{1},
+		INFERENCE_TENSOR_SHAPE_NCHW,
+		INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+		1
+	};
+
+	for (auto& layer : output_layers) {
+		output_property.layers.insert(std::make_pair(layer, output_tensor_info));
	}
 
	ret = engine->SetOutputLayerProperty(output_property);
@@ -279,13 +284,15 @@ TEST_P(InferenceEngineTfliteTest, Inference)
	ret = engine->Load(models, (inference_model_format_e) model_type);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-	std::vector<inference_engine_tensor_buffer> inputs, outputs;
+	IETensorBuffer inputs, outputs;
	ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	// Copy input image tensor data from a given file to input tensor buffer.
-	for (int i = 0; i < (int) image_paths.size(); ++i) {
-		CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+	ASSERT_EQ(image_paths.size(), inputs.size());
+	int imageIndex = 0;
+	for (auto& input : inputs) {
+		CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
	}
 
	for (int repeat = 0; repeat < iteration; ++repeat) {
@@ -439,26 +446,34 @@ TEST_P(InferenceEngineTfliteCLTunerTest, Inference)
	int model_type = GetModelInfo(model_paths, models);
	ASSERT_NE(model_type, -1);
 
-	std::vector<std::string>::iterator iter;
+	inference_engine_layer_property input_property;
 
-	inference_engine_tensor_info tensor_info = {
+	inference_engine_tensor_info input_tensor_info = {
		{ 1, ch, height, width },
		INFERENCE_TENSOR_SHAPE_NCHW,
		static_cast<inference_tensor_data_type_e>(tensor_type),
		static_cast<size_t>(1 * ch * height * width)
	};
 
-	inference_engine_layer_property input_property;
-
-	for (auto &input : input_layers) {
-		input_property.layer_names.push_back(input);
-		input_property.tensor_infos.push_back(tensor_info);
+	for (auto& input : input_layers) {
+		input_property.layers.insert(std::make_pair(input, input_tensor_info));
	}
 
	ret = engine->SetInputLayerProperty(input_property);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-	inference_engine_layer_property output_property = { output_layers, {} };
+	inference_engine_layer_property output_property;
+
+	inference_engine_tensor_info output_tensor_info = {
+		std::vector<size_t>{1},
+		INFERENCE_TENSOR_SHAPE_NCHW,
+		INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+		1
+	};
+
+	for (auto& layer : output_layers) {
+		output_property.layers.insert(std::make_pair(layer, output_tensor_info));
+	}
 
	ret = engine->SetOutputLayerProperty(output_property);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -466,13 +481,15 @@ TEST_P(InferenceEngineTfliteCLTunerTest, Inference)
	ret = engine->Load(models, (inference_model_format_e) model_type);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-	std::vector<inference_engine_tensor_buffer> inputs, outputs;
+	IETensorBuffer inputs, outputs;
	ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	// Copy input image tensor data from a given file to input tensor buffer.
-	for (int i = 0; i < (int) image_paths.size(); ++i) {
-		CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+	ASSERT_EQ(image_paths.size(), inputs.size());
+	int imageIndex = 0;
+	for (auto& input : inputs) {
+		CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
	}
 
	for (int repeat = 0; repeat < iteration; ++repeat) {
@@ -601,18 +618,16 @@ TEST_P(InferenceEngineCaffeTest, Inference)
	ASSERT_NE(model_type, -1);
 
	inference_engine_layer_property input_property;
-	std::vector<std::string>::iterator iter;
-
-	for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-		inference_engine_tensor_info tensor_info = {
-			{ 1, ch, height, width },
-			(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
-			(inference_tensor_data_type_e) tensor_type,
-			(size_t)(1 * ch * height * width)
-		};
-
-		input_property.layer_names.push_back(*iter);
-		input_property.tensor_infos.push_back(tensor_info);
+
+	inference_engine_tensor_info input_tensor_info = {
+		{ 1, ch, height, width },
+		INFERENCE_TENSOR_SHAPE_NCHW,
+		(inference_tensor_data_type_e) tensor_type,
+		(size_t)(1 * ch * height * width)
+	};
+
+	for (auto& layer : input_layers) {
+		input_property.layers.insert(std::make_pair(layer, input_tensor_info));
	}
 
	ret = engine->SetInputLayerProperty(input_property);
@@ -620,8 +635,15 @@ TEST_P(InferenceEngineCaffeTest, Inference)
 
	inference_engine_layer_property output_property;
 
-	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-		output_property.layer_names.push_back(*iter);
+	inference_engine_tensor_info output_tensor_info = {
+		std::vector<size_t>{1},
+		INFERENCE_TENSOR_SHAPE_NCHW,
+		INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+		1
+	};
+
+	for (auto& layer : output_layers) {
+		output_property.layers.insert(std::make_pair(layer, output_tensor_info));
	}
 
	ret = engine->SetOutputLayerProperty(output_property);
@@ -630,13 +652,15 @@ TEST_P(InferenceEngineCaffeTest, Inference)
	ret = engine->Load(models, (inference_model_format_e) model_type);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-	std::vector<inference_engine_tensor_buffer> inputs, outputs;
+	IETensorBuffer inputs, outputs;
	ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	// Copy input image tensor data from a given file to input tensor buffer.
-	for (int i = 0; i < (int) image_paths.size(); ++i) {
-		CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+	ASSERT_EQ(image_paths.size(), inputs.size());
+	int imageIndex = 0;
+	for (auto& input : inputs) {
+		CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
	}
 
	for (int repeat = 0; repeat < iteration; ++repeat) {
@@ -758,18 +782,16 @@ TEST_P(InferenceEngineDldtTest, Inference)
	ASSERT_NE(model_type, -1);
 
	inference_engine_layer_property input_property;
-	std::vector<std::string>::iterator iter;
-
-	for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-		inference_engine_tensor_info tensor_info = {
-			{ 1, ch, height, width },
-			(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
-			(inference_tensor_data_type_e) tensor_type,
-			(size_t)(1 * ch * height * width)
-		};
-
-		input_property.layer_names.push_back(*iter);
-		input_property.tensor_infos.push_back(tensor_info);
+
+	inference_engine_tensor_info input_tensor_info = {
+		{ 1, ch, height, width },
+		INFERENCE_TENSOR_SHAPE_NCHW,
+		(inference_tensor_data_type_e) tensor_type,
+		(size_t)(1 * ch * height * width)
+	};
+
+	for (auto& layer : input_layers) {
+		input_property.layers.insert(std::make_pair(layer, input_tensor_info));
	}
 
	ret = engine->SetInputLayerProperty(input_property);
@@ -777,8 +799,15 @@ TEST_P(InferenceEngineDldtTest, Inference)
 
	inference_engine_layer_property output_property;
 
-	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-		output_property.layer_names.push_back(*iter);
+	inference_engine_tensor_info output_tensor_info = {
+		{ 1, ch, height, width },
+		INFERENCE_TENSOR_SHAPE_NCHW,
+		(inference_tensor_data_type_e) tensor_type,
+		(size_t)(1 * ch * height * width)
+	};
+
+	for (auto& layer : output_layers) {
+		output_property.layers.insert(std::make_pair(layer, output_tensor_info));
	}
 
	ret = engine->SetOutputLayerProperty(output_property);
@@ -787,13 +816,15 @@ TEST_P(InferenceEngineDldtTest, Inference)
	ret = engine->Load(models, (inference_model_format_e) model_type);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-	std::vector<inference_engine_tensor_buffer> inputs, outputs;
+	IETensorBuffer inputs, outputs;
	ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	// Copy input image tensor data from a given file to input tensor buffer.
-	for (int i = 0; i < (int) image_paths.size(); ++i) {
-		CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+	ASSERT_EQ(image_paths.size(), inputs.size());
+	int imageIndex = 0;
+	for (auto& input : inputs) {
+		CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
	}
 
	for (int repeat = 0; repeat < iteration; ++repeat) {
diff --git a/test/src/inference_engine_tc.cpp b/test/src/inference_engine_tc.cpp
index 88f75d1..6a430b7 100644
--- a/test/src/inference_engine_tc.cpp
+++ b/test/src/inference_engine_tc.cpp
@@ -321,9 +321,8 @@ TEST_P(InferenceEngineTestCase_G4, SetInputLayer_P)
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	inference_engine_layer_property input_property;
-	std::vector<std::string>::iterator iter;
 
-	for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+	for (auto& layer : input_layers) {
		inference_engine_tensor_info tensor_info = {
			{ 1, ch, height, width },
			(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
@@ -331,8 +330,7 @@ TEST_P(InferenceEngineTestCase_G4, SetInputLayer_P)
			(size_t)(1 * ch * height * width)
		};
 
-		input_property.layer_names.push_back(*iter);
-		input_property.tensor_infos.push_back(tensor_info);
+		input_property.layers.insert(std::make_pair(layer, tensor_info));
	}
	ret = engine->SetInputLayerProperty(input_property);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -359,9 +357,8 @@ TEST_P(InferenceEngineTestCase_G5, SetInputLayer_N1)
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	inference_engine_layer_property output_property;
-	std::vector<std::string>::iterator iter;
 
-	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+	for (auto& layer : output_layers) {
		inference_engine_tensor_info tensor_info = {
			{ 1, ch, height, width },
			(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
@@ -369,8 +366,7 @@ TEST_P(InferenceEngineTestCase_G5, SetInputLayer_N1)
			(size_t)(1 * ch * height * width)
		};
 
-		output_property.layer_names.push_back(*iter);
-		output_property.tensor_infos.push_back(tensor_info);
+		output_property.layers.insert(std::make_pair(layer, tensor_info));
	}
	ret = engine->SetInputLayerProperty(output_property);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
@@ -417,9 +413,8 @@ TEST_P(InferenceEngineTestCase_G4, SetOutputLayer_P)
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	inference_engine_layer_property output_property;
-	std::vector<std::string>::iterator iter;
 
-	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+	for (auto& layer : output_layers) {
		inference_engine_tensor_info tensor_info = {
			{ 1, ch, height, width },
			(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
@@ -427,8 +422,7 @@ TEST_P(InferenceEngineTestCase_G4, SetOutputLayer_P)
			(size_t)(1 * ch * height * width)
		};
 
-		output_property.layer_names.push_back(*iter);
-		output_property.tensor_infos.push_back(tensor_info);
+		output_property.layers.insert(std::make_pair(layer, tensor_info));
	}
	ret = engine->SetOutputLayerProperty(output_property);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -455,9 +449,8 @@ TEST_P(InferenceEngineTestCase_G5, SetOutputLayer_N1)
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	inference_engine_layer_property output_property;
-	std::vector<std::string>::iterator iter;
 
-	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+	for (auto& layer : output_layers) {
		inference_engine_tensor_info tensor_info = {
			{ 1, ch, height, width },
			(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
@@ -465,8 +458,7 @@ TEST_P(InferenceEngineTestCase_G5, SetOutputLayer_N1)
			(size_t)(1 * ch * height * width)
		};
 
-		output_property.layer_names.push_back(*iter);
-		output_property.tensor_infos.push_back(tensor_info);
+		output_property.layers.insert(std::make_pair(layer, tensor_info));
	}
	ret = engine->SetOutputLayerProperty(output_property);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
@@ -571,18 +563,16 @@ TEST_P(InferenceEngineTestCase_G6, Inference_P)
	ASSERT_NE(model_type, -1);
 
	inference_engine_layer_property input_property;
-	std::vector<std::string>::iterator iter;
 
-	for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-		inference_engine_tensor_info tensor_info = {
-			{ 1, ch, height, width },
-			(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
-			(inference_tensor_data_type_e) tensor_type,
-			(size_t)(1 * ch * height * width)
-		};
+	inference_engine_tensor_info input_tensor_info = {
+		{ 1, ch, height, width },
+		(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+		(inference_tensor_data_type_e) tensor_type,
+		(size_t)(1 * ch * height * width)
+	};
 
-		input_property.layer_names.push_back(*iter);
-		input_property.tensor_infos.push_back(tensor_info);
+	for (auto& layer : input_layers) {
+		input_property.layers.insert(std::make_pair(layer, input_tensor_info));
	}
 
	ret = engine->SetInputLayerProperty(input_property);
@@ -590,8 +580,12 @@ TEST_P(InferenceEngineTestCase_G6, Inference_P)
 
	inference_engine_layer_property output_property;
 
-	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-		output_property.layer_names.push_back(*iter);
+	inference_engine_tensor_info output_tensor_info = { std::vector<size_t>{1},
+														INFERENCE_TENSOR_SHAPE_NCHW,
+														INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+														1};
+	for (auto& layer : output_layers) {
+		output_property.layers.insert(std::make_pair(layer, output_tensor_info));
	}
 
	ret = engine->SetOutputLayerProperty(output_property);
@@ -600,13 +594,15 @@ TEST_P(InferenceEngineTestCase_G6, Inference_P)
	ret = engine->Load(models, (inference_model_format_e) model_type);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-	std::vector<inference_engine_tensor_buffer> inputs, outputs;
+	IETensorBuffer inputs, outputs;
	ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	// Copy input image tensor data from a given file to input tensor buffer.
-	for (int i = 0; i < (int) image_paths.size(); ++i) {
-		CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+	ASSERT_EQ(image_paths.size(), inputs.size());
+	int imageIndex = 0;
+	for (auto& input : inputs) {
+		CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
	}
 
	for (int repeat = 0; repeat < iteration; ++repeat) {
diff --git a/test/src/inference_engine_test_common.cpp b/test/src/inference_engine_test_common.cpp
index de97a74..3033bc6 100644
--- a/test/src/inference_engine_test_common.cpp
+++ b/test/src/inference_engine_test_common.cpp
@@ -142,8 +142,8 @@ std::string GetModelString(const int model_type)
 }
 
 int PrepareTensorBuffers(InferenceEngineCommon *engine,
-						 std::vector<inference_engine_tensor_buffer> &inputs,
-						 std::vector<inference_engine_tensor_buffer> &outputs)
+						 IETensorBuffer &inputs,
+						 IETensorBuffer &outputs)
 {
	int ret = engine->GetInputTensorBuffers(inputs);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -154,13 +154,13 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine,
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	// If backend is OpenCV then the buffers will be allocated out of this function.
-	if (input_property.tensor_infos.empty()) {
+	if (input_property.layers.empty()) {
		return INFERENCE_ENGINE_ERROR_NONE;
	}
 
-	for (int i = 0; i < (int) input_property.tensor_infos.size(); ++i) {
-		inference_engine_tensor_info tensor_info =
-				input_property.tensor_infos[i];
+	for (auto iter = input_property.layers.begin(); iter != input_property.layers.end(); ++iter) {
+		inference_engine_tensor_info tensor_info = iter->second;
+
		inference_engine_tensor_buffer tensor_buffer;
		if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
			tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
@@ -175,7 +175,7 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine,
		EXPECT_TRUE(tensor_buffer.buffer);
		tensor_buffer.owner_is_backend = 0;
		tensor_buffer.data_type = tensor_info.data_type;
-		inputs.push_back(tensor_buffer);
+		inputs.insert(std::make_pair(iter->first, tensor_buffer));
	}
 }
 
@@ -188,13 +188,13 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine,
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
	// If backend is OpenCV then the buffers will be allocated out of this function.
-	if (output_property.tensor_infos.empty()) {
+	if (output_property.layers.empty()) {
		return INFERENCE_ENGINE_ERROR_NONE;
	}
 
-	for (int i = 0; i < (int) output_property.tensor_infos.size(); ++i) {
-		inference_engine_tensor_info tensor_info =
-				output_property.tensor_infos[i];
+	for (auto iter = output_property.layers.begin(); iter != output_property.layers.end(); ++iter) {
+		inference_engine_tensor_info tensor_info = iter->second;
+
		inference_engine_tensor_buffer tensor_buffer;
		if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
			tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
@@ -215,20 +215,19 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine,
		EXPECT_TRUE(tensor_buffer.buffer);
		tensor_buffer.owner_is_backend = 0;
		tensor_buffer.data_type = tensor_info.data_type;
-		outputs.push_back(tensor_buffer);
+		outputs.insert(std::make_pair(iter->first, tensor_buffer));
	}
 
	return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
-						  std::vector<inference_engine_tensor_buffer> &outputs)
+void CleanupTensorBuffers(IETensorBuffer &inputs,
+						  IETensorBuffer &outputs)
 {
	if (!inputs.empty()) {
-		std::vector<inference_engine_tensor_buffer>::iterator iter;
-		for (iter = inputs.begin(); iter != inputs.end(); iter++) {
-			inference_engine_tensor_buffer tensor_buffer = *iter;
+		for (auto iter = inputs.begin(); iter != inputs.end(); iter++) {
+			inference_engine_tensor_buffer tensor_buffer = iter->second;
 
			// If tensor buffer owner is a backend then skip to release the tensor buffer.
			// This tensor buffer will be released by the backend.
@@ -241,13 +240,12 @@ void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
			else
				delete[](unsigned char *) tensor_buffer.buffer;
		}
-		std::vector<inference_engine_tensor_buffer>().swap(inputs);
+		IETensorBuffer().swap(inputs);
	}
 
	if (!outputs.empty()) {
-		std::vector<inference_engine_tensor_buffer>::iterator iter;
-		for (iter = outputs.begin(); iter != outputs.end(); iter++) {
-			inference_engine_tensor_buffer tensor_buffer = *iter;
+		for (auto iter = outputs.begin(); iter != outputs.end(); iter++) {
+			inference_engine_tensor_buffer tensor_buffer = iter->second;
 
			// If tensor buffer owner is a backend then skip to release the tensor buffer.
			// This tensor buffer will be released by the backend.
@@ -260,7 +258,7 @@ void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
			else
				delete[](unsigned char *) tensor_buffer.buffer;
		}
-		std::vector<inference_engine_tensor_buffer>().swap(outputs);
+		IETensorBuffer().swap(outputs);
	}
 }
 
@@ -284,26 +282,27 @@ void CopyFileToMemory(const char *file_name,
 }
 
 void FillOutputResult(InferenceEngineCommon *engine,
-					  std::vector<inference_engine_tensor_buffer> &outputs,
+					  IETensorBuffer &outputs,
					  tensor_t &outputData)
 {
	inference_engine_layer_property property;
	int ret = engine->GetOutputLayerProperty(property);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-	for (int i = 0; i < (int) property.tensor_infos.size(); ++i) {
-		inference_engine_tensor_info tensor_info = property.tensor_infos[i];
+	for (auto& layer : property.layers) {
+		const inference_engine_tensor_info& tensor_info = layer.second;
 
		std::vector<int> tmpDimInfo;
-		for (int i = 0; i < (int) tensor_info.shape.size(); i++) {
-			tmpDimInfo.push_back(tensor_info.shape[i]);
+		for (auto& dim : tensor_info.shape) {
+			tmpDimInfo.push_back(dim);
		}
 
		outputData.dimInfo.push_back(tmpDimInfo);
 
		// Normalize output tensor data converting it to float type in case of quantized model.
		if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-			unsigned char *ori_buf = (unsigned char *) outputs[i].buffer;
+			auto *ori_buf = static_cast<unsigned char *>(outputs[layer.first].buffer);
+
			float *new_buf = new float[tensor_info.size];
			ASSERT_TRUE(new_buf);
@@ -312,13 +311,13 @@ void FillOutputResult(InferenceEngineCommon *engine,
			}
 
			// replace original buffer with new one, and release origin one.
-			outputs[i].buffer = new_buf;
-			if (!outputs[i].owner_is_backend) {
+			outputs[layer.first].buffer = new_buf;
+			if (!outputs[layer.first].owner_is_backend) {
				delete[] ori_buf;
			}
		}
 
-		outputData.data.push_back((void *) outputs[i].buffer);
+		outputData.data.push_back(static_cast<void *>(outputs[layer.first].buffer));
	}
 }
 
@@ -494,28 +493,29 @@ int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers,
	return ret;
 }
 
-int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &output)
+int VerifyAICHandGesture1Results(IETensorBuffer &output)
 {
	// ### output[0] ###
	// output name : "mobilenetv2/boundingbox"
	// data type : int64
	// tensor shape : 1 * 56 * 56
+	std::string outputNamebbox("mobilenetv2/boundingbox");
	std::ifstream fin("/opt/usr/images/boundingbox.answer",
					  std::ios_base::in | std::ios_base::binary);
-	char *o_buffer = new (std::nothrow) char[output[0].size];
+	char *o_buffer = new (std::nothrow) char[output[outputNamebbox].size];
	if (!o_buffer) {
		std::cout << "failed to alloc o_buffer."
				  << std::endl;
		return 0;
	}
 
-	fin.read(o_buffer, output[0].size);
+	fin.read(o_buffer, output[outputNamebbox].size);
	fin.close();
 
	const int *f_answer = (const int *)o_buffer;
-	const unsigned int output_size = output[0].size / 8;
+	const unsigned int output_size = output[outputNamebbox].size / 8;
 
	for (unsigned int i = 0; i < output_size; ++i) {
-		if (static_cast<long long *>(output[0].buffer)[i] != f_answer[i]) {
+		if (static_cast<long long *>(output[outputNamebbox].buffer)[i] != f_answer[i]) {
			std::cout << "boundingbox wrong answer at index[" << i << "]" << std::endl;
-			std::cout << static_cast<long long *>(output[0].buffer)[i] << " vs " << f_answer[i] << std::endl;
+			std::cout << static_cast<long long *>(output[outputNamebbox].buffer)[i] << " vs " << f_answer[i] << std::endl;
			delete[] o_buffer;
			return 0;
		}
@@ -527,21 +527,22 @@ int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &ou
	// output name : "mobilenetv2/heatmap"
	// data type : float
	// tensor shape : 1 * 56 * 56 *21
+	std::string outputNameHeatMap("mobilenetv2/heatmap");
	std::ifstream fin_2("/opt/usr/images/heatmap.answer",
						std::ios_base::in | std::ios_base::binary);
-	char *o_buffer_2 = new (std::nothrow) char[output[1].size];
+	char *o_buffer_2 = new (std::nothrow) char[output[outputNameHeatMap].size];
	if (!o_buffer_2) {
		std::cout << "failed to alloc o_buffer_2." << std::endl;
		return 0;
	}
 
-	fin_2.read(o_buffer_2, output[1].size);
+	fin_2.read(o_buffer_2, output[outputNameHeatMap].size);
	fin_2.close();
 
	const float *f_answer_2 = (const float *)o_buffer_2;
-	const unsigned int output_size_2 = output[1].size / 8;
+	const unsigned int output_size_2 = output[outputNameHeatMap].size / 8;
	const int margin = 2;
 
	for (unsigned int i = 0; i < output_size_2; ++i) {
-		const int value_left = static_cast<int>((static_cast<float *>(output[1].buffer)[i]));
+		const int value_left = static_cast<int>((static_cast<float *>(output[outputNameHeatMap].buffer)[i]));
		const int value_right = static_cast<int>(f_answer_2[i]);
		int diff = value_left - value_right;
		diff = diff < 0 ?
				diff * -1 : diff;
@@ -558,16 +559,17 @@ int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &ou
	return 1;
 }
 
-int VerifyAICHandGesture2Results(std::vector<inference_engine_tensor_buffer> &output,
+int VerifyAICHandGesture2Results(IETensorBuffer &output,
								 std::vector<int> &answers)
 {
	// ### output[0] ###
	// output name : "mobilenetv2/coord_refine"
	// data type : float
	// tensor shape : 1 * 21 * 2
-	unsigned int size = output[0].size / 4;
+	std::string outputNameCoord("mobilenetv2/coord_refine");
+	unsigned int size = output[outputNameCoord].size / 4;
	for (unsigned int i = 0; i < size; ++i) {
-		unsigned int value = static_cast<unsigned int>(static_cast<float *>(output[0].buffer)[i] * 100.0f);
+		unsigned int value = static_cast<unsigned int>(static_cast<float *>(output[outputNameCoord].buffer)[i] * 100.0f);
		if (value != static_cast<unsigned int>(answers[i])) {
			std::cout << "coord_refine wrong answer at index[" << i << "]" << std::endl;
			std::cout << value << " vs " << answers[i] << std::endl;
@@ -579,7 +581,8 @@ int VerifyAICHandGesture2Results(std::vector<inference_engine_tensor_buffer> &ou
	// output name : "mobilenetv2/gesture"
	// data type : int64
	// tensor shape : 1 * 1 * 1
-	unsigned int value = static_cast<unsigned int>(static_cast<long long *>(output[1].buffer)[0]);
+	std::string outputNameGesture("mobilenetv2/gesture");
+	unsigned int value = static_cast<unsigned int>(static_cast<long long *>(output[outputNameGesture].buffer)[0]);
	if (value != static_cast<unsigned int>(answers[answers.size() - 1])) {
		std::cout << "gesture wrong answer at index[0]" << std::endl;
		std::cout << value << " vs " << answers[0] << std::endl;
diff --git a/test/src/inference_engine_test_common.h b/test/src/inference_engine_test_common.h
index 8bc133d..3904a22 100644
--- a/test/src/inference_engine_test_common.h
+++ b/test/src/inference_engine_test_common.h
@@ -65,18 +65,18 @@ int GetModelInfo(std::vector<std::string> &model_paths,
 std::string GetModelString(const int model_type);
 
 int PrepareTensorBuffers(InferenceEngineCommon *engine,
-						 std::vector<inference_engine_tensor_buffer> &inputs,
-						 std::vector<inference_engine_tensor_buffer> &outputs);
+						 IETensorBuffer &inputs,
+						 IETensorBuffer &outputs);
 
-void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
-						  std::vector<inference_engine_tensor_buffer> &outputs);
+void CleanupTensorBuffers(IETensorBuffer &inputs,
+						  IETensorBuffer &outputs);
 
 void CopyFileToMemory(const char *file_name,
					  inference_engine_tensor_buffer &buffer,
					  unsigned int size);
 
 void FillOutputResult(InferenceEngineCommon *engine,
-					  std::vector<inference_engine_tensor_buffer> &outputs,
+					  IETensorBuffer &outputs,
					  tensor_t &outputData);
 
 int VerifyImageClassificationResults(tensor_t &outputData, int answer);
@@ -92,7 +92,7 @@ int VerifyFacialLandmarkDetectionResults(tensor_t &outputData,
 int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers,
								int height, int width);
 
-int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &output);
+int VerifyAICHandGesture1Results(IETensorBuffer &output);
 
-int VerifyAICHandGesture2Results(std::vector<inference_engine_tensor_buffer> &output,
+int VerifyAICHandGesture2Results(IETensorBuffer &output,
								 std::vector<int> &answers);
-- 
2.7.4
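
For reference, a minimal caller-side sketch of the map-based API introduced by this
patch. Only the types (IETensorBuffer, inference_engine_layer_property,
inference_engine_tensor_info) and the SetInputLayerProperty() /
GetInputTensorBuffers() / GetOutputTensorBuffers() / Run() calls come from the patch
above; the engine instance, its construction, the layer names, and the 1x3x224x224
shape are placeholder assumptions for illustration.

    #include <cstring>
    #include <map>
    #include <string>
    #include <vector>

    #include "inference_engine_common_impl.h"

    using namespace InferenceEngineInterface::Common;

    // Assumes an already-bound and loaded engine; input_layers must match the model.
    int RunOnce(InferenceEngineCommon *engine,
                const std::vector<std::string> &input_layers)
    {
        inference_engine_layer_property input_property;

        inference_engine_tensor_info input_tensor_info = {
            { 1, 3, 224, 224 }, // example NCHW shape
            INFERENCE_TENSOR_SHAPE_NCHW,
            INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
            (size_t)(1 * 3 * 224 * 224)
        };

        // After this patch, each layer name keys its own tensor description.
        for (auto &name : input_layers)
            input_property.layers.insert(std::make_pair(name, input_tensor_info));

        int ret = engine->SetInputLayerProperty(input_property);
        if (ret != INFERENCE_ENGINE_ERROR_NONE)
            return ret;

        IETensorBuffer inputs, outputs;
        ret = engine->GetInputTensorBuffers(inputs);
        if (ret != INFERENCE_ENGINE_ERROR_NONE)
            return ret;

        ret = engine->GetOutputTensorBuffers(outputs);
        if (ret != INFERENCE_ENGINE_ERROR_NONE)
            return ret;

        // Buffers are now looked up by layer name instead of positional index.
        // (If the backend returned an empty map, the caller must allocate the
        // buffers itself, as the test helpers in this patch do.)
        for (auto &input : inputs)
            std::memset(input.second.buffer, 0, input.second.size); // placeholder data

        return engine->Run(inputs, outputs);
    }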
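A side note on the container choice (general std::map behavior, not something the
patch itself states): iteration over IETensorBuffer and over
inference_engine_layer_property::layers follows lexicographic key order, not
insertion order. That is why the reworked test loops pair image_paths entries with
input buffers through a running index while walking the map; the pairing only lines
up when the file order matches the sorted layer names. A self-contained
illustration, using hypothetical layer names:

    #include <iostream>
    #include <map>
    #include <string>

    int main()
    {
        std::map<std::string, int> buffers;

        // Inserted "out of order" on purpose; the names are hypothetical.
        buffers.insert(std::make_pair("input_b", 1));
        buffers.insert(std::make_pair("input_a", 0));

        // std::map iterates in key order: prints input_a, then input_b.
        for (auto &b : buffers)
            std::cout << b.first << '\n';

        return 0;
    }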