Use inference_engine_tensor_info and inference_engine_tensor_buffer keyed by input/output layer name
This patch reworks inference_engine_tensor_info and inference_engine_tensor_buffer
handling to be based on input/output layers' names, changing
vector<inference_engine_tensor_buffer> to map<string, inference_engine_tensor_buffer>.
Change-Id: I18d3e7ae80a8c2a1e6236938571b8f22b12b2e1e
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
#ifndef __INFERENCE_ENGINE_COMMON_H__
#define __INFERENCE_ENGINE_COMMON_H__
+#include <map>
#include <vector>
#include <string>
#include "inference_engine_type.h"
+using IETensorBuffer = std::map<std::string, inference_engine_tensor_buffer>;
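As a brief illustration of the new keying scheme (a sketch; the helper and the idea of probing with find() are mine, not part of this patch):

    #include <cstring>

    // Copies raw input data into the buffer registered for a given layer.
    // find() is used instead of operator[] so that a missing layer name
    // does not default-construct an empty entry in the map.
    static bool FillLayerBuffer(IETensorBuffer &buffers, const std::string &name,
                                const void *src, size_t len)
    {
        auto it = buffers.find(name);
        if (it == buffers.end() || it->second.size < len)
            return false;

        std::memcpy(it->second.buffer, src, len);
        return true;
    }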
namespace InferenceEngineInterface
{
namespace Common
- * @param[out] buffers A backend engine should add input tensor buffers allocated itself to buffers vector.
- * Otherwise, it should put buffers to be empty.
+ * @param[out] buffers A backend engine should add the input tensor buffers it allocated to the buffers map.
+ * Otherwise, it should leave the map empty.
*/
- virtual int GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+ virtual int GetInputTensorBuffers(IETensorBuffer &buffers) = 0;
/**
* @brief Get output tensor buffers from a given backend engine.
- * @param[out] buffers A backend engine should add output tensor buffers allocated itself to buffers vector.
- * Otherwise, it should put buffers to be empty.
+ * @param[out] buffers A backend engine should add the output tensor buffers it allocated to the buffers map.
+ * Otherwise, it should leave the map empty.
*/
- virtual int GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+ virtual int GetOutputTensorBuffers(IETensorBuffer &buffers) = 0;
/**
* @brief Get input layer property information from a given backend engine.
* @param[in] output_buffers It contains tensor buffers to be used as output layer.
*/
virtual int
- Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers) = 0;
+ Run(IETensorBuffer &input_buffers, IETensorBuffer &output_buffers) = 0;
};
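Sketch of the intended call flow against this interface after the change (error handling elided; `engine` stands for any concrete backend instance):

    IETensorBuffer inputs, outputs;

    // A backend either fills these maps with buffers it allocated, keyed
    // by layer name, or leaves them empty so the caller allocates them.
    engine->GetInputTensorBuffers(inputs);
    engine->GetOutputTensorBuffers(outputs);

    // ... write input data into inputs["<layer name>"].buffer ...

    engine->Run(inputs, outputs);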
typedef void destroy_t(IInferenceEngineCommon *);
#ifndef __INFERENCE_ENGINE_COMMON_IMPL_H__
#define __INFERENCE_ENGINE_COMMON_IMPL_H__
+#include <map>
#include <vector>
#include <string>
- * @param[out] buffers A backend engine should add input tensor buffers allocated itself to buffers vector.
- * Otherwise, it should put buffers to be empty.
+ * @param[out] buffers A backend engine should add the input tensor buffers it allocated to the buffers map.
+ * Otherwise, it should leave the map empty.
*/
- int GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers);
+ int GetInputTensorBuffers(IETensorBuffer &buffers);
/**
* @brief Get output tensor buffers from a given backend engine.
- * @param[out] buffers A backend engine should add output tensor buffers allocated itself to buffers vector.
- * Otherwise, it should put buffers to be empty.
+ * @param[out] buffers A backend engine should add the output tensor buffers it allocated to the buffers map.
+ * Otherwise, it should leave the map empty.
*/
- int GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers);
+ int GetOutputTensorBuffers(IETensorBuffer &buffers);
/**
* @brief Get input layer property information from a given backend engine.
* @param[in] input_buffers It contains tensor buffers to be used as input layer.
* @param[in] output_buffers It contains tensor buffers to be used as output layer.
*/
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers);
+ int Run(IETensorBuffer &input_buffers, IETensorBuffer &output_buffers);
/**
* @brief Enable or disable Inference engine profiler.
int GetNpuBackendType(dictionary *dict, const char *section_name);
int InitBackendEngine(const std::string &backend_path,
int backend_type, int device_type);
- int CheckTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers);
+ int CheckTensorBuffers(IETensorBuffer &buffers);
int CheckLayerProperty(inference_engine_layer_property &property);
inference_backend_type_e mSelectedBackendEngine;
#ifndef __INFERENCE_ENGINE_TYPE_H__
#define __INFERENCE_ENGINE_TYPE_H__
+#include <map>
+#include <vector>
+#include <string>
+
#ifdef __cplusplus
extern "C"
{
* @since_tizen 6.0
*/
typedef struct _inference_engine_layer_property {
- std::vector<std::string> layer_names; /**< names of layers. */
- std::vector<inference_engine_tensor_info> tensor_infos; /**< information of tensors. */
+ std::map<std::string, inference_engine_tensor_info> layers; /**< layer names and their tensor information. */
// TODO.
} inference_engine_layer_property;
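For illustration, a minimal sketch of populating the reworked property (the layer name and shape are placeholders, not values from this patch):

    inference_engine_layer_property property;

    inference_engine_tensor_info info = {
        { 1, 3, 224, 224 },              // shape (hypothetical)
        INFERENCE_TENSOR_SHAPE_NCHW,
        INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
        (size_t)(1 * 3 * 224 * 224)      // size, following the convention used in the tests below
    };

    property.layers.insert(std::make_pair("input_1", info));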
Name: inference-engine-interface
Summary: Interface of inference engines
Version: 0.0.2
-Release: 13
+Release: 14
Group: Multimedia/Framework
License: Apache-2.0
Source0: %{name}-%{version}.tar.gz
return ret;
}
- int InferenceEngineCommon::CheckTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ int InferenceEngineCommon::CheckTensorBuffers(IETensorBuffer &buffers)
{
- if (buffers.size() == 0) {
- LOGE("tensor buffer vector is empty.");
+ if (buffers.empty()) {
+ LOGE("tensor buffer map is empty.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
- for (std::vector<inference_engine_tensor_buffer>::const_iterator iter =
- buffers.begin();
- iter != buffers.end(); ++iter) {
- inference_engine_tensor_buffer tensor_buffer = *iter;
+ for (auto& buffer : buffers) {
+ const inference_engine_tensor_buffer& tensor_buffer = buffer.second;
if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
inference_engine_layer_property &property)
{
- // Verity tensor info values.
+ // Verify layer names and tensor info values.
- std::vector<inference_engine_tensor_info>::const_iterator info_iter;
- for (info_iter = property.tensor_infos.begin();
- info_iter != property.tensor_infos.end(); ++info_iter) {
- inference_engine_tensor_info tensor_info = *info_iter;
- if (tensor_info.shape.size() == 0 || tensor_info.size == 0) {
+ for (auto& layer : property.layers) {
+ const std::string& name = layer.first;
+
+ if (name.empty()) {
+ LOGE("layer name is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ const inference_engine_tensor_info& tensor_info = layer.second;
+ if (tensor_info.shape.empty() || tensor_info.size == 0) {
LOGE("shape size of tensor info or size of it is 0.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
// TODO. we may need to check shape type also.
}
- // Verity layer names.
- std::vector<std::string>::const_iterator name_iter;
- for (name_iter = property.layer_names.begin();
- name_iter != property.layer_names.end(); ++name_iter) {
- std::string name = *name_iter;
-
- if (name.length() == 0) {
- LOGE("layer name is invalid.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
- }
-
return INFERENCE_ENGINE_ERROR_NONE;
}
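For reference, a property that the merged check above rejects (sketch):

    inference_engine_layer_property bad;
    inference_engine_tensor_info no_info = {};   // empty shape, size 0

    bad.layers.insert(std::make_pair("", no_info));
    // CheckLayerProperty(bad) fails on the empty layer name; a non-empty
    // name with an empty shape or zero size would fail the second check.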
BackendTable.insert(std::make_pair("mlapi",INFERENCE_BACKEND_MLAPI));
BackendTable.insert(std::make_pair("one",INFERENCE_BACKEND_ONE));
- config->backend_type = BackendTable.find(config->backend_name)->second;
+ config->backend_type = BackendTable[config->backend_name];
}
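One behavioral note on this switch: std::map::operator[] default-constructs a value for a missing key, so an unrecognized backend_name now silently maps to the zero-valued backend enum instead of dereferencing end() as the old find()->second did. If that fallback is not intended, a find()-based lookup would make the failure explicit (a sketch, not part of this patch; the log message is hypothetical):

    auto backend = BackendTable.find(config->backend_name);
    if (backend == BackendTable.end()) {
        LOGE("invalid backend name.");
        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
    }
    config->backend_type = backend->second;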
std::string backendLibName;
return ret;
}
- int InferenceEngineCommon::GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ int InferenceEngineCommon::GetInputTensorBuffers(IETensorBuffer &buffers)
{
CHECK_ENGINE_INSTANCE(mBackendHandle);
// If backend engine doesn't provide tensor buffers then just return.
// In this case, InferenceEngineCommon framework will allocate the tensor buffers.
- if (buffers.size() == 0) {
+ if (buffers.empty()) {
return ret;
}
return CheckTensorBuffers(buffers);
}
- int InferenceEngineCommon::GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ int InferenceEngineCommon::GetOutputTensorBuffers(IETensorBuffer &buffers)
{
CHECK_ENGINE_INSTANCE(mBackendHandle);
// If backend engine doesn't provide tensor buffers then just return.
// In this case, InferenceEngineCommon framework will allocate the tensor buffers.
- if (buffers.size() == 0) {
+ if (buffers.empty()) {
return ret;
}
// If backend engine doesn't provide input layer property information then just return.
// In this case, user has to provide the information manually.
- if (property.layer_names.size() == 0 &&
- property.tensor_infos.size() == 0) {
+ if (property.layers.empty()) {
LOGI("backend doesn't provide input layer property.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
// If backend engine doesn't provide output layer property information then just return.
// In this case, user has to provide the information manually.
- if (property.layer_names.size() == 0 &&
- property.tensor_infos.size() == 0) {
+ if (property.layers.empty()) {
LOGI("backend doesn't provide output layer property.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
{
CHECK_ENGINE_INSTANCE(mBackendHandle);
- if (property.layer_names.empty() || property.tensor_infos.empty()) {
- LOGE("layer_names or tensor_infos vector of a given property is empty.");
+ if (property.layers.empty()) {
+ LOGE("property is empty.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
{
CHECK_ENGINE_INSTANCE(mBackendHandle);
- if (property.layer_names.empty()) {
- LOGE("layer_names vector of a given property is empty.");
+ if (property.layers.empty()) {
+ LOGE("property is empty.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
return mBackendHandle->GetBackendCapacity(capacity);
}
- int InferenceEngineCommon::Run(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ int InferenceEngineCommon::Run(IETensorBuffer &input_buffers,
+ IETensorBuffer &output_buffers)
{
CHECK_ENGINE_INSTANCE(mBackendHandle);
ASSERT_NE(model_type, -1);
inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
-
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
- inference_engine_tensor_info tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
- };
-
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
+
+ inference_engine_tensor_info input_tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ for (auto& layer : input_layers) {
+ input_property.layers.insert(std::make_pair(layer, input_tensor_info));
}
ret = engine->SetInputLayerProperty(input_property);
inference_engine_layer_property output_property;
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
- output_property.layer_names.push_back(*iter);
+ inference_engine_tensor_info output_tensor_info = {
+ std::vector<size_t>{1},
+ INFERENCE_TENSOR_SHAPE_NCHW,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ 1
+ };
+
+ for (auto& layer : output_layers) {
+ output_property.layers.insert(std::make_pair(layer, output_tensor_info));
}
ret = engine->SetOutputLayerProperty(output_property);
ret = engine->Load(models, (inference_model_format_e) model_type);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ IETensorBuffer inputs, outputs;
ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int) image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ ASSERT_EQ(image_paths.size(), inputs.size());
+ int imageIndex = 0;
+ for (auto& input : inputs) {
+ CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
}
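Two std::map behaviors matter in loops like this one. Iteration follows lexicographic key order, not insertion order, so image_paths must be arranged to match the sorted layer names; and insert() keeps the existing value for a duplicate key, so a repeated layer name silently reuses the first tensor_info. A small illustration (the names are arbitrary):

    IETensorBuffer m;
    inference_engine_tensor_buffer a = {}, b = {};

    m.insert(std::make_pair("zebra", a));
    m.insert(std::make_pair("alpha", b));
    // A range-for over m visits "alpha" before "zebra",
    // whatever the insertion order was.

    m.insert(std::make_pair("alpha", a)); // no effect: key already present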
for (int repeat = 0; repeat < iteration; ++repeat) {
int model_type = GetModelInfo(model_paths, models);
ASSERT_NE(model_type, -1);
- std::vector<std::string>::iterator iter;
+ inference_engine_layer_property input_property;
- inference_engine_tensor_info tensor_info = {
+ inference_engine_tensor_info input_tensor_info = {
{ 1, ch, height, width },
INFERENCE_TENSOR_SHAPE_NCHW,
static_cast<inference_tensor_data_type_e>(tensor_type),
static_cast<size_t>(1 * ch * height * width)
};
- inference_engine_layer_property input_property;
-
- for (auto &input : input_layers) {
- input_property.layer_names.push_back(input);
- input_property.tensor_infos.push_back(tensor_info);
+ for (auto& input : input_layers) {
+ input_property.layers.insert(std::make_pair(input, input_tensor_info));
}
ret = engine->SetInputLayerProperty(input_property);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- inference_engine_layer_property output_property = { output_layers, {} };
+ inference_engine_layer_property output_property;
+
+ inference_engine_tensor_info output_tensor_info = {
+ std::vector<size_t>{1},
+ INFERENCE_TENSOR_SHAPE_NCHW,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ 1
+ };
+
+ for (auto& layer : output_layers) {
+ output_property.layers.insert(std::make_pair(layer, output_tensor_info));
+ }
ret = engine->SetOutputLayerProperty(output_property);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
ret = engine->Load(models, (inference_model_format_e) model_type);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ IETensorBuffer inputs, outputs;
ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int) image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ ASSERT_EQ(image_paths.size(), inputs.size());
+ int imageIndex = 0;
+ for (auto& input : inputs) {
+ CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
}
for (int repeat = 0; repeat < iteration; ++repeat) {
ASSERT_NE(model_type, -1);
inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
-
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
- inference_engine_tensor_info tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
- };
-
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
+
+ inference_engine_tensor_info input_tensor_info = {
+ { 1, ch, height, width },
+ INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ for (auto& layer : input_layers) {
+ input_property.layers.insert(std::make_pair(layer, input_tensor_info));
}
ret = engine->SetInputLayerProperty(input_property);
inference_engine_layer_property output_property;
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
- output_property.layer_names.push_back(*iter);
+ inference_engine_tensor_info output_tensor_info = {
+ std::vector<size_t>{1},
+ INFERENCE_TENSOR_SHAPE_NCHW,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ 1
+ };
+
+ for (auto& layer : output_layers) {
+ output_property.layers.insert(std::make_pair(layer, output_tensor_info));
}
ret = engine->SetOutputLayerProperty(output_property);
ret = engine->Load(models, (inference_model_format_e) model_type);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ IETensorBuffer inputs, outputs;
ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int) image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ ASSERT_EQ(image_paths.size(), inputs.size());
+ int imageIndex = 0;
+ for (auto& input : inputs) {
+ CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
}
for (int repeat = 0; repeat < iteration; ++repeat) {
ASSERT_NE(model_type, -1);
inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
-
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
- inference_engine_tensor_info tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
- };
-
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
+
+ inference_engine_tensor_info input_tensor_info = {
+ { 1, ch, height, width },
+ INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ for (auto& layer : input_layers) {
+ input_property.layers.insert(std::make_pair(layer, input_tensor_info));
}
ret = engine->SetInputLayerProperty(input_property);
inference_engine_layer_property output_property;
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
- output_property.layer_names.push_back(*iter);
+ inference_engine_tensor_info output_tensor_info = {
+ { 1, ch, height, width },
+ INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ for (auto& layer : output_layers) {
+ output_property.layers.insert(std::make_pair(layer, output_tensor_info));
}
ret = engine->SetOutputLayerProperty(output_property);
ret = engine->Load(models, (inference_model_format_e) model_type);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ IETensorBuffer inputs, outputs;
ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int) image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ ASSERT_EQ(image_paths.size(), inputs.size());
+ int imageIndex = 0;
+ for (auto& input : inputs) {
+ CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
}
for (int repeat = 0; repeat < iteration; ++repeat) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+ for (auto& layer : input_layers) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
+ input_property.layers.insert(std::make_pair(layer, tensor_info));
}
ret = engine->SetInputLayerProperty(input_property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
inference_engine_layer_property output_property;
- std::vector<std::string>::iterator iter;
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+ for (auto& layer : output_layers) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
- output_property.layer_names.push_back(*iter);
- output_property.tensor_infos.push_back(tensor_info);
+ output_property.layers.insert(std::make_pair(layer, tensor_info));
}
ret = engine->SetInputLayerProperty(output_property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
inference_engine_layer_property output_property;
- std::vector<std::string>::iterator iter;
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+ for (auto& layer : output_layers) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
- output_property.layer_names.push_back(*iter);
- output_property.tensor_infos.push_back(tensor_info);
+ output_property.layers.insert(std::make_pair(layer, tensor_info));
}
ret = engine->SetOutputLayerProperty(output_property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
inference_engine_layer_property output_property;
- std::vector<std::string>::iterator iter;
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+ for (auto& layer : output_layers) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
- output_property.layer_names.push_back(*iter);
- output_property.tensor_infos.push_back(tensor_info);
+ output_property.layers.insert(std::make_pair(layer, tensor_info));
}
ret = engine->SetOutputLayerProperty(output_property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
ASSERT_NE(model_type, -1);
inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
- inference_engine_tensor_info tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
- };
+ inference_engine_tensor_info input_tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
+ for (auto& layer : input_layers) {
+ input_property.layers.insert(std::make_pair(layer, input_tensor_info));
}
ret = engine->SetInputLayerProperty(input_property);
inference_engine_layer_property output_property;
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
- output_property.layer_names.push_back(*iter);
+ inference_engine_tensor_info output_tensor_info = {
+ std::vector<size_t>{1},
+ INFERENCE_TENSOR_SHAPE_NCHW,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ 1
+ };
+ for (auto& layer : output_layers) {
+ output_property.layers.insert(std::make_pair(layer, output_tensor_info));
}
ret = engine->SetOutputLayerProperty(output_property);
ret = engine->Load(models, (inference_model_format_e) model_type);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ IETensorBuffer inputs, outputs;
ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int) image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ ASSERT_EQ(image_paths.size(), inputs.size());
+ int imageIndex = 0;
+ for (auto& input : inputs) {
+ CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
}
for (int repeat = 0; repeat < iteration; ++repeat) {
}
int PrepareTensorBuffers(InferenceEngineCommon *engine,
- std::vector<inference_engine_tensor_buffer> &inputs,
- std::vector<inference_engine_tensor_buffer> &outputs)
+ IETensorBuffer &inputs,
+ IETensorBuffer &outputs)
{
int ret = engine->GetInputTensorBuffers(inputs);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
// If backend is OpenCV then the buffers will be allocated out of this function.
- if (input_property.tensor_infos.empty()) {
+ if (input_property.layers.empty()) {
return INFERENCE_ENGINE_ERROR_NONE;
}
- for (int i = 0; i < (int) input_property.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info =
- input_property.tensor_infos[i];
+ for (auto iter = input_property.layers.begin(); iter != input_property.layers.end(); ++iter) {
+ inference_engine_tensor_info tensor_info = iter->second;
+
inference_engine_tensor_buffer tensor_buffer;
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
EXPECT_TRUE(tensor_buffer.buffer);
tensor_buffer.owner_is_backend = 0;
tensor_buffer.data_type = tensor_info.data_type;
- inputs.push_back(tensor_buffer);
+ inputs.insert(std::make_pair(iter->first, tensor_buffer));
}
}
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
// If backend is OpenCV then the buffers will be allocated out of this function.
- if (output_property.tensor_infos.empty()) {
+ if (output_property.layers.empty()) {
return INFERENCE_ENGINE_ERROR_NONE;
}
- for (int i = 0; i < (int) output_property.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info =
- output_property.tensor_infos[i];
+ for (auto iter = output_property.layers.begin(); iter != output_property.layers.end(); ++iter) {
+ inference_engine_tensor_info tensor_info = iter->second;
+
inference_engine_tensor_buffer tensor_buffer;
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
EXPECT_TRUE(tensor_buffer.buffer);
tensor_buffer.owner_is_backend = 0;
tensor_buffer.data_type = tensor_info.data_type;
- outputs.push_back(tensor_buffer);
+ outputs.insert(std::make_pair(iter->first, tensor_buffer));
}
}
return INFERENCE_ENGINE_ERROR_NONE;
}
-void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
- std::vector<inference_engine_tensor_buffer> &outputs)
+void CleanupTensorBuffers(IETensorBuffer &inputs,
+ IETensorBuffer &outputs)
{
if (!inputs.empty()) {
- std::vector<inference_engine_tensor_buffer>::iterator iter;
- for (iter = inputs.begin(); iter != inputs.end(); iter++) {
- inference_engine_tensor_buffer tensor_buffer = *iter;
+ for (auto iter = inputs.begin(); iter != inputs.end(); iter++) {
+ inference_engine_tensor_buffer tensor_buffer = iter->second;
// If tensor buffer owner is a backend then skip to release the tensor buffer.
// This tensor buffer will be released by the backend.
else
delete[](unsigned char *) tensor_buffer.buffer;
}
- std::vector<inference_engine_tensor_buffer>().swap(inputs);
+ IETensorBuffer().swap(inputs);
}
if (!outputs.empty()) {
- std::vector<inference_engine_tensor_buffer>::iterator iter;
- for (iter = outputs.begin(); iter != outputs.end(); iter++) {
- inference_engine_tensor_buffer tensor_buffer = *iter;
+ for (auto iter = outputs.begin(); iter != outputs.end(); iter++) {
+ inference_engine_tensor_buffer tensor_buffer = iter->second;
// If tensor buffer owner is a backend then skip to release the tensor buffer.
// This tensor buffer will be released by the backend.
else
delete[](unsigned char *) tensor_buffer.buffer;
}
- std::vector<inference_engine_tensor_buffer>().swap(outputs);
+ IETensorBuffer().swap(outputs);
}
}
}
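As a side note on the cleanup above: the swap-with-temporary idiom was mainly a way to force a vector to release its capacity; for std::map, a plain clear() frees the nodes just as well (sketch):

    // Equivalent cleanup once the buffers themselves have been released:
    inputs.clear();
    outputs.clear();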
void FillOutputResult(InferenceEngineCommon *engine,
- std::vector<inference_engine_tensor_buffer> &outputs,
+ IETensorBuffer &outputs,
tensor_t &outputData)
{
inference_engine_layer_property property;
int ret = engine->GetOutputLayerProperty(property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- for (int i = 0; i < (int) property.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info = property.tensor_infos[i];
+ for (auto& layer : property.layers) {
+ const inference_engine_tensor_info& tensor_info = layer.second;
std::vector<int> tmpDimInfo;
- for (int i = 0; i < (int) tensor_info.shape.size(); i++) {
- tmpDimInfo.push_back(tensor_info.shape[i]);
+ for (auto& dim : tensor_info.shape) {
+ tmpDimInfo.push_back(dim);
}
outputData.dimInfo.push_back(tmpDimInfo);
// Normalize output tensor data converting it to float type in case of quantized model.
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- unsigned char *ori_buf = (unsigned char *) outputs[i].buffer;
+ auto *ori_buf = static_cast<unsigned char *>(outputs[layer.first].buffer);
+
float *new_buf = new float[tensor_info.size];
ASSERT_TRUE(new_buf);
}
// replace original buffer with new one, and release origin one.
- outputs[i].buffer = new_buf;
- if (!outputs[i].owner_is_backend) {
+ outputs[layer.first].buffer = new_buf;
+ if (!outputs[layer.first].owner_is_backend) {
delete[] ori_buf;
}
}
- outputData.data.push_back((void *) outputs[i].buffer);
+ outputData.data.push_back(static_cast<void *>(outputs[layer.first].buffer));
}
}
return ret;
}
-int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &output)
+int VerifyAICHandGesture1Results(IETensorBuffer &output)
{
// ### output[0] ###
// output name : "mobilenetv2/boundingbox"
// data type : int64
// tensor shape : 1 * 56 * 56
+ std::string outputNamebbox("mobilenetv2/boundingbox");
std::ifstream fin("/opt/usr/images/boundingbox.answer", std::ios_base::in | std::ios_base::binary);
- char *o_buffer = new (std::nothrow) char[output[0].size];
+ char *o_buffer = new (std::nothrow) char[output[outputNamebbox].size];
if (!o_buffer) {
std::cout << "failed to alloc o_buffer." << std::endl;
return 0;
}
- fin.read(o_buffer, output[0].size);
+ fin.read(o_buffer, output[outputNamebbox].size);
fin.close();
const int *f_answer = (const int *)o_buffer;
- const unsigned int output_size = output[0].size / 8;
+ const unsigned int output_size = output[outputNamebbox].size / 8;
for (unsigned int i = 0; i < output_size; ++i) {
- if (static_cast<int *>(output[0].buffer)[i] != f_answer[i]) {
+ if (static_cast<int *>(output[outputNamebbox].buffer)[i] != f_answer[i]) {
std::cout << "boundingbox wrong answer at index[" << i << "]" << std::endl;
- std::cout << static_cast<int *>(output[0].buffer)[i] << " vs " << f_answer[i] << std::endl;
+ std::cout << static_cast<int *>(output[outputNamebbox].buffer)[i] << " vs " << f_answer[i] << std::endl;
delete[] o_buffer;
return 0;
}
// output name : "mobilenetv2/heatmap"
// data type : float
// tensor shape : 1 * 56 * 56 *21
+ std::string outputNameHeatMap("mobilenetv2/heatmap");
std::ifstream fin_2("/opt/usr/images/heatmap.answer", std::ios_base::in | std::ios_base::binary);
- char *o_buffer_2 = new (std::nothrow) char[output[1].size];
+ char *o_buffer_2 = new (std::nothrow) char[output[outputNameHeatMap].size];
if (!o_buffer_2) {
std::cout << "failed to alloc o_buffer_2." << std::endl;
return 0;
}
- fin_2.read(o_buffer_2, output[1].size);
+ fin_2.read(o_buffer_2, output[outputNameHeatMap].size);
fin_2.close();
const float *f_answer_2 = (const float *)o_buffer_2;
- const unsigned int output_size_2 = output[1].size / 8;
+ const unsigned int output_size_2 = output[outputNameHeatMap].size / 8;
const int margin = 2;
for (unsigned int i = 0; i < output_size_2; ++i) {
- const int value_left = static_cast<int>((static_cast<float *>(output[1].buffer)[i]));
+ const int value_left = static_cast<int>((static_cast<float *>(output[outputNameHeatMap].buffer)[i]));
const int value_right = static_cast<int>(f_answer_2[i]);
int diff = value_left - value_right;
diff = diff < 0 ? diff * -1 : diff;
return 1;
}
-int VerifyAICHandGesture2Results(std::vector<inference_engine_tensor_buffer> &output,
+int VerifyAICHandGesture2Results(IETensorBuffer &output,
std::vector<int> &answers)
{
// ### output[0] ###
// output name : "mobilenetv2/coord_refine"
// data type : float
// tensor shape : 1 * 21 * 2
- unsigned int size = output[0].size / 4;
+ std::string outputNameCoord("mobilenetv2/coord_refine");
+ unsigned int size = output[outputNameCoord].size / 4;
for (unsigned int i = 0; i < size; ++i) {
- unsigned int value = static_cast<unsigned int>(static_cast<float *>(output[0].buffer)[i] * 100.0f);
+ unsigned int value = static_cast<unsigned int>(static_cast<float *>(output[outputNameCoord].buffer)[i] * 100.0f);
if (value != static_cast<unsigned int>(answers[i])) {
std::cout << "coord_refine wrong answer at index[" << i << "]" << std::endl;
std::cout << value << " vs " << answers[i] << std::endl;
// output name : "mobilenetv2/gesture"
// data type : int64
// tensor shape : 1 * 1 * 1
- unsigned int value = static_cast<unsigned int>(static_cast<long long *>(output[1].buffer)[0]);
+ std::string outputNameGesture("mobilenetv2/gesture");
+ unsigned int value = static_cast<unsigned int>(static_cast<long long *>(output[outputNameGesture].buffer)[0]);
if (value != static_cast<unsigned int>(answers[answers.size() - 1])) {
std::cout << "gesture wrong answer at index[0]" << std::endl;
- std::cout << value << " vs " << answers[0] << std::endl;
+ std::cout << value << " vs " << answers[answers.size() - 1] << std::endl;
std::string GetModelString(const int model_type);
int PrepareTensorBuffers(InferenceEngineCommon *engine,
- std::vector<inference_engine_tensor_buffer> &inputs,
- std::vector<inference_engine_tensor_buffer> &outputs);
+ IETensorBuffer &inputs,
+ IETensorBuffer &outputs);
-void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
- std::vector<inference_engine_tensor_buffer> &outputs);
+void CleanupTensorBuffers(IETensorBuffer &inputs,
+ IETensorBuffer &outputs);
void CopyFileToMemory(const char *file_name,
inference_engine_tensor_buffer &buffer,
unsigned int size);
void FillOutputResult(InferenceEngineCommon *engine,
- std::vector<inference_engine_tensor_buffer> &outputs,
+ IETensorBuffer &outputs,
tensor_t &outputData);
int VerifyImageClassificationResults(tensor_t &outputData, int answer);
int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers,
int height, int width);
-int VerifyAICHandGesture1Results(std::vector<inference_engine_tensor_buffer> &output);
+int VerifyAICHandGesture1Results(IETensorBuffer &output);
-int VerifyAICHandGesture2Results(std::vector<inference_engine_tensor_buffer> &output,
+int VerifyAICHandGesture2Results(IETensorBuffer &output,
std::vector<int> &answers);