From 8aae3a7bd8c8c043739532a507f67dcc5bf14660 Mon Sep 17 00:00:00 2001
From: Inki Dae <inki.dae@samsung.com>
Date: Wed, 29 Apr 2020 11:56:37 +0900
Subject: [PATCH] test: add inference_engine_tc

This patch adds an inference_engine_tc.cpp file, which contains test cases
moved from inference_engine_test.cpp, and builds it as a separate
inference_engine_tc binary.

Change-Id: I3e60b74b79a574f169d726447118dce883f58cfe
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 packaging/inference-engine-interface.spec |   2 +
 test/src/CMakeLists.txt                   |  21 +-
 test/src/inference_engine_tc.cpp          | 535 ++++++++++++++++++++++++++++++
 test/src/inference_engine_test.cpp        | 125 +------
 4 files changed, 555 insertions(+), 128 deletions(-)
 create mode 100644 test/src/inference_engine_tc.cpp

diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index 81ce68d..0344a16 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -66,6 +66,7 @@ mkdir -p %{buildroot}/opt/usr/images/
 %make_install
 
 install -m 755 test/bin/inference_engine_test %{buildroot}%{_bindir}
+install -m 755 test/bin/inference_engine_tc %{buildroot}%{_bindir}
 install -m 755 start_profiler.sh %{buildroot}%{_bindir}
 install -m 666 test/res/*.bin %{buildroot}/opt/usr/images
 
@@ -82,5 +83,6 @@ install -m 666 test/res/*.bin %{buildroot}/opt/usr/images
 %{_libdir}/pkgconfig/*common.pc
 %{_libdir}/lib*-common.so
 %{_bindir}/inference_engine_test
+%{_bindir}/inference_engine_tc
 %{_bindir}/start_profiler.sh
 /opt/usr/images/*.bin
diff --git a/test/src/CMakeLists.txt b/test/src/CMakeLists.txt
index 379d2cd..7bd4400 100644
--- a/test/src/CMakeLists.txt
+++ b/test/src/CMakeLists.txt
@@ -1,5 +1,8 @@
 project(inference_engine_test)
 
+set(INFERENCE_ENGINE_TEST_CASE inference_engine_tc)
+set(INFERENCE_TEST inference_engine_test)
+
 set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
 
 if(NOT SKIP_WARNINGS)
@@ -16,14 +19,24 @@ include_directories(${CMAKE_BINARY_DIR}/include)
 include_directories(/usr/include/gtest)
 
 file(GLOB INFER_GTEST_INC_LIST "${PROJECT_SOURCE_DIR}/*.h")
-file(GLOB INFER_GTEST_SRC_LIST "${PROJECT_SOURCE_DIR}/*.cpp")
 
-add_executable(${PROJECT_NAME}
+add_executable(${INFERENCE_TEST}
+               ${INFER_GTEST_INC_LIST}
+               ${PROJECT_SOURCE_DIR}/inference_engine_test.cpp
+               )
+
+target_link_libraries(${INFERENCE_TEST} ${GTEST_LIBRARY}
+                      ${GTEST_MAIN_LIBRARY}
+                      ${INFERENCE_ENGINE_INTERFACE_LIB_NAME}
+                      dl
+                      )
+
+add_executable(${INFERENCE_ENGINE_TEST_CASE}
                ${INFER_GTEST_INC_LIST}
-               ${INFER_GTEST_SRC_LIST}
+               ${PROJECT_SOURCE_DIR}/inference_engine_tc.cpp
                )
 
-target_link_libraries(${PROJECT_NAME} ${GTEST_LIBRARY}
+target_link_libraries(${INFERENCE_ENGINE_TEST_CASE} ${GTEST_LIBRARY}
                       ${GTEST_MAIN_LIBRARY}
                       ${INFERENCE_ENGINE_INTERFACE_LIB_NAME}
                       dl
diff --git a/test/src/inference_engine_tc.cpp b/test/src/inference_engine_tc.cpp
new file mode 100644
index 0000000..78f2b67
--- /dev/null
+++ b/test/src/inference_engine_tc.cpp
@@ -0,0 +1,535 @@
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
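+
+// inference_engine_tc.cpp : parameterized test cases (Bind / Load / Inference)
+// for the inference engine common interface, moved out of inference_engine_test.cpp.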
+
+#include <string.h>
+#include <iostream>
+#include <map>
+#include <tuple>
+#include <vector>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "gtest/gtest.h"
+
+#include "inference_engine_error.h"
+#include "inference_engine_common_impl.h"
+
+using namespace InferenceEngineInterface::Common;
+
+typedef std::tuple<std::string, int> ParamType_Bind;
+typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
+
+class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_Bind> { };
+class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Load> { };
+class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Infer> { };
+
+std::map<std::string, int> Model_Formats = {
+	{ "caffemodel", INFERENCE_MODEL_CAFFE },
+	{ "pb", INFERENCE_MODEL_TF },
+	{ "tflite", INFERENCE_MODEL_TFLITE },
+	{ "t7", INFERENCE_MODEL_TORCH },
+	{ "weights", INFERENCE_MODEL_DARKNET },
+	{ "xml", INFERENCE_MODEL_DLDT },
+	{ "onnx", INFERENCE_MODEL_ONNX }
+};
+
+std::map<int, std::string> Target_Formats = {
+	{ INFERENCE_TARGET_CPU, "cpu" },
+	{ INFERENCE_TARGET_GPU, "gpu" },
+	{ INFERENCE_TARGET_CUSTOM, "custom" }
+};
+
+enum {
+	TEST_IMAGE_CLASSIFICATION = 0,
+	TEST_OBJECT_DETECTION,
+	TEST_FACE_DETECTION,
+	TEST_FACIAL_LANDMARK_DETECTION,
+	TEST_POSE_ESTIMATION
+};
+
+TEST_P(InferenceEngineTestCase_G1, Bind_P)
+{
+	std::string backend_name;
+	int target_devices;
+
+	std::tie(backend_name, target_devices) = GetParam();
+
+	std::cout << "Bind test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+
+	inference_engine_config config = {
+		.backend_name = backend_name,
+		.target_devices = target_devices
+	};
+
+	InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+	ASSERT_TRUE(engine);
+
+	int ret = engine->BindBackend(&config);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		delete engine;
+		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+		return;
+	}
+
+	inference_engine_capacity capacity;
+	ret = engine->GetBackendCapacity(&capacity);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	engine->UnbindBackend();
+
+	delete engine;
+}
+
+int GetModelInfo(std::vector<std::string> &model_paths, std::vector<std::string> &models)
+{
+	std::string model_path = model_paths[0];
+	std::string ext_str = model_path.substr(model_path.find_last_of(".") + 1);
+	std::map<std::string, int>::iterator key = Model_Formats.find(ext_str);
+	int ret = key != Model_Formats.end() ? key->second : -1;
+	EXPECT_NE(ret, -1);
+
+	if (ret == -1) {
+		return ret;
+	}
+
+	switch (ret) {
+	case INFERENCE_MODEL_CAFFE:
+	case INFERENCE_MODEL_TF:
+	case INFERENCE_MODEL_DARKNET:
+	case INFERENCE_MODEL_DLDT:
+	case INFERENCE_MODEL_ONNX:
+		models.push_back(model_paths[0]);
+		models.push_back(model_paths[1]);
+		break;
+	case INFERENCE_MODEL_TFLITE:
+	case INFERENCE_MODEL_TORCH:
+		models.push_back(model_paths[0]);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+int PrepareTensorBuffers(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &inputs,
+				std::vector<inference_engine_tensor_buffer> &outputs)
+{
+	int ret = engine->GetInputTensorBuffers(inputs);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	if (inputs.empty()) {
+		inference_engine_layer_property input_property;
+		ret = engine->GetInputLayerProperty(input_property);
+		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+		// If the backend is OpenCV, the buffers will be allocated outside of this function.
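+		// An empty tensor_infos list means the backend allocates its own buffers,
+		// so there is nothing to prepare here.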
+		if (input_property.tensor_infos.empty()) {
+			return INFERENCE_ENGINE_ERROR_NONE;
+		}
+
+		for (int i = 0; i < (int)input_property.tensor_infos.size(); ++i) {
+			inference_engine_tensor_info tensor_info = input_property.tensor_infos[i];
+			inference_engine_tensor_buffer tensor_buffer;
+			if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+				tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+				tensor_buffer.size = tensor_info.size * 4;
+			} else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+				tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+				tensor_buffer.size = tensor_info.size;
+			}
+
+			EXPECT_TRUE(tensor_buffer.buffer);
+			tensor_buffer.owner_is_backend = 0;
+			tensor_buffer.data_type = tensor_info.data_type;
+			inputs.push_back(tensor_buffer);
+		}
+	}
+
+	ret = engine->GetOutputTensorBuffers(outputs);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	if (outputs.empty()) {
+		inference_engine_layer_property output_property;
+		ret = engine->GetOutputLayerProperty(output_property);
+		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+		// If the backend is OpenCV, the buffers will be allocated outside of this function.
+		if (output_property.tensor_infos.empty()) {
+			return INFERENCE_ENGINE_ERROR_NONE;
+		}
+
+		for (int i = 0; i < (int)output_property.tensor_infos.size(); ++i) {
+			inference_engine_tensor_info tensor_info = output_property.tensor_infos[i];
+			inference_engine_tensor_buffer tensor_buffer;
+			if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+				tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+				tensor_buffer.size = tensor_info.size * 4;
+			} else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+				tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+				tensor_buffer.size = tensor_info.size;
+			}
+
+			EXPECT_TRUE(tensor_buffer.buffer);
+			tensor_buffer.owner_is_backend = 0;
+			tensor_buffer.data_type = tensor_info.data_type;
+			outputs.push_back(tensor_buffer);
+		}
+	}
+
+	return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs, std::vector<inference_engine_tensor_buffer> &outputs)
+{
+	if (!inputs.empty()) {
+		std::vector<inference_engine_tensor_buffer>::iterator iter;
+		for (iter = inputs.begin(); iter != inputs.end(); iter++) {
+			inference_engine_tensor_buffer tensor_buffer = *iter;
+
+			// If the tensor buffer is owned by the backend then skip releasing it.
+			// It will be released by the backend.
+			if (tensor_buffer.owner_is_backend) {
+				continue;
+			}
+
+			if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+				delete[] (float *)tensor_buffer.buffer;
+			else
+				delete[] (unsigned char *)tensor_buffer.buffer;
+		}
+		std::vector<inference_engine_tensor_buffer>().swap(inputs);
+	}
+
+	if (!outputs.empty()) {
+		std::vector<inference_engine_tensor_buffer>::iterator iter;
+		for (iter = outputs.begin(); iter != outputs.end(); iter++) {
+			inference_engine_tensor_buffer tensor_buffer = *iter;
+
+			// If the tensor buffer is owned by the backend then skip releasing it.
+			// It will be released by the backend.
+			if (tensor_buffer.owner_is_backend) {
+				continue;
+			}
+
+			if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+				delete[] (float *)tensor_buffer.buffer;
+			else
+				delete[] (unsigned char *)tensor_buffer.buffer;
+		}
+		std::vector<inference_engine_tensor_buffer>().swap(outputs);
+	}
+}
+
+void CopyFileToMemory(const char *file_name, inference_engine_tensor_buffer &buffer, unsigned int size)
+{
+	int fd = open(file_name, O_RDONLY);
+	if (fd == -1) {
+		ASSERT_NE(fd, -1);
+		return;
+	}
+
+	int num = read(fd, buffer.buffer, size);
+	if (num == -1) {
+		close(fd);
+		ASSERT_NE(num, -1);
+		return;
+	}
+
+	close(fd);
+}
+
+
+TEST_P(InferenceEngineTestCase_G2, Load_P)
+{
+	std::string backend_name;
+	int target_devices;
+	std::vector<std::string> model_paths;
+
+	std::tie(backend_name, target_devices, model_paths) = GetParam();
+
+	std::cout << "Load test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+
+	inference_engine_config config = {
+		.backend_name = backend_name,
+		.target_devices = target_devices
+	};
+
+	InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+	ASSERT_TRUE(engine);
+
+	int ret = engine->BindBackend(&config);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		delete engine;
+		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+		return;
+	}
+
+	inference_engine_capacity capacity;
+	ret = engine->GetBackendCapacity(&capacity);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	ret = engine->SetTargetDevices(target_devices);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	std::vector<std::string> models;
+	int model_type = GetModelInfo(model_paths, models);
+	if (model_type == -1) {
+		delete engine;
+		ASSERT_NE(model_type, -1);
+		return;
+	}
+
+	ret = engine->Load(models, (inference_model_format_e)model_type);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	engine->UnbindBackend();
+
+	delete engine;
+}
+
+void FillOutputResult(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &outputs, tensor_t &outputData)
+{
+	inference_engine_layer_property property;
+	int ret = engine->GetOutputLayerProperty(property);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	for (int i = 0; i < (int)property.tensor_infos.size(); ++i) {
+		inference_engine_tensor_info tensor_info = property.tensor_infos[i];
+
+		std::vector<int> tmpDimInfo;
+		for (int i = 0; i < (int)tensor_info.shape.size(); i++) {
+			tmpDimInfo.push_back(tensor_info.shape[i]);
+		}
+
+		outputData.dimInfo.push_back(tmpDimInfo);
+
+		// Normalize the output tensor data, converting it to float, in the case of a quantized model.
+		if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+			unsigned char *ori_buf = (unsigned char *)outputs[i].buffer;
+			float *new_buf = new float[tensor_info.size];
+			ASSERT_TRUE(new_buf);
+
+			for (int j = 0; j < (int)tensor_info.size; j++) {
+				new_buf[j] = (float)ori_buf[j] / 255.0f;
+			}
+
+			// Replace the original buffer with the new one, and release the original one
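+			// (only when this test owns it; a backend-owned buffer is left to the backend).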
+			outputs[i].buffer = new_buf;
+			if (!outputs[i].owner_is_backend) {
+				delete[] ori_buf;
+			}
+		}
+
+		outputData.data.push_back((void *)outputs[i].buffer);
+	}
+}
+
+int VerifyImageClassificationResults(tensor_t &outputData, int answer)
+{
+	std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
+	std::vector<void *> inferResults(outputData.data.begin(), outputData.data.end());
+
+	int idx = -1;
+	int count = inferDimInfo[0][1];
+	float value = 0.0f;
+
+	float *prediction = reinterpret_cast<float *>(inferResults[0]);
+	for (int i = 0; i < count; ++i) {
+		if (value < prediction[i]) {
+			value = prediction[i];
+			idx = i;
+		}
+	}
+
+	return idx == answer;
+}
+
+TEST_P(InferenceEngineTestCase_G3, Inference)
+{
+	std::string backend_name;
+	int target_devices;
+	int test_type;
+	int iteration;
+	int tensor_type;
+	std::vector<std::string> image_paths;
+	size_t height;
+	size_t width;
+	size_t ch;
+	std::vector<std::string> input_layers;
+	std::vector<std::string> output_layers;
+	std::vector<std::string> model_paths;
+	std::vector<int> answers;
+
+	std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+
+	if (iteration < 1) {
+		iteration = 1;
+	}
+
+	std::string test_name;
+	switch (test_type) {
+	case TEST_IMAGE_CLASSIFICATION:
+		test_name.append("Image classification");
+		break;
+	default:
+		return;
+	}
+
+	std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+
+	inference_engine_config config = {
+		.backend_name = backend_name,
+		.target_devices = target_devices
+	};
+
+	InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+	if (engine == nullptr) {
+		ASSERT_TRUE(engine);
+		return;
+	}
+
+	int ret = engine->BindBackend(&config);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		delete engine;
+		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+		return;
+	}
+
+	inference_engine_capacity capacity;
+	ret = engine->GetBackendCapacity(&capacity);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	ret = engine->SetTargetDevices(target_devices);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	std::vector<std::string> models;
+	int model_type = GetModelInfo(model_paths, models);
+	if (model_type == -1) {
+		delete engine;
+		ASSERT_NE(model_type, -1);
+		return;
+	}
+
+	inference_engine_layer_property input_property;
+	std::vector<std::string>::iterator iter;
+
+	for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+		inference_engine_tensor_info tensor_info = {
+			{ 1, ch, height, width },
+			(inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+			(inference_tensor_data_type_e)tensor_type,
+			(size_t)(1 * ch * height * width)
+		};
+
+		input_property.layer_names.push_back(*iter);
+		input_property.tensor_infos.push_back(tensor_info);
+	}
+
+	ret = engine->SetInputLayerProperty(input_property);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		delete engine;
+		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+		return;
+	}
+
+	inference_engine_layer_property output_property;
+
+	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+		output_property.layer_names.push_back(*iter);
+	}
+
+	ret = engine->SetOutputLayerProperty(output_property);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		delete engine;
+		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+		return;
+	}
+
+	ret = engine->Load(models, (inference_model_format_e)model_type);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		delete engine;
+		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+		return;
+	}
+
+	std::vector<inference_engine_tensor_buffer> inputs, outputs;
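+	// Buffers come from the backend when it exports them; otherwise PrepareTensorBuffers() allocates them here.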
+	ret = PrepareTensorBuffers(engine, inputs, outputs);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		delete engine;
+		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+		return;
+	}
+
+	// Copy input image tensor data from a given file to the input tensor buffer.
+	for (int i = 0; i < (int)image_paths.size(); ++i) {
+		CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+	}
+
+	for (int repeat = 0; repeat < iteration; ++repeat) {
+		ret = engine->Run(inputs, outputs);
+		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+	}
+
+	tensor_t result;
+	FillOutputResult(engine, outputs, result);
+
+	ret = VerifyImageClassificationResults(result, answers[0]);
+	EXPECT_EQ(ret, 1);
+
+	CleanupTensorBuffers(inputs, outputs);
+
+	engine->UnbindBackend();
+	models.clear();
+
+	delete engine;
+}
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G1,
+	testing::Values(
+		// parameter order : backend name, target device
+		// ARMNN.
+		ParamType_Bind("armnn", INFERENCE_TARGET_CPU),
+		// TFLITE.
+		ParamType_Bind("tflite", INFERENCE_TARGET_CPU)
+		/* TODO */
+	)
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G2,
+	testing::Values(
+		// parameter order : backend name, target device, model path/s
+		// mobilenet based image classification model loading test
+		// ARMNN.
+		ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+		// TFLITE.
+		ParamType_Load("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
+		/* TODO */
+	)
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3,
+	testing::Values(
+		// parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+		// mobilenet based image classification test
+		// ARMNN.
+		ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+		// TFLITE.
+		ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 })
+		/* TODO */
+	)
+);
diff --git a/test/src/inference_engine_test.cpp b/test/src/inference_engine_test.cpp
index b0a457c..db50a80 100644
--- a/test/src/inference_engine_test.cpp
+++ b/test/src/inference_engine_test.cpp
@@ -29,12 +29,8 @@
 
 using namespace InferenceEngineInterface::Common;
 
-typedef std::tuple<std::string, int> ParamType;
-typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
 typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
 
-class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
-class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
 class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer> { };
 class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer> { };
 class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer> { };
@@ -63,39 +59,6 @@ enum {
 	TEST_POSE_ESTIMATION
 };
 
-TEST_P(InferenceEngineCommonTest, Bind)
-{
-	std::string backend_name;
-	int target_devices;
-
-	std::tie(backend_name, target_devices) = GetParam();
-
-	std::cout <<"Bind test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-
-	inference_engine_config config = {
-		.backend_name = backend_name,
-		.target_devices = target_devices
-	};
-
-	InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
-	ASSERT_TRUE(engine);
-
-	int ret = engine->BindBackend(&config);
-	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-		delete engine;
-		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-		return;
-	}
-
-	inference_engine_capacity capacity;
-	ret = engine->GetBackendCapacity(&capacity);
-	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-	engine->UnbindBackend();
-
-	delete engine;
-}
-
 int GetModelInfo(std::vector<std::string> &model_paths, std::vector<std::string> &models)
 {
 	std::string model_path = model_paths[0];
@@ -255,55 +218,6 @@ void CopyFileToMemory(const char *file_name, inference_engine_tensor_buffer &buf
 	close(fd);
 }
 
-
-TEST_P(InferenceEngineCommonTest_2, Load)
-{
-	std::string backend_name;
-	int target_devices;
-	std::vector<std::string> model_paths;
-
-	std::tie(backend_name, target_devices, model_paths) = GetParam();
-
-	std::cout <<"Load test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-
-	inference_engine_config config = {
-		.backend_name = backend_name,
-		.target_devices = target_devices
-	};
-
-	InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
-	ASSERT_TRUE(engine);
-
-	int ret = engine->BindBackend(&config);
-	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-		delete engine;
-		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-		return;
-	}
-
-	inference_engine_capacity capacity;
-	ret = engine->GetBackendCapacity(&capacity);
-	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-	ret = engine->SetTargetDevices(target_devices);
-	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-	std::vector<std::string> models;
-	int model_type = GetModelInfo(model_paths, models);
-	if (model_type == -1) {
-		delete engine;
-		ASSERT_NE(model_type, -1);
-		return;
-	}
-
-	ret = engine->Load(models, (inference_model_format_e)model_type);
-	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-	engine->UnbindBackend();
-
-	delete engine;
-}
-
 void FillOutputResult(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &outputs,
 		tensor_t &outputData)
 {
 	inference_engine_layer_property property;
@@ -333,7 +247,7 @@ void FillOutputResult(InferenceEngineCommon *engine, std::vector