mkdir -p %{buildroot}/opt/usr/images/
%make_install
-install -m 755 test/bin/inference_engine_test %{buildroot}%{_bindir}
+install -m 755 test/bin/inference_engine_profiler %{buildroot}%{_bindir}
install -m 755 test/bin/inference_engine_tc %{buildroot}%{_bindir}
install -m 755 start_profiler.sh %{buildroot}%{_bindir}
install -m 666 test/res/*.bin %{buildroot}/opt/usr/images
%{_includedir}/media/*.h
%{_libdir}/pkgconfig/*common.pc
%{_libdir}/lib*-common.so
-%{_bindir}/inference_engine_test
+%{_bindir}/inference_engine_profiler
%{_bindir}/inference_engine_tc
%{_bindir}/start_profiler.sh
/opt/usr/images/*.bin
LIST=$(seq 0 $CNT)
for i in $LIST
do
- /usr/bin/inference_engine_test --gtest_filter=Prefix/InferenceEngineTfliteTest.Inference/$i
+ /usr/bin/inference_engine_profiler --gtest_filter=Prefix/InferenceEngineTfliteTest.Inference/$i
done
# Caffe model
LIST=$(seq 0 $CNT)
for i in $LIST
do
- /usr/bin/inference_engine_test --gtest_filter=Prefix/InferenceEngineCaffeTest.Inference/$i
+ /usr/bin/inference_engine_profiler --gtest_filter=Prefix/InferenceEngineCaffeTest.Inference/$i
done
# If you want to add a new model test then add a script for it below
# LIST=$(seq 0 $CNT)
# for i in $LIST
# do
-# /usr/bin/inference_engine_test --gtest_filter=Prefix/InferenceEngine_model_name_Test.Inference/$i
+# /usr/bin/inference_engine_profiler --gtest_filter=Prefix/InferenceEngine_model_name_Test.Inference/$i
# done
project(inference_engine_test)
set(INFERENCE_ENGINE_TEST_CASE inference_engine_tc)
-set(INFERENCE_TEST inference_engine_test)
+set(INFERENCE_ENGINE_PROFILER inference_engine_profiler)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
file(GLOB INFER_GTEST_INC_LIST "${PROJECT_SOURCE_DIR}/*.h")
-add_executable(${INFERENCE_TEST}
+add_executable(${INFERENCE_ENGINE_PROFILER}
${INFER_GTEST_INC_LIST}
- ${PROJECT_SOURCE_DIR}/inference_engine_test.cpp
+ ${PROJECT_SOURCE_DIR}/inference_engine_profiler.cpp
${PROJECT_SOURCE_DIR}/inference_engine_test_common.cpp
)
-target_link_libraries(${INFERENCE_TEST} ${GTEST_LIBRARY}
+target_link_libraries(${INFERENCE_ENGINE_PROFILER} ${GTEST_LIBRARY}
${GTEST_MAIN_LIBRARY}
${INFERENCE_ENGINE_INTERFACE_LIB_NAME}
dl
--- /dev/null
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+#include <tuple>
+#include <map>
+#include <fcntl.h>
+#include <unistd.h>
+#include <queue>
+#include <algorithm>
+
+#include "gtest/gtest.h"
+
+#include "inference_engine_error.h"
+#include "inference_engine_common_impl.h"
+#include "inference_engine_test_common.h"
+
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
+
+class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer> { };
+class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer> { };
+class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer> { };
+
+TEST_P(InferenceEngineTfliteTest, Inference)
+{
+ std::string backend_name;
+ int target_devices;
+ int test_type;
+ int iteration;
+ int tensor_type;
+ std::vector<std::string> image_paths;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> input_layers;
+ std::vector<std::string> output_layers;
+ std::vector<std::string> model_paths;
+ std::vector<int> answers;
+
+ std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+
+ if (iteration < 1) {
+ iteration = 1;
+ }
+
+ std::string test_name;
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ test_name.append("Image classification");
+ break;
+ case TEST_OBJECT_DETECTION:
+ test_name.append("Object detection");
+ break;
+ case TEST_FACE_DETECTION:
+ test_name.append("Face detection");
+ break;
+ case TEST_FACIAL_LANDMARK_DETECTION:
+ test_name.append("Facial landmark detection");
+ break;
+ case TEST_POSE_ESTIMATION:
+ test_name.append("Pose estimation");
+ break;
+ }
+
+ std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+ inference_engine_config config = {
+ .backend_name = backend_name,
+ .target_devices = target_devices
+ };
+
+ InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+ if (engine == nullptr) {
+ ASSERT_TRUE(engine);
+ return;
+ }
+
+ int ret = engine->EnableProfiler(true);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_tflite_model.txt");
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector <std::string> models;
+ int model_type = GetModelInfo(model_paths, models);
+ if (model_type == -1) {
+ delete engine;
+ ASSERT_NE(model_type, -1);
+ return;
+ }
+
+ inference_engine_layer_property input_property;
+ std::vector<std::string>::iterator iter;
+
+ for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e)tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ input_property.layer_names.push_back(*iter);
+ input_property.tensor_infos.push_back(tensor_info);
+ }
+
+ ret = engine->SetInputLayerProperty(input_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_layer_property output_property;
+
+ for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+ output_property.layer_names.push_back(*iter);
+ }
+
+ ret = engine->SetOutputLayerProperty(output_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->Load(models, (inference_model_format_e)model_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ ret = PrepareTensorBuffers(engine, inputs, outputs);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ // Copy input image tensor data from a given file to input tensor buffer.
+ for (int i = 0; i < (int)image_paths.size(); ++i) {
+ CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ }
+
+ for (int repeat = 0; repeat < iteration; ++repeat) {
+ ret = engine->Run(inputs, outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ }
+
+ tensor_t result;
+ FillOutputResult(engine, outputs, result);
+
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ ret = VerifyImageClassificationResults(result, answers[0]);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_OBJECT_DETECTION:
+ // 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
+ ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_FACE_DETECTION:
+ // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
+ ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_FACIAL_LANDMARK_DETECTION:
+ // TODO.
+ break;
+ case TEST_POSE_ESTIMATION:
+ // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
+ ret = VerifyPoseEstimationResults(result, answers, 563, 750);
+ EXPECT_EQ(ret, 1);
+ break;
+ }
+
+ CleanupTensorBuffers(inputs, outputs);
+
+ engine->UnbindBackend();
+ models.clear();
+
+ delete engine;
+}
+
+TEST_P(InferenceEngineCaffeTest, Inference)
+{
+ std::string backend_name;
+ int target_devices;
+ int test_type;
+ int iteration;
+ int tensor_type;
+ std::vector<std::string> image_paths;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> input_layers;
+ std::vector<std::string> output_layers;
+ std::vector<std::string> model_paths;
+ std::vector<int> answers;
+
+ std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+
+ if (iteration < 1) {
+ iteration = 1;
+ }
+
+ std::string test_name;
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ test_name.append("Image classification");
+ break;
+ case TEST_OBJECT_DETECTION:
+ test_name.append("Object detection");
+ break;
+ case TEST_FACE_DETECTION:
+ test_name.append("Face detection");
+ break;
+ case TEST_FACIAL_LANDMARK_DETECTION:
+ test_name.append("Facial landmark detection");
+ break;
+ case TEST_POSE_ESTIMATION:
+ test_name.append("Pose estimation");
+ break;
+ }
+
+ std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+ inference_engine_config config = {
+ .backend_name = backend_name,
+ .target_devices = target_devices
+ };
+
+ InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+ if (engine == nullptr) {
+ ASSERT_TRUE(engine);
+ return;
+ }
+
+ int ret = engine->EnableProfiler(true);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_caffe_model.txt");
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector <std::string> models;
+ int model_type = GetModelInfo(model_paths, models);
+ if (model_type == -1) {
+ delete engine;
+ ASSERT_NE(model_type, -1);
+ return;
+ }
+
+ inference_engine_layer_property input_property;
+ std::vector<std::string>::iterator iter;
+
+ for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e)tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ input_property.layer_names.push_back(*iter);
+ input_property.tensor_infos.push_back(tensor_info);
+ }
+
+ ret = engine->SetInputLayerProperty(input_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_layer_property output_property;
+
+ for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+ output_property.layer_names.push_back(*iter);
+ }
+
+ ret = engine->SetOutputLayerProperty(output_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->Load(models, (inference_model_format_e)model_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ ret = PrepareTensorBuffers(engine, inputs, outputs);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ // Copy input image tensor data from a given file to input tensor buffer.
+ for (int i = 0; i < (int)image_paths.size(); ++i) {
+ CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ }
+
+ for (int repeat = 0; repeat < iteration; ++repeat) {
+ ret = engine->Run(inputs, outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ }
+
+ tensor_t result;
+ FillOutputResult(engine, outputs, result);
+
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ ret = VerifyImageClassificationResults(result, answers[0]);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_OBJECT_DETECTION:
+		// 636, 1024 : fixed size of the dumped image. NOTE(review): sibling call sites pass (height, width) in argument order, but this comment previously claimed height=1024/width=636 — confirm 636 is really the height here, or swap the arguments.
+ ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_FACE_DETECTION:
+ // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
+ ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_FACIAL_LANDMARK_DETECTION:
+ // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
+ ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_POSE_ESTIMATION:
+ // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
+ ret = VerifyPoseEstimationResults(result, answers, 563, 750);
+ EXPECT_EQ(ret, 1);
+ break;
+ }
+
+ CleanupTensorBuffers(inputs, outputs);
+
+ engine->UnbindBackend();
+ models.clear();
+
+ delete engine;
+}
+
+TEST_P(InferenceEngineDldtTest, Inference)
+{
+ std::string backend_name;
+ int target_devices;
+ int test_type;
+ int iteration;
+ int tensor_type;
+ std::vector<std::string> image_paths;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> input_layers;
+ std::vector<std::string> output_layers;
+ std::vector<std::string> model_paths;
+ std::vector<int> answers;
+
+ std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+
+ if (iteration < 1) {
+ iteration = 1;
+ }
+
+ std::string test_name;
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ test_name.append("Image classification");
+ break;
+ case TEST_OBJECT_DETECTION:
+ test_name.append("Object detection");
+ break;
+ case TEST_FACE_DETECTION:
+ test_name.append("Face detection");
+ break;
+ case TEST_FACIAL_LANDMARK_DETECTION:
+ test_name.append("Facial landmark detection");
+ break;
+ case TEST_POSE_ESTIMATION:
+ test_name.append("Pose estimation");
+ break;
+ }
+
+ std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+ inference_engine_config config = {
+ .backend_name = backend_name,
+ .target_devices = target_devices
+ };
+
+ InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+ if (engine == nullptr) {
+ ASSERT_TRUE(engine);
+ return;
+ }
+
+ int ret = engine->EnableProfiler(true);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_dldt_model.txt");
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector <std::string> models;
+ int model_type = GetModelInfo(model_paths, models);
+ if (model_type == -1) {
+ delete engine;
+ ASSERT_NE(model_type, -1);
+ return;
+ }
+
+ inference_engine_layer_property input_property;
+ std::vector<std::string>::iterator iter;
+
+ for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e)tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ input_property.layer_names.push_back(*iter);
+ input_property.tensor_infos.push_back(tensor_info);
+ }
+
+ ret = engine->SetInputLayerProperty(input_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_layer_property output_property;
+
+ for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+ output_property.layer_names.push_back(*iter);
+ }
+
+ ret = engine->SetOutputLayerProperty(output_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->Load(models, (inference_model_format_e)model_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ ret = PrepareTensorBuffers(engine, inputs, outputs);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ // Copy input image tensor data from a given file to input tensor buffer.
+ for (int i = 0; i < (int)image_paths.size(); ++i) {
+ CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ }
+
+ for (int repeat = 0; repeat < iteration; ++repeat) {
+ ret = engine->Run(inputs, outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ }
+
+ tensor_t result;
+ FillOutputResult(engine, outputs, result);
+
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ ret = VerifyImageClassificationResults(result, answers[0]);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_OBJECT_DETECTION:
+		// 636, 1024 : fixed size of the dumped image. NOTE(review): sibling call sites pass (height, width) in argument order, but this comment previously claimed height=1024/width=636 — confirm 636 is really the height here, or swap the arguments.
+ ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_FACE_DETECTION:
+ // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
+ ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_FACIAL_LANDMARK_DETECTION:
+ // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
+ ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_POSE_ESTIMATION:
+ // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
+ ret = VerifyPoseEstimationResults(result, answers, 563, 750);
+ EXPECT_EQ(ret, 1);
+ break;
+ }
+
+ CleanupTensorBuffers(inputs, outputs);
+
+ engine->UnbindBackend();
+ models.clear();
+
+ delete engine;
+}
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTfliteTest,
+ testing::Values(
+		// parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ // ARMNN.
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+ // object detection test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+ 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+ 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
+		// parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ // TFLITE.
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+ // object detection test
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+ 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+ 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 })
+ /* TODO */
+ )
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCaffeTest,
+ testing::Values(
+ // parameter order : backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers
+ // OPENCV
+ // squeezenet based image classification test
+ ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 }),
+ ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 }),
+
+ // mobilenet-ssd based object detection test
+ ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel", "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" }, { 15, 19, 335, 557 }),
+ ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel", "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" }, { 15, 19, 335, 557 }),
+
+		// resnet10-ssd based face detection test
+ ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel", "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" }, { 733, 233, 965, 539 }),
+ ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel", "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" }, { 733, 233, 965, 539 }),
+
+ // tweakcnn based facial landmark detection test
+ ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_FACIAL_LANDMARK_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/faciallandmark_detection_caffe.bin" }, 128, 128, 3, { "data" }, { "Sigmoid_fc2" }, { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel", "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
+ { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79}),
+ ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_FACIAL_LANDMARK_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/faciallandmark_detection_caffe.bin" }, 128, 128, 3, { "data" }, { "Sigmoid_fc2" }, { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel", "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
+ { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79})
+ /* TODO */
+ )
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineDldtTest,
+ testing::Values(
+ // DLDT
+ ParamType_Infer("dldt", INFERENCE_TARGET_CUSTOM, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/dldt_banana_classification.bin" }, 224, 224, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml", "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" }, { 954 })
+ )
+);
+++ /dev/null
-/**
- * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-#include <tuple>
-#include <map>
-#include <fcntl.h>
-#include <unistd.h>
-#include <queue>
-#include <algorithm>
-
-#include "gtest/gtest.h"
-
-#include "inference_engine_error.h"
-#include "inference_engine_common_impl.h"
-#include "inference_engine_test_common.h"
-
-typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
-
-class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer> { };
-class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer> { };
-class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer> { };
-
-TEST_P(InferenceEngineTfliteTest, Inference)
-{
- std::string backend_name;
- int target_devices;
- int test_type;
- int iteration;
- int tensor_type;
- std::vector<std::string> image_paths;
- size_t height;
- size_t width;
- size_t ch;
- std::vector<std::string> input_layers;
- std::vector<std::string> output_layers;
- std::vector<std::string> model_paths;
- std::vector<int> answers;
-
- std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
-
- if (iteration < 1) {
- iteration = 1;
- }
-
- std::string test_name;
- switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
- test_name.append("Image classification");
- break;
- case TEST_OBJECT_DETECTION:
- test_name.append("Object detection");
- break;
- case TEST_FACE_DETECTION:
- test_name.append("Face detection");
- break;
- case TEST_FACIAL_LANDMARK_DETECTION:
- test_name.append("Facial landmark detection");
- break;
- case TEST_POSE_ESTIMATION:
- test_name.append("Pose estimation");
- break;
- }
-
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
- inference_engine_config config = {
- .backend_name = backend_name,
- .target_devices = target_devices
- };
-
- InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
- if (engine == nullptr) {
- ASSERT_TRUE(engine);
- return;
- }
-
- int ret = engine->EnableProfiler(true);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_tflite_model.txt");
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_capacity capacity;
- ret = engine->GetBackendCapacity(&capacity);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- ret = engine->SetTargetDevices(target_devices);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- std::vector <std::string> models;
- int model_type = GetModelInfo(model_paths, models);
- if (model_type == -1) {
- delete engine;
- ASSERT_NE(model_type, -1);
- return;
- }
-
- inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
-
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
- inference_engine_tensor_info tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
- (size_t)(1 * ch * height * width)
- };
-
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
- }
-
- ret = engine->SetInputLayerProperty(input_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_layer_property output_property;
-
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
- output_property.layer_names.push_back(*iter);
- }
-
- ret = engine->SetOutputLayerProperty(output_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->Load(models, (inference_model_format_e)model_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
- ret = PrepareTensorBuffers(engine, inputs, outputs);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- // Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int)image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
- }
-
- for (int repeat = 0; repeat < iteration; ++repeat) {
- ret = engine->Run(inputs, outputs);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- }
-
- tensor_t result;
- FillOutputResult(engine, outputs, result);
-
- switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
- ret = VerifyImageClassificationResults(result, answers[0]);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_OBJECT_DETECTION:
- // 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
- ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_FACE_DETECTION:
- // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
- ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_FACIAL_LANDMARK_DETECTION:
- // TODO.
- break;
- case TEST_POSE_ESTIMATION:
- // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
- ret = VerifyPoseEstimationResults(result, answers, 563, 750);
- EXPECT_EQ(ret, 1);
- break;
- }
-
- CleanupTensorBuffers(inputs, outputs);
-
- engine->UnbindBackend();
- models.clear();
-
- delete engine;
-}
-
-TEST_P(InferenceEngineCaffeTest, Inference)
-{
- std::string backend_name;
- int target_devices;
- int test_type;
- int iteration;
- int tensor_type;
- std::vector<std::string> image_paths;
- size_t height;
- size_t width;
- size_t ch;
- std::vector<std::string> input_layers;
- std::vector<std::string> output_layers;
- std::vector<std::string> model_paths;
- std::vector<int> answers;
-
- std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
-
- if (iteration < 1) {
- iteration = 1;
- }
-
- std::string test_name;
- switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
- test_name.append("Image classification");
- break;
- case TEST_OBJECT_DETECTION:
- test_name.append("Object detection");
- break;
- case TEST_FACE_DETECTION:
- test_name.append("Face detection");
- break;
- case TEST_FACIAL_LANDMARK_DETECTION:
- test_name.append("Facial landmark detection");
- break;
- case TEST_POSE_ESTIMATION:
- test_name.append("Pose estimation");
- break;
- }
-
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
- inference_engine_config config = {
- .backend_name = backend_name,
- .target_devices = target_devices
- };
-
- InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
- if (engine == nullptr) {
- ASSERT_TRUE(engine);
- return;
- }
-
- int ret = engine->EnableProfiler(true);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_caffe_model.txt");
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_capacity capacity;
- ret = engine->GetBackendCapacity(&capacity);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- ret = engine->SetTargetDevices(target_devices);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- std::vector <std::string> models;
- int model_type = GetModelInfo(model_paths, models);
- if (model_type == -1) {
- delete engine;
- ASSERT_NE(model_type, -1);
- return;
- }
-
- inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
-
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
- inference_engine_tensor_info tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
- (size_t)(1 * ch * height * width)
- };
-
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
- }
-
- ret = engine->SetInputLayerProperty(input_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_layer_property output_property;
-
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
- output_property.layer_names.push_back(*iter);
- }
-
- ret = engine->SetOutputLayerProperty(output_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->Load(models, (inference_model_format_e)model_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
- ret = PrepareTensorBuffers(engine, inputs, outputs);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- // Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int)image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
- }
-
- for (int repeat = 0; repeat < iteration; ++repeat) {
- ret = engine->Run(inputs, outputs);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- }
-
- tensor_t result;
- FillOutputResult(engine, outputs, result);
-
- switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
- ret = VerifyImageClassificationResults(result, answers[0]);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_OBJECT_DETECTION:
- // 1024 : fixed height size of dumped image, 636 : fixed width size of dumped image.
- ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_FACE_DETECTION:
- // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
- ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_FACIAL_LANDMARK_DETECTION:
- // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
- ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_POSE_ESTIMATION:
- // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
- ret = VerifyPoseEstimationResults(result, answers, 563, 750);
- EXPECT_EQ(ret, 1);
- break;
- }
-
- CleanupTensorBuffers(inputs, outputs);
-
- engine->UnbindBackend();
- models.clear();
-
- delete engine;
-}
-
-TEST_P(InferenceEngineDldtTest, Inference)
-{
- std::string backend_name;
- int target_devices;
- int test_type;
- int iteration;
- int tensor_type;
- std::vector<std::string> image_paths;
- size_t height;
- size_t width;
- size_t ch;
- std::vector<std::string> input_layers;
- std::vector<std::string> output_layers;
- std::vector<std::string> model_paths;
- std::vector<int> answers;
-
- std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
-
- if (iteration < 1) {
- iteration = 1;
- }
-
- std::string test_name;
- switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
- test_name.append("Image classification");
- break;
- case TEST_OBJECT_DETECTION:
- test_name.append("Object detection");
- break;
- case TEST_FACE_DETECTION:
- test_name.append("Face detection");
- break;
- case TEST_FACIAL_LANDMARK_DETECTION:
- test_name.append("Facial landmark detection");
- break;
- case TEST_POSE_ESTIMATION:
- test_name.append("Pose estimation");
- break;
- }
-
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
- inference_engine_config config = {
- .backend_name = backend_name,
- .target_devices = target_devices
- };
-
- InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
- if (engine == nullptr) {
- ASSERT_TRUE(engine);
- return;
- }
-
- int ret = engine->EnableProfiler(true);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_dldt_model.txt");
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_capacity capacity;
- ret = engine->GetBackendCapacity(&capacity);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- ret = engine->SetTargetDevices(target_devices);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- std::vector <std::string> models;
- int model_type = GetModelInfo(model_paths, models);
- if (model_type == -1) {
- delete engine;
- ASSERT_NE(model_type, -1);
- return;
- }
-
- inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
-
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
- inference_engine_tensor_info tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
- (size_t)(1 * ch * height * width)
- };
-
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
- }
-
- ret = engine->SetInputLayerProperty(input_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_layer_property output_property;
-
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
- output_property.layer_names.push_back(*iter);
- }
-
- ret = engine->SetOutputLayerProperty(output_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->Load(models, (inference_model_format_e)model_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
- ret = PrepareTensorBuffers(engine, inputs, outputs);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- // Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int)image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
- }
-
- for (int repeat = 0; repeat < iteration; ++repeat) {
- ret = engine->Run(inputs, outputs);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- }
-
- tensor_t result;
- FillOutputResult(engine, outputs, result);
-
- switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
- ret = VerifyImageClassificationResults(result, answers[0]);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_OBJECT_DETECTION:
- // 1024 : fixed height size of dumped image, 636 : fixed width size of dumped image.
- ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_FACE_DETECTION:
- // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
- ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_FACIAL_LANDMARK_DETECTION:
- // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
- ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_POSE_ESTIMATION:
- // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
- ret = VerifyPoseEstimationResults(result, answers, 563, 750);
- EXPECT_EQ(ret, 1);
- break;
- }
-
- CleanupTensorBuffers(inputs, outputs);
-
- engine->UnbindBackend();
- models.clear();
-
- delete engine;
-}
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTfliteTest,
- testing::Values(
- // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
- // mobilenet based image classification test
- // ARMNN.
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // quantized mobilenet based image classification test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- // object detection test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- // face detection test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- // pose estimation test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
- // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
- // mobilenet based image classification test
- // TFLITE.
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // quantized mobilenet based image classification test
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- // object detection test
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- // face detection test
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- // pose estimation test
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 })
- /* TODO */
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCaffeTest,
- testing::Values(
- // parameter order : backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers
- // OPENCV
- // squeezenet based image classification test
- ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 }),
- ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 }),
-
- // mobilenet-ssd based object detection test
- ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel", "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" }, { 15, 19, 335, 557 }),
- ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel", "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" }, { 15, 19, 335, 557 }),
-
- // mobilenet-ssd based object detection test
- ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel", "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" }, { 733, 233, 965, 539 }),
- ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel", "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" }, { 733, 233, 965, 539 }),
-
- // tweakcnn based facial landmark detection test
- ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_FACIAL_LANDMARK_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/faciallandmark_detection_caffe.bin" }, 128, 128, 3, { "data" }, { "Sigmoid_fc2" }, { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel", "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
- { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79}),
- ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_FACIAL_LANDMARK_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/faciallandmark_detection_caffe.bin" }, 128, 128, 3, { "data" }, { "Sigmoid_fc2" }, { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel", "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
- { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79})
- /* TODO */
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineDldtTest,
- testing::Values(
- // DLDT
- ParamType_Infer("dldt", INFERENCE_TARGET_CUSTOM, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/dldt_banana_classification.bin" }, 224, 224, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml", "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" }, { 954 })
- )
-);