Add DLDT inference result test
authorKwanghoon Son <k.son@samsung.com>
Fri, 10 Apr 2020 03:27:38 +0000 (12:27 +0900)
committerInki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
Add a gtest value-parameterized test case that runs inference through the DLDT backend and verifies the inference result.

Change-Id: Ie3a25292f0c109277c4ea1c95865a7162d5126ac
Signed-off-by: Kwanghoon Son <k.son@samsung.com>
test/src/inference_engine_test.cpp

index d93c90d..1352c62 100644 (file)
@@ -37,6 +37,7 @@ class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
 class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
 class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer> { };
 class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer> { };
+class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer> { };
 
 std::map<std::string, int> Model_Formats = {
        { "caffemodel", INFERENCE_MODEL_CAFFE },
@@ -864,6 +865,192 @@ TEST_P(InferenceEngineCaffeTest, Inference)
        delete engine;
 }
 
+TEST_P(InferenceEngineDldtTest, Inference)
+{
+       std::string backend_name;
+       int target_devices;
+       int test_type;
+       int iteration;
+       int tensor_type;
+       std::vector<std::string> image_paths;
+       size_t height;
+       size_t width;
+       size_t ch;
+       std::vector<std::string> input_layers;
+       std::vector<std::string> output_layers;
+       std::vector<std::string> model_paths;
+       std::vector<int> answers;
+
+       std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+
+       if (iteration < 1) {
+               iteration = 1;
+       }
+
+       std::string test_name;
+       switch (test_type) {
+       case TEST_IMAGE_CLASSIFICATION:
+               test_name.append("Image classification");
+               break;
+       case TEST_OBJECT_DETECTION:
+               test_name.append("Object detection");
+               break;
+       case TEST_FACE_DETECTION:
+               test_name.append("Face detection");
+               break;
+       case TEST_FACIAL_LANDMARK_DETECTION:
+               test_name.append("Facial landmark detection");
+               break;
+       case TEST_POSE_ESTIMATION:
+               test_name.append("Pose estimation");
+               break;
+       }
+
+       std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+       inference_engine_config config = {
+               .backend_name = backend_name,
+               .target_devices = target_devices
+       };
+
+       InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+       if (engine == nullptr) {
+               ASSERT_TRUE(engine);
+               return;
+       }
+
+       int ret = engine->EnableProfiler(true);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_caffe_model.txt");
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       ret = engine->BindBackend(&config);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       inference_engine_capacity capacity;
+       ret = engine->GetBackendCapacity(&capacity);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       ret = engine->SetTargetDevices(target_devices);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       std::vector <std::string> models;
+       int model_type = GetModelInfo(model_paths, models);
+       if (model_type == -1) {
+               delete engine;
+               ASSERT_NE(model_type, -1);
+               return;
+       }
+
+       inference_engine_layer_property input_property;
+       std::vector<std::string>::iterator iter;
+
+       for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+               inference_engine_tensor_info tensor_info = {
+                       { 1, ch, height, width },
+                       (inference_tensor_shape_type_e)TENSOR_SHAPE_NCHW,
+                       (inference_tensor_data_type_e)tensor_type,
+                       (size_t)(1 * ch * height * width)
+               };
+
+               input_property.layer_names.push_back(*iter);
+               input_property.tensor_infos.push_back(tensor_info);
+    }
+
+       ret = engine->SetInputLayerProperty(input_property);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       inference_engine_layer_property output_property;
+
+       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+               output_property.layer_names.push_back(*iter);
+       }
+
+       ret = engine->SetOutputLayerProperty(output_property);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       ret = engine->Load(models, (inference_model_format_e)model_type);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       std::vector<inference_engine_tensor_buffer> inputs, outputs;
+       ret = PrepareTensorBuffers(engine, inputs, outputs);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       // Copy input image tensor data from a given file to input tensor buffer.
+       for (int i = 0; i < (int)image_paths.size(); ++i) {
+               CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+       }
+
+       for (int repeat = 0; repeat < iteration; ++repeat) {
+               ret = engine->Run(inputs, outputs);
+               EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+       }
+
+       tensor_t result;
+       FillOutputResult(engine, outputs, result);
+
+       switch (test_type) {
+       case TEST_IMAGE_CLASSIFICATION:
+               ret = VerifyImageClassificationResults(result, answers[0]);
+               EXPECT_EQ(ret, 1);
+               break;
+       case TEST_OBJECT_DETECTION:
+               // 1024 : fixed height size of dumped image, 636 : fixed width size of dumped image.
+               ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
+               EXPECT_EQ(ret, 1);
+               break;
+       case TEST_FACE_DETECTION:
+               // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
+               ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
+               EXPECT_EQ(ret, 1);
+               break;
+       case TEST_FACIAL_LANDMARK_DETECTION:
+               // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
+               ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
+               EXPECT_EQ(ret, 1);
+               break;
+       case TEST_POSE_ESTIMATION:
+               // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
+               ret = VerifyPoseEstimationResults(result, answers, 563, 750);
+               EXPECT_EQ(ret, 1);
+               break;
+       }
+
+       CleanupTensorBuffers(inputs, outputs);
+
+       engine->UnbindBackend();
+       models.clear();
+
+       delete engine;
+}
 INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
                testing::Values(
                        // parameter order : backend name, target device
@@ -973,4 +1160,11 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCaffeTest,
                                                        { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79})
                        /* TODO */
                )
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineDldtTest,
+               testing::Values(
+                       // DLDT
+                       ParamType_Infer("dldt", INFERENCE_TARGET_CUSTOM, TEST_FACE_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/dldt_banana_classification.bin" }, 224, 224, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml", "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" }, { 954 })
+               )
 );
\ No newline at end of file