From 4aab2d3b45c97ad1c55702aef851b7bcd3b68e33 Mon Sep 17 00:00:00 2001
From: Inki Dae <inki.dae@samsung.com>
Date: Fri, 29 Jan 2021 15:31:59 +0900
Subject: [PATCH] test: Add CLTuner verification code

This patch adds CLTuner verification code, which generates a tuner file
and then requests an inference with the generated file for each tuning
mode, using an image classification model.

Change-Id: I82dd0f3de7ba3f1ba8213241eca5f175490c575b
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 test/src/inference_engine_profiler.cpp | 287 +++++++++++++++++++++++++++++++--
 1 file changed, 277 insertions(+), 10 deletions(-)

diff --git a/test/src/inference_engine_profiler.cpp b/test/src/inference_engine_profiler.cpp
index c97dfa1..96cb21f 100644
--- a/test/src/inference_engine_profiler.cpp
+++ b/test/src/inference_engine_profiler.cpp
@@ -36,6 +36,16 @@ typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
 				   std::vector<int> >
 		ParamType_Infer;
+
+typedef std::tuple<std::string, int, bool, bool, inference_engine_cltuner_mode_e, int, int, int, std::vector<std::string>,
+				   int, int, int, std::vector<std::string>,
+				   std::vector<std::string>, std::vector<std::string>,
+				   std::vector<int> >
+		ParamType_CLTuner;
+
+class InferenceEngineTfliteCLTunerTest
+		: public testing::TestWithParam<ParamType_CLTuner>
+{};
 class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer>
 {};
 class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer>
 {};
@@ -161,16 +171,6 @@ TEST_P(InferenceEngineTfliteTest, Inference)
 	ret = engine->GetBackendCapacity(&capacity);
 	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
-	if (capacity.cltuner_supported) {
-		inference_engine_cltuner cltuner;
-		cltuner.active = true;
-		cltuner.update = false;
-		cltuner.tuning_mode = static_cast<inference_engine_cltuner_mode_e>(INFERENCE_ENGINE_CLTUNER_READ);
-
-		ret = engine->SetCLTuner(&cltuner);
-		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-	}
-
 	ret = engine->SetTargetDevices(target_devices);
 	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
@@ -279,6 +279,216 @@ TEST_P(InferenceEngineTfliteTest, Inference)
 	models.clear();
 }
 
+TEST_P(InferenceEngineTfliteCLTunerTest, Inference)
+{
+	std::string backend_name;
+	int target_devices;
+	bool active;
+	bool update;
+	inference_engine_cltuner_mode_e tuning_mode;
+	int test_type;
+	int iteration;
+	int tensor_type;
+	std::vector<std::string> image_paths;
+	size_t height;
+	size_t width;
+	size_t ch;
+	std::vector<std::string> input_layers;
+	std::vector<std::string> output_layers;
+	std::vector<std::string> model_paths;
+	std::vector<int> answers;
+
+	std::tie(backend_name, target_devices, active, update, tuning_mode, test_type,
+			 iteration, tensor_type, image_paths, height, width, ch, input_layers,
+			 output_layers, model_paths, answers) = GetParam();
+
+	if (iteration < 1) {
+		iteration = 1;
+	}
+
+	MachineCapacity *Cap = GetMachineCapacity();
+	if (Cap == NULL) {
+		std::cout << "Failed to get machine capacity" << std::endl;
+		return;
+	}
+
+	// If current machine doesn't support inference engine then skip this test.
+	if (Cap->available == false) {
+		return;
+	}
+
+	// If current machine doesn't support OpenCL then skip the inference on GPU.
+	if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+		return;
+	}
+
+	std::string test_name;
+	switch (test_type) {
+	case TEST_IMAGE_CLASSIFICATION:
+		test_name.append("Image classification");
+		break;
+	case TEST_OBJECT_DETECTION:
+		test_name.append("Object detection");
+		break;
+	case TEST_FACE_DETECTION:
+		test_name.append("Face detection");
+		break;
+	case TEST_FACIAL_LANDMARK_DETECTION:
+		test_name.append("Facial landmark detection");
+		break;
+	case TEST_POSE_ESTIMATION:
+		test_name.append("Pose estimation");
+		break;
+	case TEST_AIC_HAND_GESTURE_1:
+		test_name.append("AIC Hand Gesture detection 1");
+		break;
+	case TEST_AIC_HAND_GESTURE_2:
+		test_name.append("AIC Hand Gesture detection 2");
+		break;
+	}
+
+	std::cout << test_name << " inference test : backend = " << backend_name
+			  << ", target device = " << Target_Formats[target_devices]
+			  << ", CLTuning mode = " << tuning_mode << std::endl;
+
+	int backend_type = -1;
+
+	// If backend name is "one" then change it to "mlapi"
+	// and set backend_type to INFERENCE_BACKEND_ONE.
+	if (backend_name.compare("one") == 0) {
+		backend_name = "mlapi";
+		backend_type = INFERENCE_BACKEND_ONE;
+	}
+
+	inference_engine_config config = { .backend_name = backend_name,
+									   .backend_type = backend_type,
+									   .target_devices = target_devices };
+
+	auto engine = std::make_unique<InferenceEngineCommon>();
+	ASSERT_TRUE(engine);
+
+	int ret = engine->EnableProfiler(true);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	if (backend_type == INFERENCE_BACKEND_ONE)
+		backend_name = "one";
+
+	ret = engine->DumpProfileToFile("profile_data_" + backend_name +
+									"_" + Target_Formats[target_devices] +
+									"_tflite_model.txt");
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	ret = engine->LoadConfigFile();
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	ret = engine->BindBackend(&config);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	inference_engine_capacity capacity;
+	ret = engine->GetBackendCapacity(&capacity);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	if (capacity.cltuner_supported) {
+		inference_engine_cltuner cltuner;
+		cltuner.active = active;
+		cltuner.update = update;
+		cltuner.tuning_mode = tuning_mode;
+
+		ret = engine->SetCLTuner(&cltuner);
+		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+	}
+
+	ret = engine->SetTargetDevices(target_devices);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	std::vector<std::string> models;
+	int model_type = GetModelInfo(model_paths, models);
+	ASSERT_NE(model_type, -1);
+
+	std::vector<std::string>::iterator iter;
+
+	inference_engine_tensor_info tensor_info = {
+		{ 1, ch, height, width },
+		INFERENCE_TENSOR_SHAPE_NCHW,
+		static_cast<inference_tensor_data_type_e>(tensor_type),
+		static_cast<size_t>(1 * ch * height * width)
+	};
+
+	inference_engine_layer_property input_property;
+
+	for (auto &input : input_layers) {
+		input_property.layer_names.push_back(input);
+		input_property.tensor_infos.push_back(tensor_info);
+	}
+
+	ret = engine->SetInputLayerProperty(input_property);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	inference_engine_layer_property output_property = { output_layers, {} };
+
+	ret = engine->SetOutputLayerProperty(output_property);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	ret = engine->Load(models, (inference_model_format_e) model_type);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	std::vector<inference_engine_tensor_buffer> inputs, outputs;
+	ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	// Copy input image tensor data from a given file to input tensor buffer.
+	for (int i = 0; i < (int) image_paths.size(); ++i) {
+		CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+	}
+
+	for (int repeat = 0; repeat < iteration; ++repeat) {
+		ret = engine->Run(inputs, outputs);
+		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+	}
+
+	tensor_t result;
+	FillOutputResult(engine.get(), outputs, result);
+
+	switch (test_type) {
+	case TEST_IMAGE_CLASSIFICATION:
+		ret = VerifyImageClassificationResults(result, answers[0]);
+		EXPECT_EQ(ret, 1);
+		break;
+	case TEST_OBJECT_DETECTION:
+		// 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
+		ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
+		EXPECT_EQ(ret, 1);
+		break;
+	case TEST_FACE_DETECTION:
+		// 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
+		ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
+		EXPECT_EQ(ret, 1);
+		break;
+	case TEST_FACIAL_LANDMARK_DETECTION:
+		// TODO.
+		break;
+	case TEST_POSE_ESTIMATION:
+		// 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
+		ret = VerifyPoseEstimationResults(result, answers, 563, 750);
+		EXPECT_EQ(ret, 1);
+		break;
+	case TEST_AIC_HAND_GESTURE_1:
+		ret = VerifyAICHandGesture1Results(outputs);
+		EXPECT_EQ(ret, 1);
+		break;
+	case TEST_AIC_HAND_GESTURE_2:
+		ret = VerifyAICHandGesture2Results(outputs, answers);
+		EXPECT_EQ(ret, 1);
+		break;
+	}
+
+	CleanupTensorBuffers(inputs, outputs);
+
+	engine->UnbindBackend();
+	models.clear();
+}
+
+
 TEST_P(InferenceEngineCaffeTest, Inference)
 {
 	std::string backend_name;
@@ -1248,6 +1458,63 @@ INSTANTIATE_TEST_CASE_P(
 		));
 
 INSTANTIATE_TEST_CASE_P(
+		Opensource, InferenceEngineTfliteCLTunerTest,
+		testing::Values(
+				// parameter order : backend name, target device, CLTuner active flag, CLTuner update flag, CLTuner tuning mode, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+				// mobilenet based image classification test
+				// ARMNN.
+				ParamType_CLTuner(
+						"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_RAPID,
+						TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+						INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+						{ "/opt/usr/images/image_classification.bin" }, 224,
+						224, 3, { "input_2" }, { "dense_3/Softmax" },
+						{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+						{ 3 }),
+				ParamType_CLTuner(
+						"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
+						TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+						INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+						{ "/opt/usr/images/image_classification.bin" }, 224,
+						224, 3, { "input_2" }, { "dense_3/Softmax" },
+						{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+						{ 3 }),
+				ParamType_CLTuner(
+						"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_NORMAL,
+						TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+						INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+						{ "/opt/usr/images/image_classification.bin" }, 224,
+						224, 3, { "input_2" }, { "dense_3/Softmax" },
+						{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+						{ 3 }),
+				ParamType_CLTuner(
+						"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
+						TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+						INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+						{ "/opt/usr/images/image_classification.bin" }, 224,
+						224, 3, { "input_2" }, { "dense_3/Softmax" },
+						{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+						{ 3 }),
+				ParamType_CLTuner(
+						"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE,
+						TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+						INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+						{ "/opt/usr/images/image_classification.bin" }, 224,
+						224, 3, { "input_2" }, { "dense_3/Softmax" },
+						{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+						{ 3 }),
+				ParamType_CLTuner(
+						"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
+						TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+						INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+						{ "/opt/usr/images/image_classification.bin" }, 224,
+						224, 3, { "input_2" }, { "dense_3/Softmax" },
+						{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+						{ 3 })
+				/* TODO */
+		));
+
+INSTANTIATE_TEST_CASE_P(
 		Opensource, InferenceEngineCaffeTest,
 		testing::Values(
 				// parameter order : backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers
-- 
2.7.4
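
For context, the test cases above exercise CLTuner in two phases: a generation pass (active = true, update = true, with a RAPID, NORMAL or EXHAUSTIVE tuning mode) that writes the tuner file, followed by a read pass (active = true, update = false, INFERENCE_ENGINE_CLTUNER_READ) that runs inference against that file. Below is a minimal sketch of that flow, not part of the patch. It uses only calls shown in the diff (GetBackendCapacity, SetCLTuner and the inference_engine_cltuner fields); the header name, the InferenceEngineInterface::Common namespace and the RunCLTunerPhase helper are assumptions made for illustration.

    // Sketch only: RunCLTunerPhase is an illustrative helper, not an
    // inference-engine-interface API. Header and namespace are assumed.
    #include "inference_engine_common_impl.h"

    using namespace InferenceEngineInterface::Common;

    static int RunCLTunerPhase(InferenceEngineCommon *engine, bool generate,
                               inference_engine_cltuner_mode_e mode)
    {
        inference_engine_capacity capacity;
        int ret = engine->GetBackendCapacity(&capacity);
        if (ret != INFERENCE_ENGINE_ERROR_NONE || !capacity.cltuner_supported)
            return ret; // Backend has no CLTuner support; nothing to configure.

        inference_engine_cltuner cltuner;
        cltuner.active = true;
        // Generation pass (update = true) writes the tuner file with the
        // requested tuning mode; read pass (update = false) consumes it.
        cltuner.update = generate;
        cltuner.tuning_mode = generate ? mode : INFERENCE_ENGINE_CLTUNER_READ;

        return engine->SetCLTuner(&cltuner);
    }

A caller would invoke RunCLTunerPhase(engine, true, INFERENCE_ENGINE_CLTUNER_RAPID) before the first Run() to generate the tuner file, then RunCLTunerPhase(engine, false, ...) on later runs to read it back, mirroring the RAPID/READ, NORMAL/READ and EXHAUSTIVE/READ pairs instantiated above.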