test: fix profiler 07/316607/5
authorInki Dae <inki.dae@samsung.com>
Mon, 16 Dec 2024 01:29:15 +0000 (10:29 +0900)
committerInki Dae <inki.dae@samsung.com>
Mon, 23 Dec 2024 00:11:24 +0000 (09:11 +0900)
Fix profiler by dropping invalid code and correcting model paths.

Change-Id: I30a6e0796320e165d2b9932168bbb24476c9364d
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_profiler.cpp

index d111725b4bc1887629c54ebfef4f20f0bbb6b246..ea6a899fa44dfb02f33afd14eae4ec642d150c8a 100644 (file)
                        iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,                       \
                        { "/opt/usr/images/image_classification.bin" }, 224,\
                          224, 3, { "input_2" }, { "dense_3/Softmax" },         \
-                       { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },     \
+                       { "/opt/usr/home/owner/media/Others/mv_test/res/inference/models/ic_tflite_model.tflite" },     \
                        { 3 }
 
-#define PARAM_TYPE_TFLITE_IC_Q_INFER(backend, device, iter)            \
-                       backend, device, TEST_MODEL_IMAGE_CLASSIFICATION,       \
-                       iter, INFERENCE_TENSOR_DATA_TYPE_UINT8,                         \
-                       { "/opt/usr/images/image_classification_q.bin" },       \
-                       224, 224, 3, { "input" },                                                       \
-                       { "MobilenetV1/Predictions/Reshape_1" },                        \
-                       { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" }, \
+#define PARAM_TYPE_TFLITE_IC_Q_INFER(backend, device, iter)            \
+                       backend, device, TEST_MODEL_IMAGE_CLASSIFICATION,       \
+                       iter, INFERENCE_TENSOR_DATA_TYPE_UINT8,                         \
+                       { "/opt/usr/images/image_classification_q.bin" },       \
+                       224, 224, 3, { "input" },                                                       \
+                       { "MobilenetV1/Predictions/Reshape_1" },                        \
+                       { "/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/IC/tflite/quant_mobilenet_v1_224x224.tflite" }, \
                        { 955 }
 
 #define PARAM_TYPE_TFLITE_OD_INFER(backend, device, iter)      \
@@ -62,7 +62,7 @@
                          "TFLite_Detection_PostProcess:1",                             \
                          "TFLite_Detection_PostProcess:2",                             \
                          "TFLite_Detection_PostProcess:3" },                   \
-                       { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },     \
+                       { "/opt/usr/home/owner/media/Others/mv_test/res/inference/models/od_tflite_model.tflite" },     \
                        { 451, 474, 714, 969 }
 
 #define PARAM_TYPE_TFLITE_FD_INFER(backend, device, iter)      \
@@ -75,7 +75,7 @@
                          "TFLite_Detection_PostProcess:1",                             \
                          "TFLite_Detection_PostProcess:2",                             \
                          "TFLite_Detection_PostProcess:3" },                   \
-                       { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },    \
+                       { "/opt/usr/home/owner/media/Others/mv_test/res/inference/models/fd_tflite_model1.tflite" },    \
                        { 727, 225, 960, 555 }
 
 #define PARAM_TYPE_TFLITE_PE_INFER(backend, device, iter)      \
                        { "/opt/usr/images/pose_estimation.bin" },              \
                        192, 192, 3, { "image" },                                               \
                        { "Convolutional_Pose_Machine/stage_5_out" },   \
-                       { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },    \
+                       { "/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/pose-estimation/tflite/pld_cpm_192x192.tflite" },     \
                        { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,     \
                          351, 382, 382, 382, 76,  146, 170, 193, 216, 146,     \
                          123, 99,  287, 381, 451, 287, 381, 475 }
 
-#define PARAM_TYPE_TFLITE_AICHG_1_INFER(backend, device, iter) \
-                       backend, device, TEST_MODEL_AIC_HAND_GESTURE_1,                 \
-                       iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,                               \
-                       { "/opt/usr/images/hand.bin" }, 224, 224, 3,                    \
-                       { "input" },                                                                                    \
-                       { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },   \
-                       { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, \
-                       { 0 }
-
-#define PARAM_TYPE_TFLITE_AICHG_2_INFER(backend, device, iter) \
-                       backend, device, TEST_MODEL_AIC_HAND_GESTURE_2,                 \
-                       iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,                               \
-                       { "/opt/usr/images/hand.bin" }, 56, 56, 21,                             \
-                       { "input" },                                                                                    \
-                       { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },  \
-                       { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" }, \
-                       { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,                               \
-                         78, 36, 82, 42, 82, 44, 83, 45, 35, 37,                               \
-                         61, 36, 59, 36, 52, 39, 35, 32, 40, 34,                               \
-                         62, 39, 70, 40, 58, 41, 34, 42, 34, 41,                               \
-                         38, 38, 12 }
-
-
-// Macros for tflite model based cltuner test cases.
-// parameter order : CLTuner active flag, CLTuner update flag, CLTuner tuning level,
-//                   backend name, target device, test iteration count
-#define PARAM_TYPE_TFLITE_IC_CLTUNER(active, update, mode, backend, device, iter)      \
-                       active, update, mode,                                                                                                   \
-                       PARAM_TYPE_TFLITE_IC_INFER(backend, device, iter)
-
-#define PARAM_TYPE_TFLITE_IC_Q_CLTUNER(active, update, mode, backend, device, iter)    \
-                       active, update, mode,                                                                                                   \
-                       PARAM_TYPE_TFLITE_IC_Q_INFER(backend, device, iter)
-
-#define PARAM_TYPE_TFLITE_OD_CLTUNER(active, update, mode, backend, device, iter)      \
-                       active, update, mode,                                                                                                   \
-                       PARAM_TYPE_TFLITE_OD_INFER(backend, device, iter)
-
-#define PARAM_TYPE_TFLITE_FD_CLTUNER(active, update, mode, backend, device, iter)      \
-                       active, update, mode,                                                                                                   \
-                       PARAM_TYPE_TFLITE_FD_INFER(backend, device, iter)
-
-#define PARAM_TYPE_TFLITE_PE_CLTUNER(active, update, mode, backend, device, iter)      \
-                       active, update, mode,                                                                                                   \
-                       PARAM_TYPE_TFLITE_PE_INFER(backend, device, iter)
-
 using namespace testing;
 
 typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
@@ -143,16 +97,6 @@ typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
                                   std::vector<int> >
                ParamType_Infer;
 
-
-typedef std::tuple<bool, bool, inference_engine_cltuner_mode_e, std::string, int,
-                                  int, int, int, std::vector<std::string>,
-                                  int, int, int, std::vector<std::string>,
-                                  std::vector<std::string>, std::vector<std::string>,
-                                  std::vector<int> >
-               ParamType_CLTuner;
-
-class InferenceEngineTfliteCLTunerTest : public testing::TestWithParam<ParamType_CLTuner>
-{};
 class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer>
 {};
 class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer>
@@ -348,592 +292,9 @@ TEST_P(InferenceEngineTfliteTest, Inference)
        models.clear();
 }
 
-TEST_P(InferenceEngineTfliteCLTunerTest, Inference)
-{
-       bool active;
-       bool update;
-       inference_engine_cltuner_mode_e tuning_mode;
-       std::string backend_name;
-       int target_devices;
-       int test_type;
-       int iteration;
-       int tensor_type;
-       std::vector<std::string> image_paths;
-       size_t height;
-       size_t width;
-       size_t ch;
-       std::vector<std::string> input_layers;
-       std::vector<std::string> output_layers;
-       std::vector<std::string> model_paths;
-       std::vector<int> answers;
-
-       std::tie(active, update, tuning_mode, backend_name, target_devices, test_type,
-                        iteration, tensor_type, image_paths, height, width, ch, input_layers,
-                        output_layers, model_paths, answers) = GetParam();
-
-       if (iteration < 1) {
-               iteration = 1;
-       }
-
-       MachineCapacity *Cap = GetMachineCapacity();
-       if (Cap == NULL) {
-               std::cout << "Failed to get machine capacity" << std::endl;
-               return;
-       }
-
-       // If current machine doesn't support inference engine then skip this test.
-       if (Cap->available == false) {
-               return;
-       }
-
-       // If current machine doesn't support OpenCL then skip the inference on GPU.
-       if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
-               return;
-       }
-
-       std::string test_name = GetModelString(test_type);
-       ASSERT_NE(test_name, "");
-
-       std::cout << test_name << " inference test : backend = " << backend_name
-                         << ", target device = " << Target_Formats[target_devices]
-                         << ", CLTuning mode = " << tuning_mode << std::endl;
-
-       int backend_type = -1;
-
-       // If backend name is "one" then change it to "mlapi"
-       // and set backend_type to INFERENCE_BACKEND_ONE.
-       if (backend_name.compare("one") == 0) {
-               backend_name = "mlapi";
-               backend_type = INFERENCE_BACKEND_ONE;
-       }
-
-       inference_engine_config config = { .backend_name = backend_name,
-                                                                          .backend_type = backend_type,
-                                                                          .target_devices = target_devices };
-
-       auto engine = std::make_unique<InferenceEngineCommon>();
-       ASSERT_TRUE(engine);
-
-       int ret = engine->EnableProfiler(true);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       if (backend_type == INFERENCE_BACKEND_ONE)
-               backend_name = "one";
-
-       ret = engine->DumpProfileToFile("profile_data_" + backend_name +
-                                                                       "_" + Target_Formats[target_devices] +
-                                                                       "_tflite_model.txt");
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->LoadConfigFile();
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->BindBackend(&config);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       inference_engine_capacity capacity;
-       ret = engine->GetBackendCapacity(&capacity);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       if (capacity.cltuner_supported) {
-               inference_engine_cltuner cltuner;
-                       cltuner.active = active;
-                       cltuner.update = update;
-                       cltuner.tuning_mode = tuning_mode;
-
-               ret = engine->SetCLTuner(&cltuner);
-               EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-       }
-
-       ret = engine->SetTargetDevices(target_devices);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       std::vector<std::string> models;
-       int model_type = GetModelInfo(model_paths, models);
-       ASSERT_NE(model_type, -1);
-
-       inference_engine_layer_property input_property;
-
-       inference_engine_tensor_info input_tensor_info = {
-               { 1, ch, height, width },
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               static_cast<inference_tensor_data_type_e>(tensor_type),
-               static_cast<size_t>(1 * ch * height * width),
-               0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
-       };
-
-       for (auto& input : input_layers) {
-               input_property.layers.insert(std::make_pair(input, input_tensor_info));
-       }
-
-       ret = engine->SetInputLayerProperty(input_property);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       inference_engine_layer_property output_property;
-
-       inference_engine_tensor_info output_tensor_info = {
-               std::vector<size_t>{1},
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-               1,
-               0.0f,
-               0,
-               INFERENCE_TENSOR_QUANTIZATION_NONE
-       };
-
-       for (auto& layer : output_layers) {
-               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
-       }
-
-       ret = engine->SetOutputLayerProperty(output_property);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->Load(models, (inference_model_format_e) model_type);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       IETensorBuffer inputs, outputs;
-       ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       // Copy input image tensor data from a given file to input tensor buffer.
-       ASSERT_EQ(image_paths.size(), inputs.size());
-       int imageIndex = 0;
-       for (auto& input : inputs) {
-               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
-       }
-
-       for (int repeat = 0; repeat < iteration; ++repeat) {
-               ret = engine->Run(inputs, outputs);
-               EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-       }
-
-       tensor_t result;
-       FillOutputResult(engine.get(), outputs, result);
-
-       switch (test_type) {
-       case TEST_MODEL_IMAGE_CLASSIFICATION:
-               ret = VerifyImageClassificationResults(result, answers[0]);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_OBJECT_DETECTION:
-               // 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
-               ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_FACE_DETECTION:
-               // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
-               ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
-               // TODO.
-               break;
-       case TEST_MODEL_POSE_ESTIMATION:
-               // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
-               ret = VerifyPoseEstimationResults(result, answers, 563, 750);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_AIC_HAND_GESTURE_1:
-               ret = VerifyAICHandGesture1Results(outputs);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_AIC_HAND_GESTURE_2:
-               ret = VerifyAICHandGesture2Results(outputs, answers);
-               EXPECT_EQ(ret, 1);
-               break;
-       }
-
-       CleanupTensorBuffers(inputs, outputs);
-
-       engine->UnbindBackend();
-       models.clear();
-}
-
-
-TEST_P(InferenceEngineCaffeTest, Inference)
-{
-       std::string backend_name;
-       int target_devices;
-       int test_type;
-       int iteration;
-       int tensor_type;
-       std::vector<std::string> image_paths;
-       size_t height;
-       size_t width;
-       size_t ch;
-       std::vector<std::string> input_layers;
-       std::vector<std::string> output_layers;
-       std::vector<std::string> model_paths;
-       std::vector<int> answers;
-
-       std::tie(backend_name, target_devices, test_type, iteration, tensor_type,
-                        image_paths, height, width, ch, input_layers, output_layers,
-                        model_paths, answers) = GetParam();
-
-       if (iteration < 1) {
-               iteration = 1;
-       }
-
-       MachineCapacity *Cap = GetMachineCapacity();
-       if (Cap == NULL) {
-               std::cout << "Failed to get machine capacity" << std::endl;
-               return;
-       }
-
-       // If current machine doesn't support inference engine then skip this test.
-       if (Cap->available == false) {
-               return;
-       }
-
-       // If current machine doesn't support OpenCL then skip the inference on GPU.
-       if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
-               return;
-       }
-
-       std::string test_name = GetModelString(test_type);
-       ASSERT_NE(test_name, "");
-
-       std::cout << test_name << " inference test : backend = " << backend_name
-                         << ", target device = " << Target_Formats[target_devices]
-                         << std::endl;
-       inference_engine_config config = { .backend_name = backend_name,
-                                                                          .backend_type = -1,
-                                                                          .target_devices = target_devices };
-
-       auto engine = std::make_unique<InferenceEngineCommon>();
-       ASSERT_TRUE(engine);
-
-       int ret = engine->EnableProfiler(true);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->DumpProfileToFile("profile_data_" + backend_name +
-                                                                       "_caffe_model.txt");
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->LoadConfigFile();
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->BindBackend(&config);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       inference_engine_capacity capacity;
-       ret = engine->GetBackendCapacity(&capacity);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->SetTargetDevices(target_devices);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       std::vector<std::string> models;
-       int model_type = GetModelInfo(model_paths, models);
-       ASSERT_NE(model_type, -1);
-
-       inference_engine_layer_property input_property;
-
-       inference_engine_tensor_info input_tensor_info = {
-               { 1, ch, height, width },
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               (inference_tensor_data_type_e) tensor_type,
-               (size_t)(1 * ch * height * width),
-               0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
-       };
-
-       for (auto& layer : input_layers) {
-               input_property.layers.insert(std::make_pair(layer, input_tensor_info));
-       }
-
-       ret = engine->SetInputLayerProperty(input_property);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       inference_engine_layer_property output_property;
-
-       inference_engine_tensor_info output_tensor_info = {
-               std::vector<size_t>{1},
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-               1,
-               0.0f,
-               0,
-               INFERENCE_TENSOR_QUANTIZATION_NONE
-       };
-
-       for (auto& layer : output_layers) {
-               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
-       }
-
-       ret = engine->SetOutputLayerProperty(output_property);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->Load(models, (inference_model_format_e) model_type);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       IETensorBuffer inputs, outputs;
-       ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       // Copy input image tensor data from a given file to input tensor buffer.
-       ASSERT_EQ(image_paths.size(), inputs.size());
-       int imageIndex = 0;
-       for (auto& input : inputs) {
-               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
-       }
-
-       for (int repeat = 0; repeat < iteration; ++repeat) {
-               ret = engine->Run(inputs, outputs);
-               EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-       }
-
-       tensor_t result;
-       FillOutputResult(engine.get(), outputs, result);
-
-       switch (test_type) {
-       case TEST_MODEL_IMAGE_CLASSIFICATION:
-               ret = VerifyImageClassificationResults(result, answers[0]);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_OBJECT_DETECTION:
-               // 1024 : fixed height size of dumped image, 636 : fixed width size of dumped image.
-               ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_FACE_DETECTION:
-               // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
-               ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
-               // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
-               ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_POSE_ESTIMATION:
-               // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
-               ret = VerifyPoseEstimationResults(result, answers, 563, 750);
-               EXPECT_EQ(ret, 1);
-               break;
-       }
-
-       CleanupTensorBuffers(inputs, outputs);
-
-       engine->UnbindBackend();
-       models.clear();
-}
-
-TEST_P(InferenceEngineDldtTest, Inference)
-{
-       std::string backend_name;
-       int target_devices;
-       int test_type;
-       int iteration;
-       int tensor_type;
-       std::vector<std::string> image_paths;
-       size_t height;
-       size_t width;
-       size_t ch;
-       std::vector<std::string> input_layers;
-       std::vector<std::string> output_layers;
-       std::vector<std::string> model_paths;
-       std::vector<int> answers;
-
-       std::tie(backend_name, target_devices, test_type, iteration, tensor_type,
-                        image_paths, height, width, ch, input_layers, output_layers,
-                        model_paths, answers) = GetParam();
-
-       if (iteration < 1) {
-               iteration = 1;
-       }
-
-       MachineCapacity *Cap = GetMachineCapacity();
-       if (Cap == NULL) {
-               std::cout << "Failed to get machine capacity" << std::endl;
-               return;
-       }
-
-       // If current machine doesn't support inference engine then skip this test.
-       if (Cap->available == false) {
-               return;
-       }
-
-       // If current machine doesn't support OpenCL then skip the inference on GPU.
-       if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
-               return;
-       }
-
-       std::string test_name = GetModelString(test_type);
-       ASSERT_NE(test_name, "");
-
-       std::cout << test_name << " inference test : backend = " << backend_name
-                         << ", target device = " << Target_Formats[target_devices]
-                         << std::endl;
-       inference_engine_config config = { .backend_name = backend_name,
-                                                                          .backend_type = -1,
-                                                                          .target_devices = target_devices };
-
-       auto engine = std::make_unique<InferenceEngineCommon>();
-       ASSERT_TRUE(engine);
-
-       int ret = engine->EnableProfiler(true);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->DumpProfileToFile("profile_data_" + backend_name +
-                                                                       "_dldt_model.txt");
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->LoadConfigFile();
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->BindBackend(&config);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       inference_engine_capacity capacity;
-       ret = engine->GetBackendCapacity(&capacity);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->SetTargetDevices(target_devices);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       std::vector<std::string> models;
-       int model_type = GetModelInfo(model_paths, models);
-       ASSERT_NE(model_type, -1);
-
-       inference_engine_layer_property input_property;
-
-       inference_engine_tensor_info input_tensor_info = {
-               { 1, ch, height, width },
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               (inference_tensor_data_type_e) tensor_type,
-               (size_t)(1 * ch * height * width),
-               0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
-       };
-
-       for (auto& layer : input_layers) {
-               input_property.layers.insert(std::make_pair(layer, input_tensor_info));
-       }
-
-       ret = engine->SetInputLayerProperty(input_property);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       inference_engine_layer_property output_property;
-
-       inference_engine_tensor_info output_tensor_info = {
-               { 1, ch, height, width },
-               INFERENCE_TENSOR_SHAPE_NCHW,
-               (inference_tensor_data_type_e) tensor_type,
-               (size_t)(1 * ch * height * width),
-               0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
-       };
-
-       for (auto& layer : output_layers) {
-               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
-       }
-
-       ret = engine->SetOutputLayerProperty(output_property);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->Load(models, (inference_model_format_e) model_type);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       IETensorBuffer inputs, outputs;
-       ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       // Copy input image tensor data from a given file to input tensor buffer.
-       ASSERT_EQ(image_paths.size(), inputs.size());
-       int imageIndex = 0;
-       for (auto& input : inputs) {
-               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
-       }
-
-       for (int repeat = 0; repeat < iteration; ++repeat) {
-               ret = engine->Run(inputs, outputs);
-               EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-       }
-
-       tensor_t result;
-       FillOutputResult(engine.get(), outputs, result);
-
-       switch (test_type) {
-       case TEST_MODEL_IMAGE_CLASSIFICATION:
-               ret = VerifyImageClassificationResults(result, answers[0]);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_OBJECT_DETECTION:
-               // 1024 : fixed height size of dumped image, 636 : fixed width size of dumped image.
-               ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_FACE_DETECTION:
-               // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
-               ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
-               // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
-               ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_MODEL_POSE_ESTIMATION:
-               // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
-               ret = VerifyPoseEstimationResults(result, answers, 563, 750);
-               EXPECT_EQ(ret, 1);
-               break;
-       }
-
-       CleanupTensorBuffers(inputs, outputs);
-
-       engine->UnbindBackend();
-       models.clear();
-}
-
 INSTANTIATE_TEST_CASE_P(
                Opensource, InferenceEngineTfliteTest,
                testing::Values(
-                               // parameter order : backend name, target device, test iteration count.
-                               // mobilenet based image classification test
-                               // ARMNN.
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // quantized mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_Q_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // object detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_OD_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // face detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_FD_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // pose estimation test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_PE_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 1 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_1_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 2 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_2_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-
-                               // mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // quantized mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_Q_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // object detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_OD_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // face detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_FD_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // pose estimation test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_PE_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 1 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_1_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 2 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_2_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-
-                               /*********************************************************************************/
                                // parameter order : backend name, target device, test iteration count.
                                // mobilenet based image classification test
                                // TFLITE.
@@ -951,13 +312,6 @@ INSTANTIATE_TEST_CASE_P(
                                // pose estimation test
                                ParamType_Infer(
                                                PARAM_TYPE_TFLITE_PE_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 1 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_1_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 2 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_2_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                PARAM_TYPE_TFLITE_IC_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
@@ -972,325 +326,10 @@ INSTANTIATE_TEST_CASE_P(
                                                PARAM_TYPE_TFLITE_FD_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
                                // pose estimation test
                                ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_PE_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 1 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_1_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 2 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_2_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION))
+                                               PARAM_TYPE_TFLITE_PE_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION))
                                /* TODO */
                ));
 
-INSTANTIATE_TEST_CASE_P(
-               Inhouse, InferenceEngineTfliteTest,
-               testing::Values(
-                               /*********************************************************************************/
-                               // parameter order : backend name, target device, test iteration count.
-                               // mobilenet based image classification test
-                               // ONE via MLAPI.
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // quantized mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_Q_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // object detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_OD_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // face detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_FD_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // pose estimation test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_PE_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 1 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_1_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 2 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_2_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-
-                               // mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // quantized mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_Q_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // object detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_OD_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // face detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_FD_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // pose estimation test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_PE_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 1 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_1_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 2 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_2_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-
-                               /*********************************************************************************/
-                               // parameter order : backend name, target device, test iteration count.
-                               // mobilenet based image classification test
-                               // TFLITE via MLAPI.
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // quantized mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_Q_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // object detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_OD_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // face detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_FD_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // pose estimation test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_PE_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 1 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_1_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 2 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_2_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
-
-                               // mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // quantized mobilenet based image classification test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_IC_Q_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // object detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_OD_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // face detection test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_FD_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // pose estimation test
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_PE_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 1 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_1_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               // Hand gesture model 2 from AIC
-                               ParamType_Infer(
-                                               PARAM_TYPE_TFLITE_AICHG_2_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION))
-                               /* TODO */
-                               ));
-
-INSTANTIATE_TEST_CASE_P(
-               Opensource, InferenceEngineTfliteCLTunerTest,
-               testing::Values(
-                               // parameter order : CLTuner active flag, CLTuner update flag, CLTuner tuning level or mode, backend name, target device, test iteration count.
-                               // mobilenet based image classification test
-                               // ARMNN.
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_RAPID, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_NORMAL, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-
-                               // quantized mobilenet based image classification test
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_RAPID, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_NORMAL, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-
-                               // object detection test
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_RAPID, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_NORMAL, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-
-                               // face detection test
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_RAPID, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_NORMAL, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-
-                               // pose estimation test
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_RAPID, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_NORMAL, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-
-                               // CLTuner file generation.
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, 1)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, 1)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, 1)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, 1)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, 1)),
-
-                               // Measure inference performance without CLTuner.
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(false, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(false, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(false, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(false, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(false, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-
-                               // Measure inference performance with CLTuner.
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_IC_Q_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_OD_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_FD_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
-                               ParamType_CLTuner(
-                                               PARAM_TYPE_TFLITE_PE_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION))
-                               /* TODO */
-                               ));
-
-INSTANTIATE_TEST_CASE_P(
-               Opensource, InferenceEngineCaffeTest,
-               testing::Values(
-                               // parameter order : backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers
-                               // OPENCV
-                               // squeezenet based image classification test
-                               ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_CPU,
-                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/image_classification_caffe.bin" },
-                                               227, 227, 3, { "data" }, { "prob" },
-                                               { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
-                                                 "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
-                                               { 281 }),
-                               ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_GPU,
-                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/image_classification_caffe.bin" },
-                                               227, 227, 3, { "data" }, { "prob" },
-                                               { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
-                                                 "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
-                                               { 281 }),
-
-                               // mobilenet-ssd based object detection test
-                               ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
-                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/object_detection_caffe.bin" }, 300,
-                                               300, 3, { "data" }, { "detection_out" },
-                                               { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel",
-                                                 "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" },
-                                               { 15, 19, 335, 557 }),
-                               ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
-                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/object_detection_caffe.bin" }, 300,
-                                               300, 3, { "data" }, { "detection_out" },
-                                               { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel",
-                                                 "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" },
-                                               { 15, 19, 335, 557 }),
-
-                               // mobilenet-ssd based object detection test
-                               ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/face_detection_caffe.bin" }, 300,
-                                               300, 3, { "data" }, { "detection_out" },
-                                               { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel",
-                                                 "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" },
-                                               { 733, 233, 965, 539 }),
-                               ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/face_detection_caffe.bin" }, 300,
-                                               300, 3, { "data" }, { "detection_out" },
-                                               { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel",
-                                                 "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" },
-                                               { 733, 233, 965, 539 }),
-
-                               // tweakcnn based facial landmark detection test
-                               ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_CPU,
-                                               TEST_MODEL_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/faciallandmark_detection_caffe.bin" },
-                                               128, 128, 3, { "data" }, { "Sigmoid_fc2" },
-                                               { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel",
-                                                 "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
-                                               { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79 }),
-                               ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_GPU,
-                                               TEST_MODEL_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/faciallandmark_detection_caffe.bin" },
-                                               128, 128, 3, { "data" }, { "Sigmoid_fc2" },
-                                               { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel",
-                                                 "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
-                                               { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79 })
-                               /* TODO */
-                               ));
-
-INSTANTIATE_TEST_CASE_P(
-               Opensource, InferenceEngineDldtTest,
-               testing::Values(
-                               // DLDT
-                               ParamType_Infer(
-                                               "dldt", INFERENCE_TARGET_CUSTOM,
-                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/dldt_banana_classification.bin" },
-                                               224, 224, 3, { "data" }, { "prob" },
-                                               { "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml",
-                                                 "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" },
-                                               { 954 })));
-
 int main(int argc, char **argv)
 {
        InitGoogleTest(&argc, argv);