test: Clean up profiler
author    Inki Dae <inki.dae@samsung.com>  Tue, 8 Dec 2020 07:57:31 +0000 (16:57 +0900)
committer Inki Dae <inki.dae@samsung.com>  Tue, 8 Dec 2020 07:57:31 +0000 (16:57 +0900)
Change-Id: I1894b47931c46ee341be6415d3cab4020dda4575
Signed-off-by: Inki Dae <inki.dae@samsung.com>
start_profiler.sh
test/src/inference_engine_profiler.cpp
test/src/inference_engine_test_common.cpp

diff --git a/start_profiler.sh b/start_profiler.sh
index 5bb9253..284e601 100644
--- a/start_profiler.sh
+++ b/start_profiler.sh
@@ -1,25 +1,25 @@
 #!/bin/sh
 
-CNT=39
+CNT=27
 
-echo "Tflite model test case count = $CNT"
+echo "Opensource based inference engine(tflite model) test case count = $CNT"
 
 # TFLITE model
 LIST=$(seq 0 $CNT)
 for i in $LIST
 do
-  /usr/bin/inference_engine_profiler --gtest_filter=Prefix/InferenceEngineTfliteTest.Inference/$i
+  /usr/bin/inference_engine_profiler --gtest_filter=Opensource/InferenceEngineTfliteTest.Inference/$i
 done
 
-# Hand gesture model from AIC
-CNT=9
+CNT=27
 
-echo "Hand gesture model from AIC test case count = $CNT"
+echo "Inhouse based inference engine(tflite model) test case count = $CNT"
 
+# TFLITE model
 LIST=$(seq 0 $CNT)
 for i in $LIST
 do
-  /usr/bin/inference_engine_profiler --gtest_filter=Prefix/InferenceEngineHandGestureTest.Inference/$i
+  /usr/bin/inference_engine_profiler --gtest_filter=Inhouse/InferenceEngineTfliteTest.Inference/$i
 done
 
 # Caffe model
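
googletest names each value-parameterized test <instantiation>/<suite>.<test>/<index>, so renaming the instantiations below from Prefix to Opensource and Inhouse is what drives the new --gtest_filter values in the script above. A minimal standalone sketch of that naming scheme (not the project's code):

#include <gtest/gtest.h>

// Standalone sketch: the first argument of INSTANTIATE_TEST_CASE_P becomes
// the prefix of every generated test name.
class ExampleTfliteTest : public testing::TestWithParam<int>
{};

TEST_P(ExampleTfliteTest, Inference)
{
	// Each parameter value produces one test instance.
	EXPECT_GE(GetParam(), 0);
}

// Registers Opensource/ExampleTfliteTest.Inference/0 .. /2 -- the same shape
// of name start_profiler.sh selects with
// --gtest_filter=Opensource/InferenceEngineTfliteTest.Inference/$i
INSTANTIATE_TEST_CASE_P(Opensource, ExampleTfliteTest,
						testing::Values(0, 1, 2));
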
diff --git a/test/src/inference_engine_profiler.cpp b/test/src/inference_engine_profiler.cpp
index 7b34a37..c5f6715 100644
--- a/test/src/inference_engine_profiler.cpp
+++ b/test/src/inference_engine_profiler.cpp
@@ -40,8 +40,6 @@ class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer>
 {};
 class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer>
 {};
-class InferenceEngineHandGestureTest : public testing::TestWithParam<ParamType_Infer>
-{};
 
 TEST_P(InferenceEngineTfliteTest, Inference)
 {
@@ -100,6 +98,12 @@ TEST_P(InferenceEngineTfliteTest, Inference)
        case TEST_POSE_ESTIMATION:
                test_name.append("Pose estimation");
                break;
+       case TEST_AIC_HAND_GESTURE_1:
+               test_name.append("AIC Hand Gesture detection 1");
+               break;
+       case TEST_AIC_HAND_GESTURE_2:
+               test_name.append("AIC Hand Gesture detection 2");
+               break;
        }
 
        std::cout << test_name << " inference test : backend = " << backend_name
@@ -135,6 +139,7 @@ TEST_P(InferenceEngineTfliteTest, Inference)
                backend_name = "one";
 
        ret = engine->DumpProfileToFile("profile_data_" + backend_name +
+                                                                       "_" + Target_Formats[target_devices] +
                                                                        "_tflite_model.txt");
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
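
With the target device folded into the dump path, each backend/device pair now writes its own profile file instead of overwriting a shared one. A sketch of the resulting names, assuming Target_Formats maps the device enums to strings such as "CPU" and "GPU" (the actual map and enum values live in the test common code and are assumptions here):

#include <iostream>
#include <map>
#include <string>

// Stand-in constants and mapping; the real definitions are in the
// inference engine headers / test common code.
enum { INFERENCE_TARGET_CPU = 1 << 0, INFERENCE_TARGET_GPU = 1 << 1 };

std::map<int, std::string> Target_Formats = {
	{ INFERENCE_TARGET_CPU, "CPU" },  // assumed mapping
	{ INFERENCE_TARGET_GPU, "GPU" }   // assumed mapping
};

int main()
{
	std::string backend_name = "armnn";
	int target_devices = INFERENCE_TARGET_CPU;

	// Mirrors the DumpProfileToFile() argument above, producing e.g.
	// "profile_data_armnn_CPU_tflite_model.txt"
	std::cout << "profile_data_" + backend_name + "_" +
				 Target_Formats[target_devices] + "_tflite_model.txt"
			  << std::endl;
	return 0;
}
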
@@ -246,6 +251,14 @@ TEST_P(InferenceEngineTfliteTest, Inference)
                ret = VerifyPoseEstimationResults(result, answers, 563, 750);
                EXPECT_EQ(ret, 1);
                break;
+       case TEST_AIC_HAND_GESTURE_1:
+               ret = VerifyAICHandGesture1Results(outputs);
+               EXPECT_EQ(ret, 1);
+               break;
+       case TEST_AIC_HAND_GESTURE_2:
+               ret = VerifyAICHandGesture2Results(outputs, answers);
+               EXPECT_EQ(ret, 1);
+               break;
        }
 
        CleanupTensorBuffers(inputs, outputs);
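
The two verifiers called above are implemented in test/src/inference_engine_test_common.cpp. Purely as a hypothetical sketch of the kind of comparison such a verifier performs (not the project's implementation), assuming float32 output tensors checked in order against the expected integer answers:

#include <cmath>
#include <vector>

// Simplified stand-in for inference_engine_tensor_buffer.
struct tensor_buffer { float *data; size_t count; };

// Hypothetical verifier: returns 1 on success so that EXPECT_EQ(ret, 1)
// above passes, 0 on the first mismatch.
int VerifySketch(const std::vector<tensor_buffer> &outputs,
				 const std::vector<int> &answers)
{
	size_t idx = 0;
	for (const auto &out : outputs) {
		for (size_t i = 0; i < out.count && idx < answers.size(); ++i, ++idx) {
			if (static_cast<int>(std::lround(out.data[i])) != answers[idx])
				return 0;
		}
	}
	return 1;
}
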
@@ -654,193 +667,8 @@ TEST_P(InferenceEngineDldtTest, Inference)
        models.clear();
 }
 
-TEST_P(InferenceEngineHandGestureTest, Inference)
-{
-       std::string backend_name;
-       int target_devices;
-       int test_type;
-       int iteration;
-       int tensor_type;
-       std::vector<std::string> image_paths;
-       size_t height;
-       size_t width;
-       size_t ch;
-       std::vector<std::string> input_layers;
-       std::vector<std::string> output_layers;
-       std::vector<std::string> model_paths;
-       std::vector<int> answers;
-
-       std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
-
-       if (iteration < 1) {
-               iteration = 1;
-       }
-
-       MachineCapacity *Cap = GetMachineCapacity();
-       if (Cap == NULL) {
-               std::cout << "Failed to get machine capacity" << std::endl;
-               return;
-       }
-
-       // If current machine doesn't support inference engine then skip this test.
-       if (Cap->available == false) {
-               return;
-       }
-
-       // If current machine doesn't support OpenCL then skip the inference on GPU.
-       if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
-               return;
-       }
-
-       std::string test_name;
-       switch (test_type) {
-       case TEST_AIC_HAND_GESTURE_1:
-               test_name.append("AIC Hand Gesture detection 1");
-               break;
-       case TEST_AIC_HAND_GESTURE_2:
-               test_name.append("AIC Hand Gesture detection 2");
-               break;
-       }
-
-
-       std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-
-       int backend_type = -1;
-
-       // If backend name is "one" then change it to "mlapi"
-       // and set backend_type to INFERENCE_BACKEND_ONE.
-       if (backend_name.compare("one") == 0) {
-               backend_name = "mlapi";
-               backend_type = INFERENCE_BACKEND_ONE;
-       }
-
-       inference_engine_config config = {
-               .backend_name = backend_name,
-               .backend_type = backend_type,
-               .target_devices = target_devices
-       };
-
-       auto engine = std::make_unique<InferenceEngineCommon>();
-       if (engine == nullptr) {
-               ASSERT_TRUE(engine);
-               return;
-       }
-
-       int ret = engine->EnableProfiler(true);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-               return;
-       }
-
-       if (backend_type == INFERENCE_BACKEND_ONE)
-               backend_name = "one";
-
-       ret = engine->DumpProfileToFile("profile_data_" + backend_name +
-                                                                       "_hand_gesture_model.txt");
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-               return;
-       }
-
-       ret = engine->LoadConfigFile();
-       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->BindBackend(&config);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-               return;
-       }
-
-       inference_engine_capacity capacity;
-       ret = engine->GetBackendCapacity(&capacity);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       ret = engine->SetTargetDevices(target_devices);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-       std::vector <std::string> models;
-       int model_type = GetModelInfo(model_paths, models);
-       if (model_type == -1) {
-               ASSERT_NE(model_type, -1);
-               return;
-       }
-
-       inference_engine_layer_property input_property;
-       std::vector<std::string>::iterator iter;
-
-       for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
-               inference_engine_tensor_info tensor_info = {
-                       { 1, ch, height, width },
-                       (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
-                       (inference_tensor_data_type_e)tensor_type,
-                       (size_t)(1 * ch * height * width)
-               };
-
-               input_property.layer_names.push_back(*iter);
-               input_property.tensor_infos.push_back(tensor_info);
-    }
-
-       ret = engine->SetInputLayerProperty(input_property);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-               return;
-       }
-
-       inference_engine_layer_property output_property;
-
-       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
-               output_property.layer_names.push_back(*iter);
-       }
-
-       ret = engine->SetOutputLayerProperty(output_property);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-               return;
-       }
-
-       ret = engine->Load(models, (inference_model_format_e)model_type);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-               return;
-       }
-
-       std::vector<inference_engine_tensor_buffer> inputs, outputs;
-       ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-               return;
-       }
-
-       // Copy input image tensor data from a given file to input tensor buffer.
-       for (int i = 0; i < (int)image_paths.size(); ++i) {
-               CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
-       }
-
-       for (int repeat = 0; repeat < iteration; ++repeat) {
-               ret = engine->Run(inputs, outputs);
-               EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-       }
-
-       switch (test_type) {
-       case TEST_AIC_HAND_GESTURE_1:
-               ret = VerifyAICHandGesture1Results(outputs);
-               EXPECT_EQ(ret, 1);
-               break;
-       case TEST_AIC_HAND_GESTURE_2:
-               ret = VerifyAICHandGesture2Results(outputs, answers);
-               EXPECT_EQ(ret, 1);
-               break;
-       }
-
-       CleanupTensorBuffers(inputs, outputs);
-
-       engine->UnbindBackend();
-       models.clear();
-}
-
-
 INSTANTIATE_TEST_CASE_P(
-               Prefix, InferenceEngineTfliteTest,
+               Opensource, InferenceEngineTfliteTest,
                testing::Values(
                                // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
                                // mobilenet based image classification test
@@ -853,14 +681,6 @@ INSTANTIATE_TEST_CASE_P(
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
                                                { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
                                                { 3 }),
-                               ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/image_classification.bin" }, 224,
-                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
-                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-                                               { 3 }),
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU,
@@ -871,15 +691,6 @@ INSTANTIATE_TEST_CASE_P(
                                                { "MobilenetV1/Predictions/Reshape_1" },
                                                { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
-                               ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
-                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
-                                               224, 3, { "input" },
-                                               { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-                                               { 955 }),
                                // object detection test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
@@ -892,17 +703,6 @@ INSTANTIATE_TEST_CASE_P(
                                                  "TFLite_Detection_PostProcess:3" },
                                                { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
                                                { 451, 474, 714, 969 }),
-                               ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
-                                               { "normalized_input_image_tensor" },
-                                               { "TFLite_Detection_PostProcess",
-                                                 "TFLite_Detection_PostProcess:1",
-                                                 "TFLite_Detection_PostProcess:2",
-                                                 "TFLite_Detection_PostProcess:3" },
-                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-                                               { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
@@ -915,17 +715,6 @@ INSTANTIATE_TEST_CASE_P(
                                                  "TFLite_Detection_PostProcess:3" },
                                                { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
                                                { 727, 225, 960, 555 }),
-                               ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
-                                               { "normalized_input_image_tensor" },
-                                               { "TFLite_Detection_PostProcess",
-                                                 "TFLite_Detection_PostProcess:1",
-                                                 "TFLite_Detection_PostProcess:2",
-                                                 "TFLite_Detection_PostProcess:3" },
-                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-                                               { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10,
@@ -937,47 +726,31 @@ INSTANTIATE_TEST_CASE_P(
                                                { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
-                               ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
-                                               { "image" },
-                                               { "Convolutional_Pose_Machine/stage_5_out" },
-                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
-                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
-                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
-                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
-                               // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+                               // Hand gesture model 1 from AIC
+                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+                               // Hand gesture model 2 from AIC
+                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+                                                 38, 38, 12 }),
                                // mobilenet based image classification test
-                               // ONE via MLAPI.
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU,
+                                               "armnn", INFERENCE_TARGET_GPU,
                                                TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
                                                { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
                                                { 3 }),
+                               // quantized mobilenet based image classification test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/image_classification.bin" }, 224,
-                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
-                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-                                               { 3 }),
-                               // quantized mobilenet based image classification test
-                               ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
-                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
-                                               224, 3, { "input" },
-                                               { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-                                               { 955 }),
-                               ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU,
+                                               "armnn", INFERENCE_TARGET_GPU,
                                                TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
@@ -987,7 +760,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               "armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
                                                10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -997,31 +770,80 @@ INSTANTIATE_TEST_CASE_P(
                                                  "TFLite_Detection_PostProcess:3" },
                                                { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
                                                { 451, 474, 714, 969 }),
+                               // face detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
                                                  "TFLite_Detection_PostProcess:1",
                                                  "TFLite_Detection_PostProcess:2",
                                                  "TFLite_Detection_PostProcess:3" },
-                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-                                               { 451, 474, 714, 969 }),
-                               // face detection test
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               // pose estimation test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // Hand gesture model 1 from AIC
+                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+                               // Hand gesture model 2 from AIC
+                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+                                                 38, 38, 12 }),
+
+                               /*********************************************************************************/
+                               // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+                               // mobilenet based image classification test
+                               // TFLITE.
+                               ParamType_Infer(
+                                               "tflite", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               // quantized mobilenet based image classification test
+                               ParamType_Infer(
+                                               "tflite", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               // object detection test
+                               ParamType_Infer(
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
                                                  "TFLite_Detection_PostProcess:1",
                                                  "TFLite_Detection_PostProcess:2",
                                                  "TFLite_Detection_PostProcess:3" },
-                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-                                               { 727, 225, 960, 555 }),
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               // face detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1033,17 +855,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
-                                               { "image" },
-                                               { "Convolutional_Pose_Machine/stage_5_out" },
-                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
-                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
-                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
-                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
-                               ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
                                                10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1052,17 +864,21 @@ INSTANTIATE_TEST_CASE_P(
                                                { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
-                               // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+                               // Hand gesture model 1 from AIC
+                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+                               // Hand gesture model 2 from AIC
+                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+                                                 38, 38, 12 }),
+
                                // mobilenet based image classification test
-                               // TFLITE.
-                               ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/image_classification.bin" }, 224,
-                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
-                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-                                               { 3 }),
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU,
                                                TEST_IMAGE_CLASSIFICATION, 10,
@@ -1073,15 +889,6 @@ INSTANTIATE_TEST_CASE_P(
                                                { 3 }),
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
-                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
-                                               224, 3, { "input" },
-                                               { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-                                               { 955 }),
-                               ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU,
                                                TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
@@ -1092,7 +899,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
                                                10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1102,31 +909,85 @@ INSTANTIATE_TEST_CASE_P(
                                                  "TFLite_Detection_PostProcess:3" },
                                                { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
                                                { 451, 474, 714, 969 }),
+                               // face detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
                                                  "TFLite_Detection_PostProcess:1",
                                                  "TFLite_Detection_PostProcess:2",
                                                  "TFLite_Detection_PostProcess:3" },
-                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-                                               { 451, 474, 714, 969 }),
-                               // face detection test
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               // pose estimation test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // Hand gesture model 1 from AIC
+                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+                               // Hand gesture model 2 from AIC
+                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+                                                 38, 38, 12 })
+                               /* TODO */
+                               ));
+
+INSTANTIATE_TEST_CASE_P(
+               Inhouse, InferenceEngineTfliteTest,
+               testing::Values(
+                               /*********************************************************************************/
+                               // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+                               // mobilenet based image classification test
+                               // ONE via MLAPI.
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               // quantized mobilenet based image classification test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               // object detection test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
                                                  "TFLite_Detection_PostProcess:1",
                                                  "TFLite_Detection_PostProcess:2",
                                                  "TFLite_Detection_PostProcess:3" },
-                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-                                               { 727, 225, 960, 555 }),
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               // face detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1138,7 +999,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
                                                10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1147,8 +1008,66 @@ INSTANTIATE_TEST_CASE_P(
                                                { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // Hand gesture model 1 from AIC
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+                               // Hand gesture model 2 from AIC
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+                                                 38, 38, 12 }),
+
+                               // mobilenet based image classification test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               "one", INFERENCE_TARGET_GPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               // quantized mobilenet based image classification test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               // object detection test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               // face detection test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               // pose estimation test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
                                                10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1157,6 +1076,23 @@ INSTANTIATE_TEST_CASE_P(
                                                { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // Hand gesture model 1 from AIC
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+                               // Hand gesture model 2 from AIC
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+                                                 38, 38, 12 }),
+
+                               /*********************************************************************************/
+                               // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+                               // mobilenet based image classification test
                                // TFLITE via MLAPI.
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_CPU,
@@ -1166,14 +1102,6 @@ INSTANTIATE_TEST_CASE_P(
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
                                                { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
                                                { 3 }),
-                               ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/image_classification.bin" }, 224,
-                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
-                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-                                               { 3 }),
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_CPU,
@@ -1184,15 +1112,6 @@ INSTANTIATE_TEST_CASE_P(
                                                { "MobilenetV1/Predictions/Reshape_1" },
                                                { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
-                               ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
-                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
-                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
-                                               224, 3, { "input" },
-                                               { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-                                               { 955 }),
                                // object detection test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
@@ -1205,29 +1124,78 @@ INSTANTIATE_TEST_CASE_P(
                                                  "TFLite_Detection_PostProcess:3" },
                                                { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
                                                { 451, 474, 714, 969 }),
+                               // face detection test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
                                                  "TFLite_Detection_PostProcess:1",
                                                  "TFLite_Detection_PostProcess:2",
                                                  "TFLite_Detection_PostProcess:3" },
-                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-                                               { 451, 474, 714, 969 }),
-                               // face detection test
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               // pose estimation test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // Hand gesture model 1 from AIC
+                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+                               // Hand gesture model 2 from AIC
+                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+                                                 38, 38, 12 }),
+
+                               // mobilenet based image classification test
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_GPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+
+                               // quantized mobilenet based image classification test
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_GPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+
+                               // object detection test
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
                                                  "TFLite_Detection_PostProcess:1",
                                                  "TFLite_Detection_PostProcess:2",
                                                  "TFLite_Detection_PostProcess:3" },
-                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-                                               { 727, 225, 960, 555 }),
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+
+                               // face detection test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
@@ -1239,18 +1207,9 @@ INSTANTIATE_TEST_CASE_P(
                                                  "TFLite_Detection_PostProcess:3" },
                                                { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
                                                { 727, 225, 960, 555 }),
+
                                // pose estimation test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
-                                               { "image" },
-                                               { "Convolutional_Pose_Machine/stage_5_out" },
-                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
-                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
-                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
-                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
-                               ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
                                                10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
@@ -1259,12 +1218,25 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
                                                { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
-                                                 123, 99,  287, 381, 451, 287, 381, 475 })
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // Hand gesture model 1 from AIC
+                               ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+                               // Hand gesture model 2 from AIC
+                               ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+                                                 38, 38, 12 })
                                /* TODO */
                                ));
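
For readers tracing the renamed instantiations in this patch: in googletest, the first argument of INSTANTIATE_TEST_CASE_P becomes the prefix of every generated test name, which is what a --gtest_filter pattern matches against. Below is a minimal sketch of that naming rule only; ExampleParamTest and its parameters are stand-ins, not part of this test suite.

#include <gtest/gtest.h>

class ExampleParamTest : public testing::TestWithParam<int>
{};

TEST_P(ExampleParamTest, Inference)
{
	// Each value passed to testing::Values() yields one generated test.
	EXPECT_GE(GetParam(), 0);
}

// Generates Opensource/ExampleParamTest.Inference/0 and .../1, selectable
// with --gtest_filter=Opensource/ExampleParamTest.Inference/*
INSTANTIATE_TEST_CASE_P(Opensource, ExampleParamTest,
		testing::Values(0, 1));
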
 
 INSTANTIATE_TEST_CASE_P(
-               Prefix, InferenceEngineCaffeTest,
+               Opensource, InferenceEngineCaffeTest,
                testing::Values(
                                // parameter order : backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers
                                // OPENCV
@@ -1347,7 +1319,7 @@ INSTANTIATE_TEST_CASE_P(
                                ));
 
 INSTANTIATE_TEST_CASE_P(
-               Prefix, InferenceEngineDldtTest,
+               Opensource, InferenceEngineDldtTest,
                testing::Values(
                                // DLDT
                                ParamType_Infer(
@@ -1359,67 +1331,3 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml",
                                                  "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" },
                                                { 954 })));
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineHandGestureTest,
-               testing::Values(
-                               // TFLITE
-                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
-                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-                                                 38, 38, 12 }),
-                               // TFLITE via MLAPI
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-                                                 38, 38, 12 }),
-                               // ARMNN
-                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
-                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-                                                 38, 38, 12 }),
-                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
-                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-                                                 38, 38, 12 }),
-                               // ONE via MLAPI
-                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-                                               { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
-                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-                                               { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-                                               { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-                                                 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-                                                 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-                                                 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-                                                 38, 38, 12 })
-                               /* TODO */
-                               ));
index 45770b9..e25f2a1 100644 (file)
@@ -47,7 +47,7 @@ static MachineCapacity MachineCap[] = {
        { true, true, false },      // TM4
        { true, false, false },     // RPI4
        { true, true, false },      // ODROID
-       { true, false, true },      // VIM3
+       { true, true, true },      // VIM3
        { true, true, false },      // NIKE-M
        { false, false, false }     // MAX
 };
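
The final hunk widens the capability table so that VIM3 reports support for all three inference targets. A minimal sketch of how such a per-machine table can gate test execution follows, assuming the three flags denote CPU, GPU and NPU support and that target devices are encoded as bit flags; the field names, enum values and IsTargetSupported() helper are illustrative assumptions, not the file's actual definitions.

#include <cstddef>

struct MachineCapacity {
	bool cpu;	// assumed: CPU inference supported
	bool gpu;	// assumed: GPU inference supported
	bool npu;	// assumed: NPU inference supported
};

enum MachineId { TM4, RPI4, ODROID, VIM3, NIKE_M, MAX };

// Mirrors the table in the hunk above, with VIM3 set to { true, true, true }.
static const MachineCapacity kMachineCap[] = {
	{ true, true, false },		// TM4
	{ true, false, false },		// RPI4
	{ true, true, false },		// ODROID
	{ true, true, true },		// VIM3
	{ true, true, false },		// NIKE-M
	{ false, false, false }		// MAX (sentinel)
};

// Returns true when the given machine supports the requested target device,
// so unsupported backend/device combinations can be skipped up front.
static bool IsTargetSupported(MachineId machine, int target_device_flag)
{
	const MachineCapacity &cap = kMachineCap[machine];

	// Flag encoding (1 = CPU, 2 = GPU, 4 = NPU/custom) is an assumption
	// here, chosen to resemble typical INFERENCE_TARGET_* bit flags.
	switch (target_device_flag) {
	case 1:
		return cap.cpu;
	case 2:
		return cap.gpu;
	case 4:
		return cap.npu;
	default:
		return false;
	}
}
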