test: Rename test model enumeration values
author: Inki Dae <inki.dae@samsung.com>
Thu, 4 Feb 2021 08:35:29 +0000 (17:35 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Thu, 25 Mar 2021 02:16:22 +0000 (11:16 +0900)
Just let's keep name consistency.

Change-Id: Id8e593a37160a49929184afc2d8f8afcf7948dc1
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_profiler.cpp
test/src/inference_engine_tc.cpp
test/src/inference_engine_test_common.h

index 0a4cb8f..16fccf3 100644 (file)
@@ -192,33 +192,33 @@ TEST_P(InferenceEngineTfliteTest, Inference)
        FillOutputResult(engine.get(), outputs, result);
 
        switch (test_type) {
-       case TEST_IMAGE_CLASSIFICATION:
+       case TEST_MODEL_IMAGE_CLASSIFICATION:
                ret = VerifyImageClassificationResults(result, answers[0]);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_OBJECT_DETECTION:
+       case TEST_MODEL_OBJECT_DETECTION:
                // 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
                ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_FACE_DETECTION:
+       case TEST_MODEL_FACE_DETECTION:
                // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
                ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_FACIAL_LANDMARK_DETECTION:
+       case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
                // TODO.
                break;
-       case TEST_POSE_ESTIMATION:
+       case TEST_MODEL_POSE_ESTIMATION:
                // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
                ret = VerifyPoseEstimationResults(result, answers, 563, 750);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_AIC_HAND_GESTURE_1:
+       case TEST_MODEL_AIC_HAND_GESTURE_1:
                ret = VerifyAICHandGesture1Results(outputs);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_AIC_HAND_GESTURE_2:
+       case TEST_MODEL_AIC_HAND_GESTURE_2:
                ret = VerifyAICHandGesture2Results(outputs, answers);
                EXPECT_EQ(ret, 1);
                break;
@@ -379,33 +379,33 @@ TEST_P(InferenceEngineTfliteCLTunerTest, Inference)
        FillOutputResult(engine.get(), outputs, result);
 
        switch (test_type) {
-       case TEST_IMAGE_CLASSIFICATION:
+       case TEST_MODEL_IMAGE_CLASSIFICATION:
                ret = VerifyImageClassificationResults(result, answers[0]);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_OBJECT_DETECTION:
+       case TEST_MODEL_OBJECT_DETECTION:
                // 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
                ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_FACE_DETECTION:
+       case TEST_MODEL_FACE_DETECTION:
                // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
                ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_FACIAL_LANDMARK_DETECTION:
+       case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
                // TODO.
                break;
-       case TEST_POSE_ESTIMATION:
+       case TEST_MODEL_POSE_ESTIMATION:
                // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
                ret = VerifyPoseEstimationResults(result, answers, 563, 750);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_AIC_HAND_GESTURE_1:
+       case TEST_MODEL_AIC_HAND_GESTURE_1:
                ret = VerifyAICHandGesture1Results(outputs);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_AIC_HAND_GESTURE_2:
+       case TEST_MODEL_AIC_HAND_GESTURE_2:
                ret = VerifyAICHandGesture2Results(outputs, answers);
                EXPECT_EQ(ret, 1);
                break;
@@ -543,26 +543,26 @@ TEST_P(InferenceEngineCaffeTest, Inference)
        FillOutputResult(engine.get(), outputs, result);
 
        switch (test_type) {
-       case TEST_IMAGE_CLASSIFICATION:
+       case TEST_MODEL_IMAGE_CLASSIFICATION:
                ret = VerifyImageClassificationResults(result, answers[0]);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_OBJECT_DETECTION:
+       case TEST_MODEL_OBJECT_DETECTION:
                // 1024 : fixed height size of dumped image, 636 : fixed width size of dumped image.
                ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_FACE_DETECTION:
+       case TEST_MODEL_FACE_DETECTION:
                // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
                ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_FACIAL_LANDMARK_DETECTION:
+       case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
                // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
                ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_POSE_ESTIMATION:
+       case TEST_MODEL_POSE_ESTIMATION:
                // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
                ret = VerifyPoseEstimationResults(result, answers, 563, 750);
                EXPECT_EQ(ret, 1);
@@ -700,26 +700,26 @@ TEST_P(InferenceEngineDldtTest, Inference)
        FillOutputResult(engine.get(), outputs, result);
 
        switch (test_type) {
-       case TEST_IMAGE_CLASSIFICATION:
+       case TEST_MODEL_IMAGE_CLASSIFICATION:
                ret = VerifyImageClassificationResults(result, answers[0]);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_OBJECT_DETECTION:
+       case TEST_MODEL_OBJECT_DETECTION:
                // 1024 : fixed height size of dumped image, 636 : fixed width size of dumped image.
                ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_FACE_DETECTION:
+       case TEST_MODEL_FACE_DETECTION:
                // 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
                ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_FACIAL_LANDMARK_DETECTION:
+       case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
                // 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
                ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
                EXPECT_EQ(ret, 1);
                break;
-       case TEST_POSE_ESTIMATION:
+       case TEST_MODEL_POSE_ESTIMATION:
                // 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
                ret = VerifyPoseEstimationResults(result, answers, 563, 750);
                EXPECT_EQ(ret, 1);
@@ -740,7 +740,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ARMNN.
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -749,7 +749,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -758,7 +758,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               "armnn", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -770,7 +770,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "armnn", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -782,7 +782,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, INFERENCE_ITERATION,
+                                               "armnn", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -792,11 +792,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -807,7 +807,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -816,7 +816,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -825,7 +825,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               "armnn", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -837,7 +837,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "armnn", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -849,7 +849,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, INFERENCE_ITERATION,
+                                               "armnn", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -859,11 +859,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -878,7 +878,7 @@ INSTANTIATE_TEST_CASE_P(
                                // TFLITE.
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -887,7 +887,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -896,7 +896,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -908,7 +908,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -920,7 +920,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -930,11 +930,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -946,7 +946,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -955,7 +955,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -964,7 +964,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               "tflite", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -976,7 +976,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "tflite", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -988,7 +988,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               "tflite", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -998,11 +998,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1022,7 +1022,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ONE via MLAPI.
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1031,7 +1031,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -1040,7 +1040,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1052,7 +1052,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1064,7 +1064,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1074,11 +1074,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1090,7 +1090,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1099,7 +1099,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -1108,7 +1108,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1120,7 +1120,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1132,7 +1132,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1142,11 +1142,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1161,7 +1161,7 @@ INSTANTIATE_TEST_CASE_P(
                                // TFLITE via MLAPI.
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1170,7 +1170,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -1179,7 +1179,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1191,7 +1191,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1203,7 +1203,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1213,11 +1213,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1229,7 +1229,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1239,7 +1239,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -1249,7 +1249,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // object detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1262,7 +1262,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // face detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1275,7 +1275,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // pose estimation test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1285,11 +1285,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1308,7 +1308,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ARMNN.
                                ParamType_CLTuner(
                                                "armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_RAPID,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1316,7 +1316,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 3 }),
                                ParamType_CLTuner(
                                                "armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1324,7 +1324,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 3 }),
                                ParamType_CLTuner(
                                                "armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_NORMAL,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1332,7 +1332,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 3 }),
                                ParamType_CLTuner(
                                                "armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1340,7 +1340,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 3 }),
                                ParamType_CLTuner(
                                                "armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1348,7 +1348,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 3 }),
                                ParamType_CLTuner(
                                                "armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1365,7 +1365,7 @@ INSTANTIATE_TEST_CASE_P(
                                // squeezenet based image classification test
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification_caffe.bin" },
                                                227, 227, 3, { "data" }, { "prob" },
@@ -1374,7 +1374,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 281 }),
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification_caffe.bin" },
                                                227, 227, 3, { "data" }, { "prob" },
@@ -1384,7 +1384,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // mobilenet-ssd based object detection test
                                ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               "opencv", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection_caffe.bin" }, 300,
                                                300, 3, { "data" }, { "detection_out" },
@@ -1392,7 +1392,7 @@ INSTANTIATE_TEST_CASE_P(
                                                  "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" },
                                                { 15, 19, 335, 557 }),
                                ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               "opencv", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection_caffe.bin" }, 300,
                                                300, 3, { "data" }, { "detection_out" },
@@ -1402,7 +1402,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // mobilenet-ssd based object detection test
                                ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "opencv", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection_caffe.bin" }, 300,
                                                300, 3, { "data" }, { "detection_out" },
@@ -1410,7 +1410,7 @@ INSTANTIATE_TEST_CASE_P(
                                                  "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" },
                                                { 733, 233, 965, 539 }),
                                ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "opencv", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection_caffe.bin" }, 300,
                                                300, 3, { "data" }, { "detection_out" },
@@ -1421,7 +1421,7 @@ INSTANTIATE_TEST_CASE_P(
                                // tweakcnn based facial landmark detection test
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_CPU,
-                                               TEST_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
+                                               TEST_MODEL_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/faciallandmark_detection_caffe.bin" },
                                                128, 128, 3, { "data" }, { "Sigmoid_fc2" },
@@ -1430,7 +1430,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79 }),
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_GPU,
-                                               TEST_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
+                                               TEST_MODEL_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/faciallandmark_detection_caffe.bin" },
                                                128, 128, 3, { "data" }, { "Sigmoid_fc2" },
@@ -1446,7 +1446,7 @@ INSTANTIATE_TEST_CASE_P(
                                // DLDT
                                ParamType_Infer(
                                                "dldt", INFERENCE_TARGET_CUSTOM,
-                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+                                               TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/dldt_banana_classification.bin" },
                                                224, 224, 3, { "data" }, { "prob" },
index d5b5b92..88f75d1 100644 (file)
@@ -535,7 +535,7 @@ TEST_P(InferenceEngineTestCase_G6, Inference_P)
 
        std::string test_name;
        switch (test_type) {
-       case TEST_IMAGE_CLASSIFICATION:
+       case TEST_MODEL_IMAGE_CLASSIFICATION:
                test_name.append("Image classification");
                break;
        default:
@@ -755,7 +755,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ARMNN.
                                ParamType_Many(
                                                "armnn", INFERENCE_ENGINE_PROFILER_OFF,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -764,7 +764,7 @@ INSTANTIATE_TEST_CASE_P(
                                // TFLITE.
                                ParamType_Many(
                                                "tflite", INFERENCE_ENGINE_PROFILER_OFF,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -773,7 +773,7 @@ INSTANTIATE_TEST_CASE_P(
                                // OPENCV.
                                ParamType_Many(
                                                "opencv", INFERENCE_ENGINE_PROFILER_OFF,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification_caffe.bin" },
                                                227, 227, 3, { "data" }, { "prob" },
@@ -783,7 +783,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ONE.
                                ParamType_Many(
                                                "one", INFERENCE_ENGINE_PROFILER_OFF,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
                                                224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
@@ -792,7 +792,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ARMNN.
                                ParamType_Many(
                                                "armnn", INFERENCE_ENGINE_PROFILER_FILE,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -801,7 +801,7 @@ INSTANTIATE_TEST_CASE_P(
                                // TFLITE.
                                ParamType_Many(
                                                "tflite", INFERENCE_ENGINE_PROFILER_FILE,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -810,7 +810,7 @@ INSTANTIATE_TEST_CASE_P(
                                // OPENCV.
                                ParamType_Many(
                                                "opencv", INFERENCE_ENGINE_PROFILER_FILE,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification_caffe.bin" },
                                                227, 227, 3, { "data" }, { "prob" },
@@ -820,7 +820,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ONE.
                                ParamType_Many(
                                                "one", INFERENCE_ENGINE_PROFILER_FILE,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
                                                224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
@@ -829,7 +829,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ARMNN.
                                ParamType_Many(
                                                "armnn", INFERENCE_ENGINE_PROFILER_CONSOLE,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -838,7 +838,7 @@ INSTANTIATE_TEST_CASE_P(
                                // TFLITE.
                                ParamType_Many(
                                                "tflite", INFERENCE_ENGINE_PROFILER_CONSOLE,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -847,7 +847,7 @@ INSTANTIATE_TEST_CASE_P(
                                // OPENCV.
                                ParamType_Many(
                                                "opencv", INFERENCE_ENGINE_PROFILER_CONSOLE,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification_caffe.bin" },
                                                227, 227, 3, { "data" }, { "prob" },
@@ -857,7 +857,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ONE.
                                ParamType_Many(
                                                "one", INFERENCE_ENGINE_PROFILER_CONSOLE,
-                                               INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TARGET_CPU, TEST_MODEL_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
                                                224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
index bd49314..8bc133d 100644 (file)
@@ -40,13 +40,13 @@ enum {
 
 enum {
        TEST_MODEL_MIN = -1,
-       TEST_IMAGE_CLASSIFICATION,
-       TEST_OBJECT_DETECTION,
-       TEST_FACE_DETECTION,
-       TEST_AIC_HAND_GESTURE_1,
-       TEST_AIC_HAND_GESTURE_2,
-       TEST_FACIAL_LANDMARK_DETECTION,
-       TEST_POSE_ESTIMATION,
+       TEST_MODEL_IMAGE_CLASSIFICATION,
+       TEST_MODEL_OBJECT_DETECTION,
+       TEST_MODEL_FACE_DETECTION,
+       TEST_MODEL_AIC_HAND_GESTURE_1,
+       TEST_MODEL_AIC_HAND_GESTURE_2,
+       TEST_MODEL_FACIAL_LANDMARK_DETECTION,
+       TEST_MODEL_POSE_ESTIMATION,
        TEST_MODEL_MAX
 };