test: Use a macro instead of hard-coded count [Gerrit change 53/252353/1]
author: Inki Dae <inki.dae@samsung.com>
Wed, 27 Jan 2021 05:34:04 +0000 (14:34 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Wed, 27 Jan 2021 05:34:04 +0000 (14:34 +0900)
Change-Id: I1c0a5dd47d67a7a9eca725d932d0a73001ca85e2
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_profiler.cpp

index c5f6715..7f873c4 100644 (file)
@@ -28,6 +28,8 @@
 #include "inference_engine_common_impl.h"
 #include "inference_engine_test_common.h"
 
+#define INFERENCE_ITERATION            10
+
 typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
                                   int, int, int, std::vector<std::string>,
                                   std::vector<std::string>, std::vector<std::string>,
@@ -675,7 +677,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ARMNN.
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -684,7 +686,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -694,7 +696,7 @@ INSTANTIATE_TEST_CASE_P(
                                // object detection test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
@@ -705,7 +707,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               "armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -717,7 +719,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10,
+                                               "armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -727,11 +729,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -742,7 +744,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -751,7 +753,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -761,7 +763,7 @@ INSTANTIATE_TEST_CASE_P(
                                // object detection test
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
@@ -772,7 +774,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -784,7 +786,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10,
+                                               "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -794,11 +796,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -813,7 +815,7 @@ INSTANTIATE_TEST_CASE_P(
                                // TFLITE.
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -822,7 +824,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -832,7 +834,7 @@ INSTANTIATE_TEST_CASE_P(
                                // object detection test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
@@ -843,7 +845,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -856,7 +858,7 @@ INSTANTIATE_TEST_CASE_P(
                                // pose estimation test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
                                                { "Convolutional_Pose_Machine/stage_5_out" },
@@ -865,11 +867,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -881,7 +883,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -890,7 +892,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -900,7 +902,7 @@ INSTANTIATE_TEST_CASE_P(
                                // object detection test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
@@ -911,7 +913,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -924,7 +926,7 @@ INSTANTIATE_TEST_CASE_P(
                                // pose estimation test
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
                                                { "Convolutional_Pose_Machine/stage_5_out" },
@@ -933,11 +935,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -957,7 +959,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ONE via MLAPI.
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -966,7 +968,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -976,7 +978,7 @@ INSTANTIATE_TEST_CASE_P(
                                // object detection test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
@@ -987,7 +989,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1000,7 +1002,7 @@ INSTANTIATE_TEST_CASE_P(
                                // pose estimation test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
                                                { "Convolutional_Pose_Machine/stage_5_out" },
@@ -1009,11 +1011,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1025,7 +1027,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1034,7 +1036,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -1044,7 +1046,7 @@ INSTANTIATE_TEST_CASE_P(
                                // object detection test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
@@ -1055,7 +1057,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1068,7 +1070,7 @@ INSTANTIATE_TEST_CASE_P(
                                // pose estimation test
                                ParamType_Infer(
                                                "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
                                                { "Convolutional_Pose_Machine/stage_5_out" },
@@ -1077,11 +1079,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1096,7 +1098,7 @@ INSTANTIATE_TEST_CASE_P(
                                // TFLITE via MLAPI.
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1105,7 +1107,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -1115,7 +1117,7 @@ INSTANTIATE_TEST_CASE_P(
                                // object detection test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
@@ -1126,7 +1128,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1139,7 +1141,7 @@ INSTANTIATE_TEST_CASE_P(
                                // pose estimation test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
                                                { "Convolutional_Pose_Machine/stage_5_out" },
@@ -1148,11 +1150,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1164,7 +1166,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
                                                224, 3, { "input_2" }, { "dense_3/Softmax" },
@@ -1174,7 +1176,7 @@ INSTANTIATE_TEST_CASE_P(
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
@@ -1185,7 +1187,7 @@ INSTANTIATE_TEST_CASE_P(
                                // object detection test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
                                                { "TFLite_Detection_PostProcess",
@@ -1197,7 +1199,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // face detection test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1211,7 +1213,7 @@ INSTANTIATE_TEST_CASE_P(
                                // pose estimation test
                                ParamType_Infer(
                                                "mlapi", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
                                                { "Convolutional_Pose_Machine/stage_5_out" },
@@ -1220,11 +1222,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1243,7 +1245,7 @@ INSTANTIATE_TEST_CASE_P(
                                // squeezenet based image classification test
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_CPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification_caffe.bin" },
                                                227, 227, 3, { "data" }, { "prob" },
@@ -1252,7 +1254,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 281 }),
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_GPU,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification_caffe.bin" },
                                                227, 227, 3, { "data" }, { "prob" },
@@ -1263,7 +1265,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet-ssd based object detection test
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection_caffe.bin" }, 300,
                                                300, 3, { "data" }, { "detection_out" },
                                                { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel",
@@ -1271,7 +1273,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 15, 19, 335, 557 }),
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
-                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection_caffe.bin" }, 300,
                                                300, 3, { "data" }, { "detection_out" },
                                                { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel",
@@ -1280,7 +1282,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // mobilenet-ssd based object detection test
                                ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               "opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection_caffe.bin" }, 300,
                                                300, 3, { "data" }, { "detection_out" },
@@ -1288,7 +1290,7 @@ INSTANTIATE_TEST_CASE_P(
                                                  "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" },
                                                { 733, 233, 965, 539 }),
                                ParamType_Infer(
-                                               "opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               "opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection_caffe.bin" }, 300,
                                                300, 3, { "data" }, { "detection_out" },
@@ -1299,7 +1301,7 @@ INSTANTIATE_TEST_CASE_P(
                                // tweakcnn based facial landmark detection test
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_CPU,
-                                               TEST_FACIAL_LANDMARK_DETECTION, 10,
+                                               TEST_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/faciallandmark_detection_caffe.bin" },
                                                128, 128, 3, { "data" }, { "Sigmoid_fc2" },
@@ -1308,7 +1310,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79 }),
                                ParamType_Infer(
                                                "opencv", INFERENCE_TARGET_GPU,
-                                               TEST_FACIAL_LANDMARK_DETECTION, 10,
+                                               TEST_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/faciallandmark_detection_caffe.bin" },
                                                128, 128, 3, { "data" }, { "Sigmoid_fc2" },
@@ -1324,7 +1326,7 @@ INSTANTIATE_TEST_CASE_P(
                                // DLDT
                                ParamType_Infer(
                                                "dldt", INFERENCE_TARGET_CUSTOM,
-                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/dldt_banana_classification.bin" },
                                                224, 224, 3, { "data" }, { "prob" },