From: Inki Dae
Date: Thu, 4 Feb 2021 09:22:49 +0000 (+0900)
Subject: test: Use some macros for test case parameters
X-Git-Tag: submit/tizen/20210422.072212~5
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=61f6c3be2be16f9f53e3c9f30773148a38bb9cf7;p=platform%2Fcore%2Fmultimedia%2Finference-engine-interface.git

test: Use some macros for test case parameters

Added some macros for tflite and cltuner test cases to avoid code
duplication.

Changelog v1:
- Corrected macro names as below.
  *IC_OD* -> *OD*
  *IC_FD* -> *FD*
  *IC_PE* -> *PE*

Change-Id: I1f8902c2779a4889a315a9402f290502e86002c0
Signed-off-by: Inki Dae
---

diff --git a/test/src/inference_engine_profiler.cpp b/test/src/inference_engine_profiler.cpp
index 16fccf3..51000f3 100644
--- a/test/src/inference_engine_profiler.cpp
+++ b/test/src/inference_engine_profiler.cpp
@@ -30,6 +30,89 @@
 
 #define INFERENCE_ITERATION 10
 
+// Macros for tflite inference test cases.
+#define PARAM_TYPE_TFLITE_IC_INFER(backend, device, iter) \
+	backend, device, TEST_MODEL_IMAGE_CLASSIFICATION, \
+	iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, \
+	{ "/opt/usr/images/image_classification.bin" }, 224, \
+	224, 3, { "input_2" }, { "dense_3/Softmax" }, \
+	{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, \
+	{ 3 }
+
+#define PARAM_TYPE_TFLITE_IC_Q_INFER(backend, device, iter) \
+	backend, device, TEST_MODEL_IMAGE_CLASSIFICATION, \
+	iter, INFERENCE_TENSOR_DATA_TYPE_UINT8, \
+	{ "/opt/usr/images/image_classification_q.bin" }, \
+	224, 224, 3, { "input" }, \
+	{ "MobilenetV1/Predictions/Reshape_1" }, \
+	{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" }, \
+	{ 955 }
+
+#define PARAM_TYPE_TFLITE_OD_INFER(backend, device, iter) \
+	backend, device, TEST_MODEL_OBJECT_DETECTION, \
+	iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, \
+	{ "/opt/usr/images/object_detection.bin" }, \
+	300, 300, 3, \
+	{ "normalized_input_image_tensor" }, \
+	{ "TFLite_Detection_PostProcess", \
+	  "TFLite_Detection_PostProcess:1", \
+	  "TFLite_Detection_PostProcess:2", \
+	  "TFLite_Detection_PostProcess:3" }, \
+	{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, \
+	{ 451, 474, 714, 969 }
+
+#define PARAM_TYPE_TFLITE_FD_INFER(backend, device, iter) \
+	backend, device, TEST_MODEL_FACE_DETECTION, \
+	iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, \
+	{ "/opt/usr/images/face_detection.bin" }, \
+	300, 300, 3, \
+	{ "normalized_input_image_tensor" }, \
+	{ "TFLite_Detection_PostProcess", \
+	  "TFLite_Detection_PostProcess:1", \
+	  "TFLite_Detection_PostProcess:2", \
+	  "TFLite_Detection_PostProcess:3" }, \
+	{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, \
+	{ 727, 225, 960, 555 }
+
+#define PARAM_TYPE_TFLITE_PE_INFER(backend, device, iter) \
+	backend, device, TEST_MODEL_POSE_ESTIMATION, \
+	iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, \
+	{ "/opt/usr/images/pose_estimation.bin" }, \
+	192, 192, 3, { "image" }, \
+	{ "Convolutional_Pose_Machine/stage_5_out" }, \
+	{ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }, \
+	{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, \
+	  351, 382, 382, 382, 76, 146, 170, 193, 216, 146, \
+	  123, 99, 287, 381, 451, 287, 381, 475 }
+
+#define PARAM_TYPE_TFLITE_AICHG_1_INFER(backend, device, iter) \
+	backend, device, TEST_MODEL_AIC_HAND_GESTURE_1, \
+	iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, \
+	{ "/opt/usr/images/hand.bin" }, 224, 224, 3, \
+	{ "input" }, \
+	{ "mobilenetv2/boundingbox", "mobilenetv2/heatmap" }, \
+	{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, \
"/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, \ + { 0 } + +#define PARAM_TYPE_TFLITE_AICHG_2_INFER(backend, device, iter) \ + backend, device, TEST_MODEL_AIC_HAND_GESTURE_2, \ + iter, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, \ + { "/opt/usr/images/hand.bin" }, 56, 56, 21, \ + { "input" }, \ + { "mobilenetv2/coord_refine", "mobilenetv2/gesture" }, \ + { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" }, \ + { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39, \ + 78, 36, 82, 42, 82, 44, 83, 45, 35, 37, \ + 61, 36, 59, 36, 52, 39, 35, 32, 40, 34, \ + 62, 39, 70, 40, 58, 41, 34, 42, 34, 41, \ + 38, 38, 12 } + + +// Macros for tflite model based cltuner test cases. +#define PARAM_TYPE_TFLITE_IC_CLTUNER(active, update, mode, backend, device, iter) \ + active, update, mode, \ + PARAM_TYPE_TFLITE_IC_INFER(backend, device, iter) + typedef std::tuple, int, int, int, std::vector, std::vector, std::vector, @@ -37,7 +120,7 @@ typedef std::tuple, ParamType_Infer; -typedef std::tuple, int, int, int, std::vector, std::vector, std::vector, @@ -232,11 +315,11 @@ TEST_P(InferenceEngineTfliteTest, Inference) TEST_P(InferenceEngineTfliteCLTunerTest, Inference) { - std::string backend_name; - int target_devices; bool active; bool update; inference_engine_cltuner_mode_e tuning_mode; + std::string backend_name; + int target_devices; int test_type; int iteration; int tensor_type; @@ -249,7 +332,7 @@ TEST_P(InferenceEngineTfliteCLTunerTest, Inference) std::vector model_paths; std::vector answers; - std::tie(backend_name, target_devices, active, update, tuning_mode, test_type, + std::tie(active, update, tuning_mode, backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam(); @@ -739,279 +822,96 @@ INSTANTIATE_TEST_CASE_P( // mobilenet based image classification test // ARMNN. 
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_CPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// quantized mobilenet based image classification test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_CPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_UINT8,
-				{ "/opt/usr/images/image_classification_q.bin" }, 224,
-				224, 3, { "input" },
-				{ "MobilenetV1/Predictions/Reshape_1" },
-				{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-				{ 955 }),
+				PARAM_TYPE_TFLITE_IC_Q_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// object detection test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-				{ 451, 474, 714, 969 }),
+				PARAM_TYPE_TFLITE_OD_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// face detection test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-				{ 727, 225, 960, 555 }),
+				PARAM_TYPE_TFLITE_FD_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// pose estimation test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
-				{ "image" },
-				{ "Convolutional_Pose_Machine/stage_5_out" },
-				{ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
-				{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
-				  351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
-				  123, 99, 287, 381, 451, 287, 381, 475 }),
+				PARAM_TYPE_TFLITE_PE_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// Hand gesture model 1 from AIC
-		ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-				{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_1_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// Hand gesture model 2 from AIC
-		ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-				{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-				{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-				  78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-				  61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-				  62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-				  38, 38, 12 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_2_INFER("armnn", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
+
 		// mobilenet based image classification test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_GPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// quantized mobilenet based image classification test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_GPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_UINT8,
-				{ "/opt/usr/images/image_classification_q.bin" }, 224,
-				224, 3, { "input" },
-				{ "MobilenetV1/Predictions/Reshape_1" },
-				{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-				{ 955 }),
+				PARAM_TYPE_TFLITE_IC_Q_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// object detection test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-				{ 451, 474, 714, 969 }),
+				PARAM_TYPE_TFLITE_OD_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// face detection test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-				{ 727, 225, 960, 555 }),
+				PARAM_TYPE_TFLITE_FD_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// pose estimation test
 		ParamType_Infer(
-				"armnn", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
-				{ "image" },
-				{ "Convolutional_Pose_Machine/stage_5_out" },
-				{ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
-				{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
-				  351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
-				  123, 99, 287, 381, 451, 287, 381, 475 }),
+				PARAM_TYPE_TFLITE_PE_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// Hand gesture model 1 from AIC
-		ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-				{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_1_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// Hand gesture model 2 from AIC
ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" }, - { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" }, - { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39, - 78, 36, 82, 42, 82, 44, 83, 45, 35, 37, - 61, 36, 59, 36, 52, 39, 35, 32, 40, 34, - 62, 39, 70, 40, 58, 41, 34, 42, 34, 41, - 38, 38, 12 }), + ParamType_Infer( + PARAM_TYPE_TFLITE_AICHG_2_INFER("armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)), /*********************************************************************************/ // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result // mobilenet based image classification test // TFLITE. ParamType_Infer( - "tflite", INFERENCE_TARGET_CPU, - TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION, - INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/image_classification.bin" }, 224, - 224, 3, { "input_2" }, { "dense_3/Softmax" }, - { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, - { 3 }), + PARAM_TYPE_TFLITE_IC_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // quantized mobilenet based image classification test ParamType_Infer( - "tflite", INFERENCE_TARGET_CPU, - TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION, - INFERENCE_TENSOR_DATA_TYPE_UINT8, - { "/opt/usr/images/image_classification_q.bin" }, 224, - 224, 3, { "input" }, - { "MobilenetV1/Predictions/Reshape_1" }, - { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" }, - { 955 }), + PARAM_TYPE_TFLITE_IC_Q_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // object detection test ParamType_Infer( - "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION, - INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, - { "normalized_input_image_tensor" }, - { "TFLite_Detection_PostProcess", - "TFLite_Detection_PostProcess:1", - "TFLite_Detection_PostProcess:2", - "TFLite_Detection_PostProcess:3" }, - { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, - { 451, 474, 714, 969 }), + PARAM_TYPE_TFLITE_OD_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // face detection test ParamType_Infer( - "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION, - INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, - { "normalized_input_image_tensor" }, - { "TFLite_Detection_PostProcess", - "TFLite_Detection_PostProcess:1", - "TFLite_Detection_PostProcess:2", - "TFLite_Detection_PostProcess:3" }, - { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, - { 727, 225, 960, 555 }), + PARAM_TYPE_TFLITE_FD_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // pose estimation test ParamType_Infer( - "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION, - INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, - { "image" }, - { "Convolutional_Pose_Machine/stage_5_out" }, - { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }, - { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, - 351, 382, 382, 382, 76, 146, 170, 193, 216, 146, - 123, 99, 287, 381, 451, 287, 381, 475 
+				PARAM_TYPE_TFLITE_PE_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// Hand gesture model 1 from AIC
-		ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-				{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_1_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// Hand gesture model 2 from AIC
-		ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-				{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-				{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-				  78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-				  61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-				  62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-				  38, 38, 12 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_2_INFER("tflite", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// mobilenet based image classification test
 		ParamType_Infer(
-				"tflite", INFERENCE_TARGET_GPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// quantized mobilenet based image classification test
 		ParamType_Infer(
-				"tflite", INFERENCE_TARGET_GPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_UINT8,
-				{ "/opt/usr/images/image_classification_q.bin" }, 224,
-				224, 3, { "input" },
-				{ "MobilenetV1/Predictions/Reshape_1" },
-				{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-				{ 955 }),
+				PARAM_TYPE_TFLITE_IC_Q_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// object detection test
 		ParamType_Infer(
-				"tflite", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-				{ 451, 474, 714, 969 }),
+				PARAM_TYPE_TFLITE_OD_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// face detection test
 		ParamType_Infer(
-				"tflite", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-				{ 727, 225, 960, 555 }),
+				PARAM_TYPE_TFLITE_FD_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// pose estimation test
 		ParamType_Infer(
-				"tflite", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
"/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, - { "image" }, - { "Convolutional_Pose_Machine/stage_5_out" }, - { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }, - { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, - 351, 382, 382, 382, 76, 146, 170, 193, 216, 146, - 123, 99, 287, 381, 451, 287, 381, 475 }), + PARAM_TYPE_TFLITE_PE_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)), // Hand gesture model 1 from AIC - ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" }, - { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }), + ParamType_Infer( + PARAM_TYPE_TFLITE_AICHG_1_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)), // Hand gesture model 2 from AIC - ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" }, - { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" }, - { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39, - 78, 36, 82, 42, 82, 44, 83, 45, 35, 37, - 61, 36, 59, 36, 52, 39, 35, 32, 40, 34, - 62, 39, 70, 40, 58, 41, 34, 42, 34, 41, - 38, 38, 12 }) + ParamType_Infer( + PARAM_TYPE_TFLITE_AICHG_2_INFER("tflite", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)) /* TODO */ - )); + )); INSTANTIATE_TEST_CASE_P( Inhouse, InferenceEngineTfliteTest, @@ -1021,282 +921,94 @@ INSTANTIATE_TEST_CASE_P( // mobilenet based image classification test // ONE via MLAPI. ParamType_Infer( - "one", INFERENCE_TARGET_CPU, - TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION, - INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/image_classification.bin" }, 224, - 224, 3, { "input_2" }, { "dense_3/Softmax" }, - { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, - { 3 }), + PARAM_TYPE_TFLITE_IC_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // quantized mobilenet based image classification test ParamType_Infer( - "one", INFERENCE_TARGET_CPU, - TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION, - INFERENCE_TENSOR_DATA_TYPE_UINT8, - { "/opt/usr/images/image_classification_q.bin" }, 224, - 224, 3, { "input" }, - { "MobilenetV1/Predictions/Reshape_1" }, - { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" }, - { 955 }), + PARAM_TYPE_TFLITE_IC_Q_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // object detection test ParamType_Infer( - "one", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION, - INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, - { "normalized_input_image_tensor" }, - { "TFLite_Detection_PostProcess", - "TFLite_Detection_PostProcess:1", - "TFLite_Detection_PostProcess:2", - "TFLite_Detection_PostProcess:3" }, - { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, - { 451, 474, 714, 969 }), + PARAM_TYPE_TFLITE_OD_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // face detection test ParamType_Infer( - "one", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION, - INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, - { "normalized_input_image_tensor" }, - { 
"TFLite_Detection_PostProcess", - "TFLite_Detection_PostProcess:1", - "TFLite_Detection_PostProcess:2", - "TFLite_Detection_PostProcess:3" }, - { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, - { 727, 225, 960, 555 }), + PARAM_TYPE_TFLITE_FD_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // pose estimation test ParamType_Infer( - "one", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION, - INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, - { "image" }, - { "Convolutional_Pose_Machine/stage_5_out" }, - { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }, - { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, - 351, 382, 382, 382, 76, 146, 170, 193, 216, 146, - 123, 99, 287, 381, 451, 287, 381, 475 }), + PARAM_TYPE_TFLITE_PE_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // Hand gesture model 1 from AIC - ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" }, - { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }), + ParamType_Infer( + PARAM_TYPE_TFLITE_AICHG_1_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // Hand gesture model 2 from AIC - ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" }, - { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" }, - { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39, - 78, 36, 82, 42, 82, 44, 83, 45, 35, 37, - 61, 36, 59, 36, 52, 39, 35, 32, 40, 34, - 62, 39, 70, 40, 58, 41, 34, 42, 34, 41, - 38, 38, 12 }), + ParamType_Infer( + PARAM_TYPE_TFLITE_AICHG_2_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)), // mobilenet based image classification test ParamType_Infer( - "one", INFERENCE_TARGET_GPU, - TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION, - INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/image_classification.bin" }, 224, - 224, 3, { "input_2" }, { "dense_3/Softmax" }, - { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, - { 3 }), + PARAM_TYPE_TFLITE_IC_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)), // quantized mobilenet based image classification test ParamType_Infer( - "one", INFERENCE_TARGET_GPU, - TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION, - INFERENCE_TENSOR_DATA_TYPE_UINT8, - { "/opt/usr/images/image_classification_q.bin" }, 224, - 224, 3, { "input" }, - { "MobilenetV1/Predictions/Reshape_1" }, - { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" }, - { 955 }), + PARAM_TYPE_TFLITE_IC_Q_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)), // object detection test ParamType_Infer( - "one", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION, - INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, - { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, - { "normalized_input_image_tensor" }, - { "TFLite_Detection_PostProcess", - "TFLite_Detection_PostProcess:1", - "TFLite_Detection_PostProcess:2", - "TFLite_Detection_PostProcess:3" }, - { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, - { 451, 474, 714, 969 }), + PARAM_TYPE_TFLITE_OD_INFER("one", 
+				PARAM_TYPE_TFLITE_OD_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// face detection test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-				{ 727, 225, 960, 555 }),
+				PARAM_TYPE_TFLITE_FD_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// pose estimation test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
-				{ "image" },
-				{ "Convolutional_Pose_Machine/stage_5_out" },
-				{ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
-				{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
-				  351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
-				  123, 99, 287, 381, 451, 287, 381, 475 }),
+				PARAM_TYPE_TFLITE_PE_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// Hand gesture model 1 from AIC
-		ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-				{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_1_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// Hand gesture model 2 from AIC
-		ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-				{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-				{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-				  78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-				  61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-				  62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-				  38, 38, 12 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_2_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		/*********************************************************************************/
 		// parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
 		// mobilenet based image classification test
 		// TFLITE via MLAPI.
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_CPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// quantized mobilenet based image classification test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_CPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_UINT8,
-				{ "/opt/usr/images/image_classification_q.bin" }, 224,
-				224, 3, { "input" },
-				{ "MobilenetV1/Predictions/Reshape_1" },
-				{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-				{ 955 }),
+				PARAM_TYPE_TFLITE_IC_Q_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// object detection test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-				{ 451, 474, 714, 969 }),
+				PARAM_TYPE_TFLITE_OD_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// face detection test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-				{ 727, 225, 960, 555 }),
+				PARAM_TYPE_TFLITE_FD_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// pose estimation test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
-				{ "image" },
-				{ "Convolutional_Pose_Machine/stage_5_out" },
-				{ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
-				{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
-				  351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
-				  123, 99, 287, 381, 451, 287, 381, 475 }),
+				PARAM_TYPE_TFLITE_PE_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// Hand gesture model 1 from AIC
-		ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-				{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_1_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// Hand gesture model 2 from AIC
-		ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-				{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-				{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-				  78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-				  61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-				  62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-				  38, 38, 12 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_2_INFER("one", INFERENCE_TARGET_CPU, INFERENCE_ITERATION)),
 		// mobilenet based image classification test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_GPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
-
+				PARAM_TYPE_TFLITE_IC_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// quantized mobilenet based image classification test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_GPU,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_UINT8,
-				{ "/opt/usr/images/image_classification_q.bin" }, 224,
-				224, 3, { "input" },
-				{ "MobilenetV1/Predictions/Reshape_1" },
-				{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
-				{ 955 }),
-
+				PARAM_TYPE_TFLITE_IC_Q_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// object detection test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
-				{ 451, 474, 714, 969 }),
-
+				PARAM_TYPE_TFLITE_OD_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// face detection test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
-				{ "normalized_input_image_tensor" },
-				{ "TFLite_Detection_PostProcess",
-				  "TFLite_Detection_PostProcess:1",
-				  "TFLite_Detection_PostProcess:2",
-				  "TFLite_Detection_PostProcess:3" },
-				{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
-				{ 727, 225, 960, 555 }),
-
+				PARAM_TYPE_TFLITE_FD_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// pose estimation test
 		ParamType_Infer(
-				"one", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
-				INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
-				{ "image" },
-				{ "Convolutional_Pose_Machine/stage_5_out" },
-				{ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
-				{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
-				  351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
-				  123, 99, 287, 381, 451, 287, 381, 475 }),
+				PARAM_TYPE_TFLITE_PE_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// Hand gesture model 1 from AIC
-		ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
-				{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_1_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		// Hand gesture model 2 from AIC
-		ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
-				{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
-				{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
-				  78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
-				  61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
-				  62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
-				  38, 38, 12 })
+		ParamType_Infer(
+				PARAM_TYPE_TFLITE_AICHG_2_INFER("one", INFERENCE_TARGET_GPU, INFERENCE_ITERATION))
 		/* TODO */
 		));
 
@@ -1307,53 +1019,17 @@ INSTANTIATE_TEST_CASE_P(
 		// mobilenet based image classification test
 		// ARMNN.
 		ParamType_CLTuner(
-				"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_RAPID,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_RAPID, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		ParamType_CLTuner(
-				"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		ParamType_CLTuner(
-				"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_NORMAL,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_NORMAL, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		ParamType_CLTuner(
-				"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		ParamType_CLTuner(
-				"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
-				{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
-				{ 3 }),
+				PARAM_TYPE_TFLITE_IC_CLTUNER(true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)),
 		ParamType_CLTuner(
-				"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
-				TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
-				INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-				{ "/opt/usr/images/image_classification.bin" }, 224,
-				224, 3, { "input_2" }, { "dense_3/Softmax" },
"/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, - { 3 }) + PARAM_TYPE_TFLITE_IC_CLTUNER(true, false, INFERENCE_ENGINE_CLTUNER_READ, "armnn", INFERENCE_TARGET_GPU, INFERENCE_ITERATION)) /* TODO */ ));