From df03a11d589c91c8952a5b56e6e7728de1d6529e Mon Sep 17 00:00:00 2001
From: Inki Dae <inki.dae@samsung.com>
Date: Fri, 6 Mar 2020 16:33:55 +0900
Subject: [PATCH] test: add a new parameter for tensor data type

This patch passes the tensor data type to each test case as a parameter
so that various model types, such as quantized models, can be tested.

Change-Id: I251813a857ad832cdc55f90996eec3f16b64fc82
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 test/src/inference_engine_test.cpp | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/test/src/inference_engine_test.cpp b/test/src/inference_engine_test.cpp
index 90b3007..55cc16c 100644
--- a/test/src/inference_engine_test.cpp
+++ b/test/src/inference_engine_test.cpp
@@ -31,7 +31,7 @@ using namespace InferenceEngineInterface::Common;
 
 typedef std::tuple<std::string, int> ParamType;
 typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
-typedef std::tuple<std::string, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
+typedef std::tuple<std::string, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
 
 class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
 class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
@@ -405,6 +405,7 @@ TEST_P(InferenceEngineCommonTest_3, Inference)
     std::string backend_name;
     int target_devices;
     int test_type;
+    int tensor_type;
     std::vector<std::string> image_paths;
     int height;
     int width;
@@ -414,7 +415,7 @@ TEST_P(InferenceEngineCommonTest_3, Inference)
     std::vector<std::string> model_paths;
     std::vector<int> answers;
 
-    std::tie(backend_name, target_devices, test_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+    std::tie(backend_name, target_devices, test_type, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
 
     std::string test_name;
     switch (test_type) {
@@ -466,7 +467,7 @@ TEST_P(InferenceEngineCommonTest_3, Inference)
     inference_engine_tensor_info tensor_info = {
         { 1, ch, height, width },
         (inference_tensor_shape_type_e)TENSOR_SHAPE_NCHW,
-        (inference_tensor_data_type_e)TENSOR_DATA_TYPE_FLOAT32,
+        (inference_tensor_data_type_e)tensor_type,
         (size_t)(1 * ch * height * width)
     };
 
@@ -568,19 +569,19 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_3,
     testing::Values(
         // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
         // mobilenet based image classification test
-        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
-        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
         // object detection test
-        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
-        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
         // face detection test
-        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
-        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
         // pose estimation test
-        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
                         { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382, 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
-        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
                         { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382, 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 })
         /* TODO */
-- 
2.34.1
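
Note (not part of the patch): with the tensor data type now parameterized, a quantized
model could be covered by appending one more entry to the testing::Values() list for
InferenceEngineCommonTest_3 that passes an 8-bit type instead of TENSOR_DATA_TYPE_FLOAT32.
The sketch below is only illustrative: the TENSOR_DATA_TYPE_UINT8 enumerator, the quantized
model path, the input image dump, the layer names, and the expected answer are assumptions
that would have to match the actual inference-engine type definitions and the model files
installed on the target.

    // Hypothetical quantized image classification case (enumerator, paths, layer
    // names, and expected result are assumed, not taken from this patch):
    ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION,
                    TENSOR_DATA_TYPE_UINT8,                               // assumed 8-bit tensor type
                    { "/opt/usr/images/image_classification_q.bin" },     // assumed quantized input dump
                    224, 224, 3,
                    { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, // assumed layer names
                    { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" }, // assumed model path
                    { 955 }),                                             // assumed expected class index

Because the test already casts the parameter to inference_tensor_data_type_e when filling
inference_engine_tensor_info, no further changes to the test body would be needed for such
a case; only the parameter list grows.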