test: Add TFLITE backend test cases
author	Inki Dae <inki.dae@samsung.com>
Fri, 3 Apr 2020 03:10:25 +0000 (12:10 +0900)
committer	Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
Change-Id: Ie27c4cb3b14b5f27883712d6f5dc46f358a02d3d
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_test.cpp

index dbcd54a..4e520c8 100644 (file)
@@ -35,7 +35,7 @@ typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, in
 
 class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
 class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
-class InferenceEngineCommonTest_3 : public testing::TestWithParam<ParamType_Infer> { };
+class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer> { };
 
 std::map<std::string, int> Model_Formats = {
        { "caffemodel", INFERENCE_MODEL_CAFFE },
@@ -421,7 +421,7 @@ int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers,
        return ret;
 }
 
-TEST_P(InferenceEngineCommonTest_3, Inference)
+TEST_P(InferenceEngineTfliteTest, Inference)
 {
        std::string backend_name;
        int target_devices;
@@ -610,8 +610,12 @@ TEST_P(InferenceEngineCommonTest_3, Inference)
 INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
                testing::Values(
                        // parameter order : backend name, target device
+                       // ARMNN.
                        ParamType("armnn", INFERENCE_TARGET_CPU),
-                       ParamType("armnn", INFERENCE_TARGET_GPU)
+                       ParamType("armnn", INFERENCE_TARGET_GPU),
+                       // TFLITE.
+                       ParamType("tflite", INFERENCE_TARGET_CPU),
+                       ParamType("tflite", INFERENCE_TARGET_GPU)
                        /* TODO */
                )
 );
@@ -620,16 +624,21 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_2,
                testing::Values(
                        // parameter order : backend name, target device, model path/s
                        // mobilenet based image classification model loading test
+                       // ARMNN.
                        ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
-                       ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
+                       ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+                       // TFLITE.
+                       ParamType_Load("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+                       ParamType_Load("tflite", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
                        /* TODO */
                )
 );
 
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_3,
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTfliteTest,
                testing::Values(
                        // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
                        // mobilenet based image classification test
+                       // ARMNN.
                        ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
                        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
                        // quantized mobilenet based image classification test
@@ -647,6 +656,27 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_3,
                                                           76, 146, 170, 193, 216, 146, 123,  99, 287, 381, 451, 287, 381, 475 }),
                        ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
                                                        { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+                                                          76, 146, 170, 193, 216, 146, 123,  99, 287, 381, 451, 287, 381, 475 }),
+                       // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+                       // mobilenet based image classification test
+                       // TFLITE.
+                       ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+                       ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+                       // quantized mobilenet based image classification test
+                       ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+                       ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+                       // object detection test
+                       ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+                       ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+                       // face detection test
+                       ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+                       ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+                       // pose estimation test
+                       ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                                       { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+                                                          76, 146, 170, 193, 216, 146, 123,  99, 287, 381, 451, 287, 381, 475 }),
+                       ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                                       { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
                                                           76, 146, 170, 193, 216, 146, 123,  99, 287, 381, 451, 287, 381, 475 })
                        /* TODO */
                )