test: Add reference model profiling support for ONERT 15/246415/3
author: Inki Dae <inki.dae@samsung.com>
Thu, 29 Oct 2020 06:21:31 +0000 (15:21 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Thu, 29 Oct 2020 07:08:16 +0000 (16:08 +0900)
Change-Id: I48bcc8f959b3a803b75a0ea76cdf01c19b58ed92
Signed-off-by: Inki Dae <inki.dae@samsung.com>
start_profiler.sh
test/src/inference_engine_profiler.cpp

index 5ee2a80..d8dbbd5 100644 (file)
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-CNT=20
+CNT=39
 
 echo "Tflite model test case count = $CNT"
 
@@ -12,7 +12,7 @@ do
 done
 
 # Caffe model
-CNT=8
+CNT=7
 
 echo "Caffe model test case count = $CNT"
 
index c499e77..1101448 100644 (file)
@@ -103,8 +103,18 @@ TEST_P(InferenceEngineTfliteTest, Inference)
        std::cout << test_name << " inference test : backend = " << backend_name
                          << ", target device = " << Target_Formats[target_devices]
                          << std::endl;
+
+       int backend_type = -1;
+
+       // If backend name is "one" then change it to "mlapi"
+       // and set backend_type to INFERENCE_BACKEND_ONE.
+       if (backend_name.compare("one") == 0) {
+               backend_name = "mlapi";
+               backend_type = INFERENCE_BACKEND_ONE;
+       }
+
        inference_engine_config config = { .backend_name = backend_name,
-                                                                          .backend_type = -1,
+                                                                          .backend_type = backend_type,
                                                                           .target_devices = target_devices };
 
        auto engine = std::make_unique<InferenceEngineCommon>();
@@ -119,6 +129,9 @@ TEST_P(InferenceEngineTfliteTest, Inference)
                return;
        }
 
+       if (backend_type == INFERENCE_BACKEND_ONE)
+               backend_name = "one";
+
        ret = engine->DumpProfileToFile("profile_data_" + backend_name +
                                                                        "_tflite_model.txt");
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
@@ -749,6 +762,111 @@ INSTANTIATE_TEST_CASE_P(
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
                                // mobilenet based image classification test
+                               // ONE via MLAPI.
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               // quantized mobilenet based image classification test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               // object detection test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               // face detection test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               // pose estimation test
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               ParamType_Infer(
+                                               "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+                               // mobilenet based image classification test
                                // TFLITE.
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_CPU,