test: enable inference engine profiler
author Inki Dae <inki.dae@samsung.com>
Thu, 2 Apr 2020 00:51:23 +0000 (09:51 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
This patch enables the inference engine profiler.
The profile data will be stored in the 'dump.txt' file
using Markdown syntax.

Change-Id: Ie70d057244a4a56e9208bd2d2e4d0accecce4122
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_test.cpp

index 31f648b6d5a17cd0a05ea625176a2bcb426ffad2..dce61cba426f80ff36803d108cddf20a1c51a805 100644 (file)
@@ -31,7 +31,7 @@ using namespace InferenceEngineInterface::Common;
 
 typedef std::tuple<std::string, int> ParamType;
 typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
-typedef std::tuple<std::string, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
 
 class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
 class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
@@ -424,6 +424,7 @@ TEST_P(InferenceEngineCommonTest_3, Inference)
        std::string backend_name;
        int target_devices;
        int test_type;
+       int iteration;
        int tensor_type;
        std::vector<std::string> image_paths;
        int height;
@@ -434,7 +435,11 @@ TEST_P(InferenceEngineCommonTest_3, Inference)
        std::vector<std::string> model_paths;
        std::vector<int> answers;
 
-       std::tie(backend_name, target_devices, test_type, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+       std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+
+       if (iteration < 1) {
+               iteration = 1;
+       }
 
        std::string test_name;
        switch (test_type) {
@@ -468,7 +473,21 @@ TEST_P(InferenceEngineCommonTest_3, Inference)
                return;
        }
 
-       int ret = engine->BindBackend(&config);
+       int ret = engine->EnableProfiler(true);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       ret = engine->DumpProfileToFile("dump.txt");
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               delete engine;
+               ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+               return;
+       }
+
+       ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                delete engine;
                ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -545,8 +564,10 @@ TEST_P(InferenceEngineCommonTest_3, Inference)
                CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
        }
 
-       ret = engine->Run(inputs, outputs);
-       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+       for (int repeat = 0; repeat < iteration; ++repeat) {
+               ret = engine->Run(inputs, outputs);
+               EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+       }
 
        tensor_t result;
        FillOutputResult(engine, outputs, result);
@@ -607,22 +628,22 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_3,
                testing::Values(
                        // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
                        // mobilenet based image classification test
-                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
-                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
                        // quantized mobilenet based image classification test
-                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
-                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
                        // object detection test
-                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
-                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
                        // face detection test
-                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
-                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
                        // pose estimation test
-                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                       ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
                                                        { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
                                                           76, 146, 170, 193, 216, 146, 123,  99, 287, 381, 451, 287, 381, 475 }),
-                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                       ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
                                                        { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
                                                           76, 146, 170, 193, 216, 146, 123,  99, 287, 381, 451, 287, 381, 475 })
                        /* TODO */