test: Add inference test for armnn backend
author Inki Dae <inki.dae@samsung.com>
Tue, 3 Mar 2020 00:41:39 +0000 (09:41 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
Change-Id: I2da5f3676a9286c4ac5f07d181627e15d5f0caba
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_test.cpp

index 7271562b84873b330a3c0cadb56207b121893db0..e4aed683cfe0dbae9dfb2bf3127f941a932224fe 100644
@@ -27,9 +27,11 @@ using namespace InferenceEngineInterface::Common;
 
 typedef std::tuple<std::string, int> ParamType;
 typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
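+// backend name, target device, height, width, channel count, input layer names, output layer names, model path/s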
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>> ParamType_Infer;
 
 class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
 class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
+class InferenceEngineCommonTest_3 : public testing::TestWithParam<ParamType_Infer> { };
 
 std::map<std::string, int> Model_Formats = {
     { "caffemodel", INFERENCE_MODEL_CAFFE },
@@ -103,6 +105,70 @@ int GetModelInfo(std::vector <std::string> &model_paths, std::vector<std::string
      return ret;
 }
 
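+// Prepare input and output tensor buffers for inference. If the backend does
+// not hand out its own buffers, allocate them here from the tensor information
+// reported via the layer properties; if no tensor information is reported
+// either (e.g. the OpenCV backend), leave the allocation to the caller.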
+int PrepareTensorBuffers(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &inputs,
+                            std::vector<inference_engine_tensor_buffer> &outputs)
+{
+    int ret = engine->GetInputTensorBuffers(inputs);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    if (inputs.empty()) {
+        inference_engine_layer_property input_property;
+        ret = engine->GetInputLayerProperty(input_property);
+        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+        // If the backend is OpenCV then the buffers will be allocated outside of this function.
+        if (input_property.tensor_infos.empty()) {
+            return INFERENCE_ENGINE_ERROR_NONE;
+        }
+
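+        // Allocate one buffer per input tensor, matching the data type the backend reports.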
+        for (int i = 0; i < (int)input_property.tensor_infos.size(); ++i) {
+            inference_engine_tensor_info tensor_info = input_property.tensor_infos[i];
+            inference_engine_tensor_buffer tensor_buffer;
+            tensor_buffer.buffer = NULL;
+            if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
+                tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+            } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+                tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+            }
+
+            EXPECT_TRUE(tensor_buffer.buffer);
+            tensor_buffer.data_type = tensor_info.data_type;
+            inputs.push_back(tensor_buffer);
+        }
+    }
+
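+    // Repeat for the output tensor buffers.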
+    ret = engine->GetOutputTensorBuffers(outputs);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    if (outputs.empty()) {
+        inference_engine_layer_property output_property;
+        ret = engine->GetOutputLayerProperty(output_property);
+        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+        // If the backend is OpenCV then the buffers will be allocated outside of this function.
+        if (output_property.tensor_infos.empty()) {
+            return INFERENCE_ENGINE_ERROR_NONE;
+        }
+
+        for (int i = 0; i < (int)output_property.tensor_infos.size(); ++i) {
+            inference_engine_tensor_info tensor_info = output_property.tensor_infos[i];
+            inference_engine_tensor_buffer tensor_buffer;
+            tensor_buffer.buffer = NULL;
+            if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
+                tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+            } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+                tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+            }
+
+            EXPECT_TRUE(tensor_buffer.buffer);
+            tensor_buffer.data_type = tensor_info.data_type;
+            outputs.push_back(tensor_buffer);
+        }
+    }
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
 TEST_P(InferenceEngineCommonTest_2, Load)
 {
     std::string backend_name;
@@ -137,6 +203,93 @@ TEST_P(InferenceEngineCommonTest_2, Load)
     ASSERT_NE(model_type, -1);
 
     ret = engine->Load(models, (inference_model_format_e)model_type);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    engine->UnbindBackend();
+
+    delete engine;
+}
+
+TEST_P(InferenceEngineCommonTest_3, Inference)
+{
+    std::string backend_name;
+    int target_devices;
+    int height;
+    int width;
+    int ch;
+    std::vector<std::string> input_layers;
+    std::vector<std::string> output_layers;
+    std::vector<std::string> model_paths;
+
+    std::tie(backend_name, target_devices, height, width, ch, input_layers, output_layers, model_paths) = GetParam();
+
+    std::cout << "backend : " << backend_name << "\n";
+    std::cout << "target device : " << target_devices << "\n";
+
+    inference_engine_config config = {
+        .backend_name = backend_name,
+        .target_devices = target_devices
+    };
+
+    InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+    ASSERT_TRUE(engine);
+
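+    // Bind the backend library given by config so the engine can forward calls to it.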
+    int ret = engine->BindBackend(&config);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    inference_engine_capacity capacity;
+    ret = engine->GetBackendCapacity(&capacity);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    ret = engine->SetTargetDevices(target_devices);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    std::vector <std::string> models;
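+    // GetModelInfo() maps the model file extension to an inference_model_format_e entry (see Model_Formats).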
+    int model_type = GetModelInfo(model_paths, models);
+    ASSERT_NE(model_type, -1);
+
+    inference_engine_layer_property input_property;
+
+    for (auto &layer : input_layers) {
+        input_property.layer_names.push_back(layer);
+    }
+
+    ret = engine->SetInputLayerProperty(input_property);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    inference_engine_layer_property output_property;
+
+    for (auto &layer : output_layers) {
+        output_property.layer_names.push_back(layer);
+    }
+
+    ret = engine->SetOutputLayerProperty(output_property);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    ret = engine->Load(models, (inference_model_format_e)model_type);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
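+    // Get tensor buffers from the backend, or allocate them here if it provides none.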
+    std::vector <inference_engine_tensor_buffer> inputs, outputs;
+    ret = PrepareTensorBuffers(engine, inputs, outputs);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    // If the backend is OpenCV then the input tensor buffer has to be allocated here.
+    if (inputs.empty()) {
+        inference_engine_tensor_buffer tensor_buffer;
+        unsigned int tensor_size;
+        if (ch == 3) {
+            tensor_size = height * width * ch;
+            tensor_buffer.buffer = (void *)(new float[tensor_size]);
+            tensor_buffer.data_type = TENSOR_DATA_TYPE_FLOAT32;
+        } else {
+            tensor_size = height * width;
+            tensor_buffer.buffer = (void *)(new unsigned char[tensor_size]);
+            tensor_buffer.data_type = TENSOR_DATA_TYPE_UINT8;
+        }
+
+        inputs.push_back(tensor_buffer);
+    }
+
+    ret = engine->Run(inputs, outputs);
     EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
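+
+    // A cleanup sketch, assuming these buffers were allocated by the test
+    // itself (as PrepareTensorBuffers() does when the backend hands out none);
+    // if the backend owns the buffers, this must be skipped.
+    for (auto &buffer : inputs) {
+        if (buffer.data_type == TENSOR_DATA_TYPE_FLOAT32)
+            delete[] (float *)buffer.buffer;
+        else
+            delete[] (unsigned char *)buffer.buffer;
+    }
+
+    for (auto &buffer : outputs) {
+        if (buffer.data_type == TENSOR_DATA_TYPE_FLOAT32)
+            delete[] (float *)buffer.buffer;
+        else
+            delete[] (unsigned char *)buffer.buffer;
+    }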
 
     engine->UnbindBackend();
@@ -146,6 +299,7 @@ TEST_P(InferenceEngineCommonTest_2, Load)
 
 INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
         testing::Values(
+            // backend name, target device
             ParamType("armnn", INFERENCE_TARGET_CPU),
             ParamType("armnn", INFERENCE_TARGET_GPU),
             ParamType("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU)
@@ -156,9 +310,41 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
 INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_2,
         testing::Values(
             // backend name, target device, model path/s
+            // mobilenet based image classification model loading test
             ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
             ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
-            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+            // object detection model loading test
+            ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            // face detection model loading test
+            ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            // pose estimation model loading test
+            ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" })
+            /* TODO */
+        )
+    );
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_3,
+        testing::Values(
+            // backend name, target device, height, width, channel count, input layer names, output layer names, model path/s
+            // mobilenet based image classification test
+            ParamType_Infer("armnn", INFERENCE_TARGET_CPU, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+            ParamType_Infer("armnn", INFERENCE_TARGET_GPU, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+            // object detection test
+            ParamType_Infer("armnn", INFERENCE_TARGET_CPU, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            ParamType_Infer("armnn", INFERENCE_TARGET_GPU, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            // face detection test
+            ParamType_Infer("armnn", INFERENCE_TARGET_CPU, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            ParamType_Infer("armnn", INFERENCE_TARGET_GPU, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            // pose estimation test
+            ParamType_Infer("armnn", INFERENCE_TARGET_CPU, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }),
+            ParamType_Infer("armnn", INFERENCE_TARGET_GPU, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" })
             /* TODO */
         )
     );