From: Inki Dae
Date: Tue, 3 Mar 2020 00:41:39 +0000 (+0900)
Subject: test: Add inference test for armnn backend
X-Git-Tag: submit/tizen/20200423.063253~48
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=48b869f1ab69d1a73dd7aa9b476d38e5ab4c7b3e;p=platform%2Fcore%2Fmultimedia%2Finference-engine-interface.git

test: Add inference test for armnn backend

Change-Id: I2da5f3676a9286c4ac5f07d181627e15d5f0caba
Signed-off-by: Inki Dae
---

diff --git a/test/src/inference_engine_test.cpp b/test/src/inference_engine_test.cpp
index 7271562..e4aed68 100644
--- a/test/src/inference_engine_test.cpp
+++ b/test/src/inference_engine_test.cpp
@@ -27,9 +27,11 @@ using namespace InferenceEngineInterface::Common;
 
 typedef std::tuple<std::string, int> ParamType;
 typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>> ParamType_Infer;
 
 class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
 class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
+class InferenceEngineCommonTest_3 : public testing::TestWithParam<ParamType_Infer> { };
 
 std::map<std::string, int> Model_Formats = {
     { "caffemodel", INFERENCE_MODEL_CAFFE },
@@ -103,6 +105,70 @@ int GetModelInfo(std::vector<std::string> &model_paths, std::vector<std::string
+int PrepareTensorBuffers(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &inputs,
+                         std::vector<inference_engine_tensor_buffer> &outputs)
+{
+    int ret = engine->GetInputTensorBuffers(inputs);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    if (inputs.empty()) {
+        inference_engine_layer_property input_property;
+        ret = engine->GetInputLayerProperty(input_property);
+        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+        // If backend is OpenCV then the buffers will be allocated outside of this function.
+        if (input_property.tensor_infos.empty()) {
+            return INFERENCE_ENGINE_ERROR_NONE;
+        }
+
+        for (int i = 0; i < (int)input_property.tensor_infos.size(); ++i) {
+            inference_engine_tensor_info tensor_info = input_property.tensor_infos[i];
+            inference_engine_tensor_buffer tensor_buffer;
+            tensor_buffer.buffer = NULL;
+            if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
+                tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+            } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+                tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+            }
+
+            EXPECT_TRUE(tensor_buffer.buffer);
+            tensor_buffer.data_type = tensor_info.data_type;
+            inputs.push_back(tensor_buffer);
+        }
+    }
+
+    ret = engine->GetOutputTensorBuffers(outputs);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    if (outputs.empty()) {
+        inference_engine_layer_property output_property;
+        ret = engine->GetOutputLayerProperty(output_property);
+        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+        // If backend is OpenCV then the buffers will be allocated outside of this function.
+        if (output_property.tensor_infos.empty()) {
+            return INFERENCE_ENGINE_ERROR_NONE;
+        }
+
+        for (int i = 0; i < (int)output_property.tensor_infos.size(); ++i) {
+            inference_engine_tensor_info tensor_info = output_property.tensor_infos[i];
+            inference_engine_tensor_buffer tensor_buffer;
+            tensor_buffer.buffer = NULL;
+            if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
+                tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+            } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+                tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+            }
+
+            EXPECT_TRUE(tensor_buffer.buffer);
+            tensor_buffer.data_type = tensor_info.data_type;
+            outputs.push_back(tensor_buffer);
+        }
+    }
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
 TEST_P(InferenceEngineCommonTest_2, Load)
 {
     std::string backend_name;
@@ -137,6 +203,93 @@ TEST_P(InferenceEngineCommonTest_2, Load)
     ASSERT_NE(model_type, -1);
 
     ret = engine->Load(models, (inference_model_format_e)model_type);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    engine->UnbindBackend();
+
+    delete engine;
+}
+
+TEST_P(InferenceEngineCommonTest_3, Inference)
+{
+    std::string backend_name;
+    int target_devices;
+    int height;
+    int width;
+    int ch;
+    std::vector<std::string> input_layers;
+    std::vector<std::string> output_layers;
+    std::vector<std::string> model_paths;
+
+    std::tie(backend_name, target_devices, height, width, ch, input_layers, output_layers, model_paths) = GetParam();
+
+    std::cout << "backend : " << backend_name << "\n";
+    std::cout << "target device : " << target_devices << "\n";
+
+    inference_engine_config config = {
+        .backend_name = backend_name,
+        .target_devices = target_devices
+    };
+
+    InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+    ASSERT_TRUE(engine);
+
+    int ret = engine->BindBackend(&config);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    inference_engine_capacity capacity;
+    ret = engine->GetBackendCapacity(&capacity);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    ret = engine->SetTargetDevices(target_devices);
+    EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    std::vector<std::string> models;
+    int model_type = GetModelInfo(model_paths, models);
+    ASSERT_NE(model_type, -1);
+
+    inference_engine_layer_property input_property;
+    std::vector<std::string>::iterator iter;
+
+    for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+        input_property.layer_names.push_back(*iter);
+    }
+
+    ret = engine->SetInputLayerProperty(input_property);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    inference_engine_layer_property output_property;
+
+    for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+        output_property.layer_names.push_back(*iter);
+    }
+
+    ret = engine->SetOutputLayerProperty(output_property);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    ret = engine->Load(models, (inference_model_format_e)model_type);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    std::vector<inference_engine_tensor_buffer> inputs, outputs;
+    ret = PrepareTensorBuffers(engine, inputs, outputs);
+    ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+    // If backend is OpenCV then allocate the input tensor buffer here.
+    if (inputs.empty()) {
+        inference_engine_tensor_buffer tensor_buffer;
+        unsigned int tensor_size;
+        if (ch == 3) {
+            tensor_size = height * width * 4;
+            tensor_buffer.buffer = (void *)(new float[tensor_size]);
+        } else {
+            tensor_size = height * width;
+            tensor_buffer.buffer = (void *)(new unsigned char[tensor_size]);
+        }
+
+        inputs.push_back(tensor_buffer);
+    }
+
+    ret = engine->Run(inputs, outputs);
     EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
     engine->UnbindBackend();
@@ -146,6 +299,7 @@ TEST_P(InferenceEngineCommonTest_2, Load)
 
 INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
         testing::Values(
+            // backend name, target device
            ParamType("armnn", INFERENCE_TARGET_CPU),
            ParamType("armnn", INFERENCE_TARGET_GPU),
            ParamType("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU)
@@ -156,9 +310,41 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
 
 INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_2,
         testing::Values(
            // backend name, target device, model path/s
+            // mobilenet based image classification model loading test
            ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
            ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
-            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+            // object detection model loading test
+            ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            // face detection model loading test
+            ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            // pose estimation model loading test
+            ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }),
+            ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" })
+            /* TODO */
+        )
+    );
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_3,
+        testing::Values(
+            // backend name, target device, height, width, channel count, input layer names, output layer names, model path/s
+            // mobilenet based image classification test
+            ParamType_Infer("armnn", INFERENCE_TARGET_CPU, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+            ParamType_Infer("armnn", INFERENCE_TARGET_GPU, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+            // object detection test
+            ParamType_Infer("armnn", INFERENCE_TARGET_CPU, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            ParamType_Infer("armnn", INFERENCE_TARGET_GPU, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }),
+            // face detection test
+            ParamType_Infer("armnn", INFERENCE_TARGET_CPU, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            ParamType_Infer("armnn", INFERENCE_TARGET_GPU, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }),
+            // pose estimation test
+            ParamType_Infer("armnn", INFERENCE_TARGET_CPU, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" }),
+            ParamType_Infer("armnn", INFERENCE_TARGET_GPU, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" })
            /* TODO */
        )
    );
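
For reference, the new cases above use GoogleTest's value-parameterized test mechanism (testing::TestWithParam, TEST_P, INSTANTIATE_TEST_CASE_P). A minimal, self-contained sketch of that pattern follows; the fixture name, tuple layout, and plain-integer device codes are illustrative placeholders and are not part of this patch or the inference-engine API.

#include <string>
#include <tuple>
#include <gtest/gtest.h>

// Placeholder parameter tuple: backend name plus an integer device code.
typedef std::tuple<std::string, int> ExampleParam;

// Fixture parameterized over ExampleParam, mirroring InferenceEngineCommonTest above.
class ExampleParamTest : public testing::TestWithParam<ExampleParam> { };

TEST_P(ExampleParamTest, BackendNameIsNotEmpty)
{
    std::string backend_name;
    int target_device;

    // GetParam() returns the tuple supplied through testing::Values() below.
    std::tie(backend_name, target_device) = GetParam();

    EXPECT_FALSE(backend_name.empty());
    EXPECT_GE(target_device, 0);
}

// Each Values() entry becomes one test instance,
// e.g. Prefix/ExampleParamTest.BackendNameIsNotEmpty/0.
INSTANTIATE_TEST_CASE_P(Prefix, ExampleParamTest,
        testing::Values(
            ExampleParam("armnn", 1),
            ExampleParam("armnn", 2)
        )
    );

int main(int argc, char **argv)
{
    testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}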