From 234c2d0885e6d3861b0569e24c18cbda72bee028 Mon Sep 17 00:00:00 2001
From: Inki Dae <inki.dae@samsung.com>
Date: Tue, 12 May 2020 14:38:49 +0900
Subject: [PATCH] test: mitigate code duplication with lambda functions

Inference engine initialization is common to all test cases, so move
the initialization code into lambda functions and use them instead.

Change-Id: Ied4bfd77c19ceb3855f6e070a548cda6418e547e
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
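
For readers unfamiliar with the pattern: below is a minimal, self-contained
sketch of the lambda-based initialization helper this patch introduces. The
Engine and Config types, the Init helper, and the 0/-1 style error codes are
hypothetical stand-ins for InferenceEngineCommon, inference_engine_config,
the InferenceEngineInit_* lambdas, and INFERENCE_ENGINE_ERROR_NONE; they are
not the project's real API.

    #include <iostream>
    #include <string>

    // Hypothetical stand-ins for InferenceEngineCommon and
    // inference_engine_config (not the project's real types).
    struct Config {
        std::string backend_name;
        int target_devices;
    };

    struct Engine {
        // Always succeed in this sketch; the real calls can fail.
        int BindBackend(const Config *config) { (void)config; return 0; }
        int SetTargetDevices(int devices) { (void)devices; return 0; }
    };

    // The common setup sequence hoisted into a single lambda, as the
    // patch does: each test then calls the helper once instead of
    // repeating the bind/target-device boilerplate.
    static auto Init = [](Engine *engine, const std::string &backend,
                          int devices) -> int {
        Config config = { backend, devices };

        int ret = engine->BindBackend(&config);
        if (ret != 0)
            return ret;

        return engine->SetTargetDevices(devices);
    };

    int main(void)
    {
        Engine engine;

        // Every test body shrinks to one helper call plus one assert.
        int ret = Init(&engine, "tflite", 1);
        std::cout << (ret == 0 ? "init ok" : "init failed") << std::endl;

        return ret;
    }

The same idea extends to the two-parameter variant in the diff below, which
also queries backend capacity before selecting target devices.
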
 test/src/inference_engine_tc.cpp | 113 ++++++++++++++-----------------
 1 file changed, 49 insertions(+), 64 deletions(-)

diff --git a/test/src/inference_engine_tc.cpp b/test/src/inference_engine_tc.cpp
index 2c1441e..9b2293c 100644
--- a/test/src/inference_engine_tc.cpp
+++ b/test/src/inference_engine_tc.cpp
@@ -28,45 +28,53 @@
 #include "inference_engine_common_impl.h"
 #include "inference_engine_test_common.h"
 
-typedef std::tuple<std::string, int> ParamType_Bind;
-typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
-typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
+typedef std::tuple<std::string> ParamType_One;
+typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Three;
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Many;
 
-class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_Bind> { };
-class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Load> { };
-class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Infer> { };
+class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_One> { };
+class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Three> { };
+class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Many> { };
+
+static auto InferenceEngineInit_One_Param = [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
+	inference_engine_config config = { backend_name, 0 };
+
+	return engine->BindBackend(&config);
+};
+
+static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine, std::string &backend_name, int &target_devices) -> int {
+	inference_engine_config config = { backend_name, target_devices };
+
+	int ret = engine->BindBackend(&config);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE)
+		return ret;
+
+	inference_engine_capacity capacity;
+	ret = engine->GetBackendCapacity(&capacity);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE)
+		return ret;
+
+	return engine->SetTargetDevices(config.target_devices);
+};
 
 TEST_P(InferenceEngineTestCase_G1, Bind_P)
 {
 	std::string backend_name;
-	int target_devices;
-
-	std::tie(backend_name, target_devices) = GetParam();
-	std::cout <<"Bind test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+	std::tie(backend_name) = GetParam();
 
-	inference_engine_config config = {
-		.backend_name = backend_name,
-		.target_devices = target_devices
-	};
+	std::cout <<"backend = " << backend_name << std::endl;
 
 	auto engine = std::make_unique<InferenceEngineCommon>();
 	ASSERT_TRUE(engine);
 
-	int ret = engine->BindBackend(&config);
-	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-		return;
-	}
-
-	inference_engine_capacity capacity;
-	ret = engine->GetBackendCapacity(&capacity);
-	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
 	engine->UnbindBackend();
 }
 
-TEST_P(InferenceEngineTestCase_G2, Load_P)
+TEST_P(InferenceEngineTestCase_G3, Load_P)
 {
 	std::string backend_name;
 	int target_devices;
@@ -74,21 +82,13 @@ TEST_P(InferenceEngineTestCase_G2, Load_P)
 	std::tie(backend_name, target_devices, model_paths) =
 			GetParam();
 
-	std::cout <<"Load test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-
-	inference_engine_config config = {
-		.backend_name = backend_name,
-		.target_devices = target_devices
-	};
+	std::cout <<"backend = " << backend_name << std::endl;
 
 	auto engine = std::make_unique<InferenceEngineCommon>();
 	ASSERT_TRUE(engine);
 
-	int ret = engine->BindBackend(&config);
-	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-		return;
-	}
+	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
 	inference_engine_capacity capacity;
 	ret = engine->GetBackendCapacity(&capacity);
@@ -110,7 +110,7 @@ TEST_P(InferenceEngineTestCase_G2, Load_P)
 	engine->UnbindBackend();
 }
 
-TEST_P(InferenceEngineTestCase_G3, Inference)
+TEST_P(InferenceEngineTestCase_G4, Inference_P)
 {
 	std::string backend_name;
 	int target_devices;
@@ -141,11 +141,7 @@ TEST_P(InferenceEngineTestCase_G3, Inference)
 		return;
 	}
 
-	std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-	inference_engine_config config = {
-		.backend_name = backend_name,
-		.target_devices = target_devices
-	};
+	std::cout <<"backend = " << backend_name << std::endl;
 
 	auto engine = std::make_unique<InferenceEngineCommon>();
 	if (engine == nullptr) {
@@ -153,18 +149,8 @@ TEST_P(InferenceEngineTestCase_G3, Inference)
 		ASSERT_TRUE(engine);
 		return;
 	}
 
-	int ret = engine->BindBackend(&config);
-	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-		ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-		return;
-	}
-
-	inference_engine_capacity capacity;
-	ret = engine->GetBackendCapacity(&capacity);
-	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
-	ret = engine->SetTargetDevices(target_devices);
-	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+	int ret = InferenceEngineInit_Two_Params(engine.get(), backend_name, target_devices);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
 	std::vector<std::string> models;
 	int model_type = GetModelInfo(model_paths, models);
@@ -243,36 +229,35 @@
 INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G1,
 	testing::Values(
-		// parameter order : backend name, target device
+		// parameter order : backend name
 		// ARMNN.
-		ParamType_Bind("armnn", INFERENCE_TARGET_CPU),
+		ParamType_One("armnn"),
 		// TFLITE.
-		ParamType_Bind("tflite", INFERENCE_TARGET_CPU)
+		ParamType_One("tflite")
 		/* TODO */
-
 	)
 );
 
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G2,
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3,
 	testing::Values(
 		// parameter order : backend name, target device, model path/s
 		// mobilenet based image classification model loading test
 		// ARMNN.
-		ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+		ParamType_Three("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
 		// TFLITE.
- ParamType_Load("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }) + ParamType_Three("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }) /* TODO */ ) ); -INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3, +INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G4, testing::Values( // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result // mobilenet based image classification test // ARMNN. - ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }), + ParamType_Many("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }), // TFLITE. - ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }) + ParamType_Many("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }) /* TODO */ ) -); +); \ No newline at end of file -- 2.34.1