From: Inki Dae <inki.dae@samsung.com>
Date: Wed, 13 May 2020 06:20:28 +0000 (+0900)
Subject: test: Add set input and output layer test cases
X-Git-Tag: submit/tizen/20200602.011936~10
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=547f9d5750bef6628949b20b90e8003cf42b97b3;p=platform%2Fcore%2Fmultimedia%2Finference-engine-interface.git

test: Add set input and output layer test cases

This patch adds the following new test cases:
- SetInputLayer positive and negative tests.
- SetOutputLayer positive and negative tests.

Change-Id: I3ec64d821b747b5c7a4365d7f56e1d16a15ec2a7
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---

diff --git a/test/src/inference_engine_tc.cpp b/test/src/inference_engine_tc.cpp
index 5cfd884..0110fef 100644
--- a/test/src/inference_engine_tc.cpp
+++ b/test/src/inference_engine_tc.cpp
@@ -31,12 +31,15 @@
 typedef std::tuple<std::string> ParamType_One;
 typedef std::tuple<std::string, int> ParamType_Two;
 typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Three;
+typedef std::tuple<std::string, int, size_t, size_t, size_t, std::vector<std::string>> ParamType_Six;
 typedef std::tuple<std::string, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Many;
 
 class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_One> { };
 class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Two> { };
 class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Three> { };
-class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Many> { };
+class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Six> { };
+class InferenceEngineTestCase_G5 : public testing::TestWithParam<ParamType_Six> { };
+class InferenceEngineTestCase_G6 : public testing::TestWithParam<ParamType_Many> { };
 
 static auto InferenceEngineInit_One_Param = [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
 	inference_engine_config config = { backend_name, 0 };
@@ -264,7 +267,195 @@ TEST_P(InferenceEngineTestCase_G2, Load_N2)
 	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PATH);
 }
 
-TEST_P(InferenceEngineTestCase_G4, Inference_P)
+TEST_P(InferenceEngineTestCase_G4, SetInputLayer_P)
+{
+	std::string backend_name;
+	int tensor_type;
+	size_t height;
+	size_t width;
+	size_t ch;
+	std::vector<std::string> input_layers;
+
+	std::tie(backend_name, tensor_type, height, width, ch, input_layers) = GetParam();
+
+	std::cout << "backend = " << backend_name << std::endl;
+
+	auto engine = std::make_unique<InferenceEngineCommon>();
+	ASSERT_TRUE(engine);
+
+	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	inference_engine_layer_property input_property;
+	std::vector<std::string>::iterator iter;
+
+	for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+		inference_engine_tensor_info tensor_info = {
+			{ 1, ch, height, width },
+			(inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+			(inference_tensor_data_type_e)tensor_type,
+			(size_t)(1 * ch * height * width)
+		};
+
+		input_property.layer_names.push_back(*iter);
+		input_property.tensor_infos.push_back(tensor_info);
+	}
+
+	ret = engine->SetInputLayerProperty(input_property);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+}
+
+TEST_P(InferenceEngineTestCase_G5, SetInputLayer_N1)
+{
+	std::string backend_name;
+	int tensor_type;
+	size_t height;
+	size_t width;
+	size_t ch;
+	std::vector<std::string> output_layers;
+
+	std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+
+	std::cout << "backend = " << backend_name << std::endl;
+
+	auto engine = std::make_unique<InferenceEngineCommon>();
+	ASSERT_TRUE(engine);
+
+	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	inference_engine_layer_property output_property;
+	std::vector<std::string>::iterator iter;
+
+	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+		inference_engine_tensor_info tensor_info = {
+			{ 1, ch, height, width },
+			(inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+			(inference_tensor_data_type_e)tensor_type,
+			(size_t)(1 * ch * height * width)
+		};
+
+		output_property.layer_names.push_back(*iter);
+		output_property.tensor_infos.push_back(tensor_info);
+	}
+
+	ret = engine->SetInputLayerProperty(output_property);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
+TEST_P(InferenceEngineTestCase_G1, SetInputLayer_N2)
+{
+	std::string backend_name;
+
+	std::tie(backend_name) = GetParam();
+
+	std::cout << "backend = " << backend_name << std::endl;
+
+	auto engine = std::make_unique<InferenceEngineCommon>();
+	ASSERT_TRUE(engine);
+
+	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	inference_engine_layer_property input_property;
+
+	ret = engine->SetInputLayerProperty(input_property);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
+TEST_P(InferenceEngineTestCase_G4, SetOutputLayer_P)
+{
+	std::string backend_name;
+	int tensor_type;
+	size_t height;
+	size_t width;
+	size_t ch;
+	std::vector<std::string> output_layers;
+
+	std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+
+	std::cout << "backend = " << backend_name << std::endl;
+
+	auto engine = std::make_unique<InferenceEngineCommon>();
+	ASSERT_TRUE(engine);
+
+	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	inference_engine_layer_property output_property;
+	std::vector<std::string>::iterator iter;
+
+	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+		inference_engine_tensor_info tensor_info = {
+			{ 1, ch, height, width },
+			(inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+			(inference_tensor_data_type_e)tensor_type,
+			(size_t)(1 * ch * height * width)
+		};
+
+		output_property.layer_names.push_back(*iter);
+		output_property.tensor_infos.push_back(tensor_info);
+	}
+
+	ret = engine->SetOutputLayerProperty(output_property);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+}
+
+TEST_P(InferenceEngineTestCase_G5, SetOutputLayer_N1)
+{
+	std::string backend_name;
+	int tensor_type;
+	size_t height;
+	size_t width;
+	size_t ch;
+	std::vector<std::string> output_layers;
+
+	std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+
+	std::cout << "backend = " << backend_name << std::endl;
+
+	auto engine = std::make_unique<InferenceEngineCommon>();
+	ASSERT_TRUE(engine);
+
+	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	inference_engine_layer_property output_property;
+	std::vector<std::string>::iterator iter;
+
+	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+		inference_engine_tensor_info tensor_info = {
+			{ 1, ch, height, width },
+			(inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+			(inference_tensor_data_type_e)tensor_type,
+			(size_t)(1 * ch * height * width)
+		};
+
+		output_property.layer_names.push_back(*iter);
+		output_property.tensor_infos.push_back(tensor_info);
+	}
+
+	ret = engine->SetOutputLayerProperty(output_property);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
+TEST_P(InferenceEngineTestCase_G1, SetOutputLayer_N2)
+{
+	std::string backend_name;
+
+	std::tie(backend_name) = GetParam();
+
+	std::cout << "backend = " << backend_name << std::endl;
+
+	auto engine = std::make_unique<InferenceEngineCommon>();
+	ASSERT_TRUE(engine);
+
+	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	inference_engine_layer_property output_property;
+
+	ret = engine->SetOutputLayerProperty(output_property);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
+TEST_P(InferenceEngineTestCase_G6, Inference_P)
 {
 	std::string backend_name;
 	int target_devices;
@@ -416,6 +607,38 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3,
 );
 
 INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G4,
+	testing::Values(
+			// parameter order : backend name, input data type, height, width, channel count, layer name
+			// set input and output layer positive test
+			// ARMNN.
+			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" }),
+			// TFLITE.
+			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" })
+			/* TODO */
+	)
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G5,
+	testing::Values(
+			// parameter order : backend name, input data type, height, width, channel count, layer name
+			// set input and output layer negative test
+			// ARMNN.
+			ParamType_Six("armnn", -1, 224, 224, 3, { "test_name" }),
+			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),
+			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),
+			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),
+			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" }),
+			// TFLITE.
+			ParamType_Six("tflite", -1, 224, 224, 3, { "test_name" }),
+			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),
+			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),
+			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),
+			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" })
+			/* TODO */
+	)
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G6,
 	testing::Values(
 			// parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
 			// mobilenet based image classification test
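
---

Editor's sketch (not part of the commit): the snippet below illustrates how one ParamType_Six tuple ("backend name, input data type, height, width, channel count, layer name") is unpacked by the tests above into a per-layer tensor description, and why the buffer size comes out as 1 * ch * height * width (NCHW layout with batch size 1). TensorInfoSketch is a hypothetical stand-in for inference_engine_tensor_info, and the literal 0 stands in for the INFERENCE_TENSOR_DATA_TYPE_FLOAT32 enum value; the real struct and enums come from the inference-engine-interface headers.

	// Minimal, self-contained sketch; names marked above are assumptions.
	#include <cstddef>
	#include <iostream>
	#include <string>
	#include <tuple>
	#include <vector>

	// Hypothetical stand-in for inference_engine_tensor_info.
	struct TensorInfoSketch {
		std::vector<size_t> shape; // { N, C, H, W }
		int data_type;             // e.g. INFERENCE_TENSOR_DATA_TYPE_FLOAT32
		size_t size;               // element count: N * C * H * W
	};

	using ParamTypeSixSketch =
		std::tuple<std::string, int, size_t, size_t, size_t, std::vector<std::string>>;

	int main()
	{
		// Mirrors the positive G4 case: ("armnn", FLOAT32, 224, 224, 3, { "test_name" }).
		ParamTypeSixSketch param{ "armnn", 0 /* FLOAT32 placeholder */,
					  224, 224, 3, { "test_name" } };

		std::string backend_name;
		int tensor_type;
		size_t height, width, ch;
		std::vector<std::string> layer_names;
		std::tie(backend_name, tensor_type, height, width, ch, layer_names) = param;

		// One tensor description per layer name, exactly as the test loops do.
		for (const auto &name : layer_names) {
			TensorInfoSketch info = { { 1, ch, height, width },
						  tensor_type,
						  1 * ch * height * width };
			// 1 * 3 * 224 * 224 = 150528 elements for the positive cases.
			std::cout << name << ": " << info.size << " elements" << std::endl;
		}
		return 0;
	}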