test: Add set input and output layer test cases 32/233232/3
authorInki Dae <inki.dae@samsung.com>
Wed, 13 May 2020 06:20:28 +0000 (15:20 +0900)
committerInki Dae <inki.dae@samsung.com>
Wed, 13 May 2020 08:54:10 +0000 (17:54 +0900)
This patch adds the following new test cases:
 - SetInputLayer positive and negative tests.
 - SetOutputLayer positive and negative tests.

Change-Id: I3ec64d821b747b5c7a4365d7f56e1d16a15ec2a7
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_tc.cpp

index 5cfd884..0110fef 100644 (file)
 typedef std::tuple<std::string> ParamType_One;
 typedef std::tuple<std::string, int> ParamType_Two;
 typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Three;
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>> ParamType_Six;
 typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Many;
 
 class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_One> { };
 class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Two> { };
 class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Three> { };
-class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Many> { };
+class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Six> { };
+class InferenceEngineTestCase_G5 : public testing::TestWithParam<ParamType_Six> { };
+class InferenceEngineTestCase_G6 : public testing::TestWithParam<ParamType_Many> { };
 
 static auto InferenceEngineInit_One_Param = [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
        inference_engine_config config = { backend_name, 0 };
@@ -264,7 +267,195 @@ TEST_P(InferenceEngineTestCase_G2, Load_N2)
        EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PATH);
 }
 
-TEST_P(InferenceEngineTestCase_G4, Inference_P)
+TEST_P(InferenceEngineTestCase_G4, SetInputLayer_P)
+{
+       std::string backend_name;
+       int tensor_type;
+       size_t height;
+       size_t width;
+       size_t ch;
+       std::vector<std::string> input_layers;
+
+       std::tie(backend_name, tensor_type, height, width, ch, input_layers) = GetParam();
+
+       std::cout <<"backend = " << backend_name << std::endl;
+
+       auto engine = std::make_unique<InferenceEngineCommon>();
+       ASSERT_TRUE(engine);
+
+       int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       inference_engine_layer_property input_property;
+       std::vector<std::string>::iterator iter;
+
+       for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+               inference_engine_tensor_info tensor_info = {
+                       { 1, ch, height, width },
+                       (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+                       (inference_tensor_data_type_e)tensor_type,
+                       (size_t)(1 * ch * height * width)
+               };
+
+               input_property.layer_names.push_back(*iter);
+               input_property.tensor_infos.push_back(tensor_info);
+    }
+       ret = engine->SetInputLayerProperty(input_property);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+}
+
+TEST_P(InferenceEngineTestCase_G5, SetInputLayer_N1)
+{
+       std::string backend_name;
+       int tensor_type;
+       size_t height;
+       size_t width;
+       size_t ch;
+       std::vector<std::string> output_layers;
+
+       std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+
+       std::cout <<"backend = " << backend_name << std::endl;
+
+       auto engine = std::make_unique<InferenceEngineCommon>();
+       ASSERT_TRUE(engine);
+
+       int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       inference_engine_layer_property output_property;
+       std::vector<std::string>::iterator iter;
+
+       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+               inference_engine_tensor_info tensor_info = {
+                       { 1, ch, height, width },
+                       (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+                       (inference_tensor_data_type_e)tensor_type,
+                       (size_t)(1 * ch * height * width)
+               };
+
+               output_property.layer_names.push_back(*iter);
+               output_property.tensor_infos.push_back(tensor_info);
+    }
+       ret = engine->SetInputLayerProperty(output_property);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
+TEST_P(InferenceEngineTestCase_G1, SetInputLayer_N2)
+{
+       std::string backend_name;
+
+       std::tie(backend_name) = GetParam();
+
+       std::cout <<"backend = " << backend_name << std::endl;
+
+       auto engine = std::make_unique<InferenceEngineCommon>();
+       ASSERT_TRUE(engine);
+
+       int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       inference_engine_layer_property input_property;
+
+       ret = engine->SetInputLayerProperty(input_property);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
+TEST_P(InferenceEngineTestCase_G4, SetOutputLayer_P)
+{
+       std::string backend_name;
+       int tensor_type;
+       size_t height;
+       size_t width;
+       size_t ch;
+       std::vector<std::string> output_layers;
+
+       std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+
+       std::cout <<"backend = " << backend_name << std::endl;
+
+       auto engine = std::make_unique<InferenceEngineCommon>();
+       ASSERT_TRUE(engine);
+
+       int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       inference_engine_layer_property output_property;
+       std::vector<std::string>::iterator iter;
+
+       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+               inference_engine_tensor_info tensor_info = {
+                       { 1, ch, height, width },
+                       (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+                       (inference_tensor_data_type_e)tensor_type,
+                       (size_t)(1 * ch * height * width)
+               };
+
+               output_property.layer_names.push_back(*iter);
+               output_property.tensor_infos.push_back(tensor_info);
+    }
+       ret = engine->SetOutputLayerProperty(output_property);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+}
+
+TEST_P(InferenceEngineTestCase_G5, SetOutputLayer_N1)
+{
+       std::string backend_name;
+       int tensor_type;
+       size_t height;
+       size_t width;
+       size_t ch;
+       std::vector<std::string> output_layers;
+
+       std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+
+       std::cout <<"backend = " << backend_name << std::endl;
+
+       auto engine = std::make_unique<InferenceEngineCommon>();
+       ASSERT_TRUE(engine);
+
+       int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       inference_engine_layer_property output_property;
+       std::vector<std::string>::iterator iter;
+
+       for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+               inference_engine_tensor_info tensor_info = {
+                       { 1, ch, height, width },
+                       (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+                       (inference_tensor_data_type_e)tensor_type,
+                       (size_t)(1 * ch * height * width)
+               };
+
+               output_property.layer_names.push_back(*iter);
+               output_property.tensor_infos.push_back(tensor_info);
+    }
+       ret = engine->SetOutputLayerProperty(output_property);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
+TEST_P(InferenceEngineTestCase_G1, SetOutputLayer_N2)
+{
+       std::string backend_name;
+
+       std::tie(backend_name) = GetParam();
+
+       std::cout <<"backend = " << backend_name << std::endl;
+
+       auto engine = std::make_unique<InferenceEngineCommon>();
+       ASSERT_TRUE(engine);
+
+       int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       inference_engine_layer_property output_property;
+
+       ret = engine->SetOutputLayerProperty(output_property);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
+TEST_P(InferenceEngineTestCase_G6, Inference_P)
 {
        std::string backend_name;
        int target_devices;
@@ -417,6 +608,38 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3,
 
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G4,
		testing::Values(
			// Parameter order: backend name, tensor data type, height,
			// width, channel count, layer names (a vector; one entry here).
			// All fields are valid, so the SetInputLayer_P and
			// SetOutputLayer_P tests expect INFERENCE_ENGINE_ERROR_NONE.
			// ARMNN.
			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" }),
			// TFLITE.
			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" })
			/* TODO */
		)
);
+
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G5,
		testing::Values(
			// Parameter order: backend name, tensor data type, height,
			// width, channel count, layer names (a vector; one entry here).
			// Each tuple below is invalid in exactly one field; the
			// SetInputLayer_N1 and SetOutputLayer_N1 tests expect
			// INFERENCE_ENGINE_ERROR_INVALID_PARAMETER for every one.
			// ARMNN.
			ParamType_Six("armnn", -1, 224, 224, 3, { "test_name" }),                                     // invalid tensor data type
			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),      // zero height
			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),      // zero width
			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),    // zero channel count
			ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" }),             // empty layer name
			// TFLITE.
			ParamType_Six("tflite", -1, 224, 224, 3, { "test_name" }),                                    // invalid tensor data type
			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),     // zero height
			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),     // zero width
			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),   // zero channel count
			ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" })             // empty layer name
			/* TODO */
		)
);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G6,
+               testing::Values(
                        // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
                        // mobilenet based image classification test
                        // ARMNN.