typedef std::tuple<std::string> ParamType_One;
typedef std::tuple<std::string, int> ParamType_Two;
typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Three;
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>> ParamType_Six;
typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Many;
class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_One> { };
class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Two> { };
class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Three> { };
-class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Many> { };
+class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Six> { };
+class InferenceEngineTestCase_G5 : public testing::TestWithParam<ParamType_Six> { };
+class InferenceEngineTestCase_G6 : public testing::TestWithParam<ParamType_Many> { };
static auto InferenceEngineInit_One_Param = [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
inference_engine_config config = { backend_name, 0 };
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PATH);
}
-TEST_P(InferenceEngineTestCase_G4, Inference_P)
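+// Positive: a well-formed NCHW tensor description (batch 1, ch x height x width)
+// for every input layer should be accepted by SetInputLayerProperty().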
+TEST_P(InferenceEngineTestCase_G4, SetInputLayer_P)
+{
+ std::string backend_name;
+ int tensor_type;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> input_layers;
+
+ std::tie(backend_name, tensor_type, height, width, ch, input_layers) = GetParam();
+
+ std::cout <<"backend = " << backend_name << std::endl;
+
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property input_property;
+
+ for (const auto &layer : input_layers) {
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e)tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ input_property.layer_names.push_back(layer);
+ input_property.tensor_infos.push_back(tensor_info);
+ }
+ ret = engine->SetInputLayerProperty(input_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+}
+
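+// Negative: an invalid tensor data type, a zero-sized dimension or an empty
+// layer name should make SetInputLayerProperty() fail with INVALID_PARAMETER.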
+TEST_P(InferenceEngineTestCase_G5, SetInputLayer_N1)
+{
+ std::string backend_name;
+ int tensor_type;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> input_layers;
+
+ std::tie(backend_name, tensor_type, height, width, ch, input_layers) = GetParam();
+
+ std::cout << "backend = " << backend_name << std::endl;
+
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property input_property;
+
+ for (const auto &layer : input_layers) {
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e)tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ input_property.layer_names.push_back(layer);
+ input_property.tensor_infos.push_back(tensor_info);
+ }
+ ret = engine->SetInputLayerProperty(input_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
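+// Negative: an empty layer property (no layer names, no tensor infos) must be
+// rejected by SetInputLayerProperty().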
+TEST_P(InferenceEngineTestCase_G1, SetInputLayer_N2)
+{
+ std::string backend_name;
+
+ std::tie(backend_name) = GetParam();
+
+ std::cout <<"backend = " << backend_name << std::endl;
+
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property input_property;
+
+ ret = engine->SetInputLayerProperty(input_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
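+// Positive: the same well-formed tensor description should be accepted by
+// SetOutputLayerProperty() as well.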
+TEST_P(InferenceEngineTestCase_G4, SetOutputLayer_P)
+{
+ std::string backend_name;
+ int tensor_type;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> output_layers;
+
+ std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+
+ std::cout <<"backend = " << backend_name << std::endl;
+
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property output_property;
+
+ for (const auto &layer : output_layers) {
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e)tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ output_property.layer_names.push_back(layer);
+ output_property.tensor_infos.push_back(tensor_info);
+ }
+ ret = engine->SetOutputLayerProperty(output_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+}
+
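+// Negative: the same invalid tensor descriptions must make
+// SetOutputLayerProperty() fail with INVALID_PARAMETER.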
+TEST_P(InferenceEngineTestCase_G5, SetOutputLayer_N1)
+{
+ std::string backend_name;
+ int tensor_type;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> output_layers;
+
+ std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+
+ std::cout <<"backend = " << backend_name << std::endl;
+
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property output_property;
+
+ for (const auto &layer : output_layers) {
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e)tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ output_property.layer_names.push_back(layer);
+ output_property.tensor_infos.push_back(tensor_info);
+ }
+ ret = engine->SetOutputLayerProperty(output_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
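+// Negative: an empty layer property must be rejected by
+// SetOutputLayerProperty().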
+TEST_P(InferenceEngineTestCase_G1, SetOutputLayer_N2)
+{
+ std::string backend_name;
+
+ std::tie(backend_name) = GetParam();
+
+ std::cout <<"backend = " << backend_name << std::endl;
+
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property output_property;
+
+ ret = engine->SetOutputLayerProperty(output_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
+}
+
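+// Positive: full end-to-end inference path, now parameterized over
+// ParamType_Many via the dedicated G6 fixture.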
+TEST_P(InferenceEngineTestCase_G6, Inference_P)
{
std::string backend_name;
int target_devices;
);
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G4,
+ testing::Values(
+ // parameter order : backend name, tensor data type, height, width, channel count, layer names
+ // positive test cases for SetInputLayerProperty()/SetOutputLayerProperty()
+ // ARMNN.
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" }),
+ // TFLITE.
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" })
+ /* TODO */
+ )
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G5,
+ testing::Values(
+ // parameter order : backend name, tensor data type, height, width, channel count, layer names
+ // negative test cases for SetInputLayerProperty()/SetOutputLayerProperty()
+ // ARMNN.
+ ParamType_Six("armnn", -1, 224, 224, 3, { "test_name" }),
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" }),
+ // TFLITE.
+ ParamType_Six("tflite", -1, 224, 224, 3, { "test_name" }),
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" })
+ /* TODO */
+ )
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G6,
testing::Values(
// parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
// mobilenet based image classification test