#include "inference_engine_common_impl.h"
#include "inference_engine_test_common.h"
-typedef std::tuple<std::string, int> ParamType_Bind;
-typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
-typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
+typedef std::tuple<std::string> ParamType_One;
+typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Three;
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Many;
-class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_Bind> { };
-class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Load> { };
-class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Infer> { };
+class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_One> { };
+class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Three> { };
+class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Many> { };
+
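+// Shared helper for tests that only need a bound backend: builds a config
+// from the backend name with the default (0) target-device mask and binds it.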
+static auto InferenceEngineInit_One_Param = [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
+ inference_engine_config config = { backend_name, 0 };
+
+ return engine->BindBackend(&config);
+};
+
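+// Shared helper for tests that run inference: binds the backend, checks that
+// it reports a capacity, and selects the requested target devices,
+// propagating the first error code so callers can assert on it.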
+static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine, std::string &backend_name, int &target_devices) -> int {
+ inference_engine_config config = { backend_name, target_devices };
+
+ int ret = engine->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
+
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
+
+ return engine->SetTargetDevices(config.target_devices);
+};
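+// Positive bind test: binding each supported backend should succeed, after
+// which the backend is unbound again.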
TEST_P(InferenceEngineTestCase_G1, Bind_P)
{
std::string backend_name;
- int target_devices;
-
- std::tie(backend_name, target_devices) = GetParam();
- std::cout <<"Bind test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+ std::tie(backend_name) = GetParam();
- inference_engine_config config = {
- .backend_name = backend_name,
- .target_devices = target_devices
- };
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
- int ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_capacity capacity;
- ret = engine->GetBackendCapacity(&capacity);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
engine->UnbindBackend();
}
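+// Positive load test: the backend should bind and expose its capacity for
+// each backend/model combination instantiated below.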
-TEST_P(InferenceEngineTestCase_G2, Load_P)
+TEST_P(InferenceEngineTestCase_G3, Load_P)
{
std::string backend_name;
int target_devices;
std::vector<std::string> model_paths;

std::tie(backend_name, target_devices, model_paths) = GetParam();
- std::cout <<"Load test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-
- inference_engine_config config = {
- .backend_name = backend_name,
- .target_devices = target_devices
- };
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
- int ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
+ int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
inference_engine_capacity capacity;
ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
engine->UnbindBackend();
}
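+// Positive inference test: binds the backend, selects target devices via the
+// two-parameter helper, loads the given model, and compares the inference
+// output against the expected answer indices.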
-TEST_P(InferenceEngineTestCase_G3, Inference)
+TEST_P(InferenceEngineTestCase_G4, Inference_P)
{
std::string backend_name;
int target_devices;
int test_type, iteration, tensor_type;
int height, width, ch;
std::vector<std::string> image_paths, input_layers, output_layers, model_paths;
std::vector<int> answers;

std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();

std::string test_name;
switch (test_type) {
case TEST_IMAGE_CLASSIFICATION:
test_name = "Image classification";
break;
default:
// Unrecognized test type: nothing to verify.
return;
}
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
- inference_engine_config config = {
- .backend_name = backend_name,
- .target_devices = target_devices
- };
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
- if (engine == nullptr) {
- return;
- }
+ ASSERT_TRUE(engine);
- int ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_capacity capacity;
- ret = engine->GetBackendCapacity(&capacity);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- ret = engine->SetTargetDevices(target_devices);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ int ret = InferenceEngineInit_Two_Params(engine.get(), backend_name, target_devices);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
std::vector<std::string> models;
int model_type = GetModelInfo(model_paths, models);
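+ // GetModelInfo() comes from inference_engine_test_common.h; it is assumed
+ // to derive the model format from the file extension and to return -1 when
+ // the paths cannot be classified, so fail fast before loading.
+ ASSERT_NE(model_type, -1);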
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G1,
testing::Values(
- // parameter order : backend name, target device
+ // parameter order : backend name
// ARMNN.
- ParamType_Bind("armnn", INFERENCE_TARGET_CPU),
+ ParamType_One("armnn"),
// TFLITE.
- ParamType_Bind("tflite", INFERENCE_TARGET_CPU)
+ ParamType_One("tflite")
/* TODO */
-
)
);
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G2,
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3,
testing::Values(
// parameter order : backend name, target device, model path/s
// mobilenet based image classification model loading test
// ARMNN.
- ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+ ParamType_Three("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
// TFLITE.
- ParamType_Load("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
+ ParamType_Three("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
/* TODO */
)
);
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3,
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G4,
testing::Values(
// parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
// mobilenet based image classification test
// ARMNN.
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ ParamType_Many("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
// TFLITE.
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 })
+ ParamType_Many("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 })
/* TODO */
)
-);
+);
\ No newline at end of file