{ "onnx", INFERENCE_MODEL_ONNX }
};
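+// Maps target device enum values to human-readable names for test log output.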
+std::map<int, std::string> Target_Formats = {
+ { INFERENCE_TARGET_CPU, "cpu" },
+ { INFERENCE_TARGET_GPU, "gpu" },
+ { INFERENCE_TARGET_CUSTOM, "custom" }
+};
+
enum {
TEST_IMAGE_CLASSIFICATION = 0,
TEST_OBJECT_DETECTION,
std::tie(backend_name, target_devices) = GetParam();
- std::cout <<"Bind test : backend = " << backend_name << ", target device = " << target_devices << "\n";
+ std::cout <<"Bind test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
inference_engine_config config = {
.backend_name = backend_name,
close(fd);
}
+
TEST_P(InferenceEngineCommonTest_2, Load)
{
std::string backend_name;
std::tie(backend_name, target_devices, model_paths) = GetParam();
- std::cout <<"Load test : backend = " << backend_name << ", target device = " << (target_devices == INFERENCE_TARGET_CPU ? "CPU" : "GPU") << "\n";
+ std::cout <<"Load test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
inference_engine_config config = {
.backend_name = backend_name,
break;
}
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << (target_devices == INFERENCE_TARGET_CPU ? "CPU" : "GPU") << "\n";
-
+ std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
inference_engine_config config = {
.backend_name = backend_name,
.target_devices = target_devices
break;
}
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << (target_devices == INFERENCE_TARGET_CPU ? "CPU" : "GPU") << "\n";
-
+ std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
inference_engine_config config = {
.backend_name = backend_name,
.target_devices = target_devices