class InferenceEngineCommonTest_3 : public testing::TestWithParam<ParamType_Infer> { };
std::map<std::string, int> Model_Formats = {
- { "caffemodel", INFERENCE_MODEL_CAFFE },
- { "pb", INFERENCE_MODEL_TF },
- { "tflite", INFERENCE_MODEL_TFLITE },
- { "t7", INFERENCE_MODEL_TORCH },
- { "weights", INFERENCE_MODEL_DARKNET },
- { "bin", INFERENCE_MODEL_DLDT },
- { "onnx", INFERENCE_MODEL_ONNX }
+ { "caffemodel", INFERENCE_MODEL_CAFFE },
+ { "pb", INFERENCE_MODEL_TF },
+ { "tflite", INFERENCE_MODEL_TFLITE },
+ { "t7", INFERENCE_MODEL_TORCH },
+ { "weights", INFERENCE_MODEL_DARKNET },
+ { "bin", INFERENCE_MODEL_DLDT },
+ { "onnx", INFERENCE_MODEL_ONNX }
};
enum {
- TEST_IMAGE_CLASSIFICATION = 0,
- TEST_OBJECT_DETECTION,
- TEST_FACE_DETECTION,
- TEST_FACILA_LANDMARK_DETECTION,
- TEST_POSE_ESTIMATION
+ TEST_IMAGE_CLASSIFICATION = 0,
+ TEST_OBJECT_DETECTION,
+ TEST_FACE_DETECTION,
+ TEST_FACILA_LANDMARK_DETECTION,
+ TEST_POSE_ESTIMATION
};
/*
 * Verifies that a backend library can be bound, queried for its
 * capacity and unbound again, for each (backend, device) parameter.
 */
TEST_P(InferenceEngineCommonTest, Bind)
{
	std::string backend_name;
	int target_devices;

	std::tie(backend_name, target_devices) = GetParam();

	std::cout << "Bind test : backend = " << backend_name << ", target device = " << target_devices << "\n";

	inference_engine_config config = {
		.backend_name = backend_name,
		.target_devices = target_devices
	};

	// NOTE(review): raw new/delete leaks the engine if an ASSERT fails
	// mid-test; acceptable for a test binary, but unique_ptr would be safer.
	InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
	ASSERT_TRUE(engine);

	int ret = engine->BindBackend(&config);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	inference_engine_capacity capacity;
	ret = engine->GetBackendCapacity(&capacity);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	engine->UnbindBackend();

	delete engine;
}
int GetModelInfo(std::vector <std::string> &model_paths, std::vector<std::string> &models)
{
- std::string model_path = model_paths[0];
- std::string ext_str = model_path.substr(model_path.find_last_of(".") + 1);
- std::map<std::string, int>::iterator key = Model_Formats.find(ext_str);
- int ret = key != Model_Formats.end() ? key->second : -1;
- EXPECT_NE(ret, -1);
-
- if (ret == -1) {
- return ret;
- }
+ std::string model_path = model_paths[0];
+ std::string ext_str = model_path.substr(model_path.find_last_of(".") + 1);
+ std::map<std::string, int>::iterator key = Model_Formats.find(ext_str);
+ int ret = key != Model_Formats.end() ? key->second : -1;
+ EXPECT_NE(ret, -1);
+
+ if (ret == -1) {
+ return ret;
+ }
switch (ret) {
case INFERENCE_MODEL_CAFFE:
break;
}
- return ret;
+ return ret;
}
/*
 * Allocates input and output tensor buffers for @engine.
 *
 * The backend is asked for its own buffers first; when it provides none,
 * buffers are allocated here from the layer properties. When the layer
 * properties carry no tensor info either (e.g. the OpenCV backend), the
 * allocation is left to the caller.
 *
 * Returns INFERENCE_ENGINE_ERROR_NONE; backend failures are recorded via
 * gtest EXPECT macros.
 */
int PrepareTensorBuffers(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &inputs,
				std::vector<inference_engine_tensor_buffer> &outputs)
{
	int ret = engine->GetInputTensorBuffers(inputs);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	if (inputs.empty()) {
		inference_engine_layer_property input_property;
		ret = engine->GetInputLayerProperty(input_property);
		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

		// If backend is OpenCV then the buffers will be allocated out of this function.
		if (input_property.tensor_infos.empty()) {
			return INFERENCE_ENGINE_ERROR_NONE;
		}

		for (int i = 0; i < (int)input_property.tensor_infos.size(); ++i) {
			inference_engine_tensor_info tensor_info = input_property.tensor_infos[i];
			inference_engine_tensor_buffer tensor_buffer;

			// Zero-init so an unsupported data type is caught by the
			// EXPECT_TRUE below instead of reading uninitialized memory.
			tensor_buffer.buffer = nullptr;
			tensor_buffer.size = 0;

			if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
				tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
				tensor_buffer.size = tensor_info.size * sizeof(float);
			} else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
				tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
				tensor_buffer.size = tensor_info.size;
			}

			EXPECT_TRUE(tensor_buffer.buffer);
			tensor_buffer.data_type = tensor_info.data_type;
			inputs.push_back(tensor_buffer);
		}
	}

	ret = engine->GetOutputTensorBuffers(outputs);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	if (outputs.empty()) {
		inference_engine_layer_property output_property;
		ret = engine->GetOutputLayerProperty(output_property);
		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

		// If backend is OpenCV then the buffers will be allocated out of this function.
		if (output_property.tensor_infos.empty()) {
			return INFERENCE_ENGINE_ERROR_NONE;
		}

		for (int i = 0; i < (int)output_property.tensor_infos.size(); ++i) {
			inference_engine_tensor_info tensor_info = output_property.tensor_infos[i];
			inference_engine_tensor_buffer tensor_buffer;

			// Same zero-init guard as on the input side.
			tensor_buffer.buffer = nullptr;
			tensor_buffer.size = 0;

			if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
				tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
				tensor_buffer.size = tensor_info.size * sizeof(float);
			} else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
				tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
				tensor_buffer.size = tensor_info.size;
			}

			EXPECT_TRUE(tensor_buffer.buffer);
			tensor_buffer.data_type = tensor_info.data_type;
			outputs.push_back(tensor_buffer);
		}
	}

	return INFERENCE_ENGINE_ERROR_NONE;
}
void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs, std::vector<inference_engine_tensor_buffer> &outputs)
void CopyFileToMemory(const char *file_name, inference_engine_tensor_buffer &buffer, unsigned int size)
{
- int fd = open(file_name, O_RDONLY);
- ASSERT_NE(fd, -1);
+ int fd = open(file_name, O_RDONLY);
+ ASSERT_NE(fd, -1);
- int num = read(fd, buffer.buffer, size);
- ASSERT_NE(num, -1);
+ int num = read(fd, buffer.buffer, size);
+ ASSERT_NE(num, -1);
- close(fd);
+ close(fd);
}
/*
 * Verifies that a model file can be loaded on the given backend/device:
 * bind, query capacity, select device, derive the model format from the
 * file extension, then Load().
 */
TEST_P(InferenceEngineCommonTest_2, Load)
{
	std::string backend_name;
	int target_devices;
	std::vector<std::string> model_paths;

	std::tie(backend_name, target_devices, model_paths) = GetParam();

	std::cout << "Load test : backend = " << backend_name << ", target device = " << (target_devices == INFERENCE_TARGET_CPU ? "CPU" : "GPU") << "\n";

	inference_engine_config config = {
		.backend_name = backend_name,
		.target_devices = target_devices
	};

	InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
	ASSERT_TRUE(engine);

	int ret = engine->BindBackend(&config);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	inference_engine_capacity capacity;
	ret = engine->GetBackendCapacity(&capacity);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	ret = engine->SetTargetDevices(target_devices);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	std::vector<std::string> models;
	int model_type = GetModelInfo(model_paths, models);
	ASSERT_NE(model_type, -1);

	ret = engine->Load(models, (inference_model_format_e)model_type);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	engine->UnbindBackend();

	delete engine;
}
// Converts the backend's raw output buffers into a tensor_t for the Verify*
// helpers, driven by the output layer properties.
// NOTE(review): this function is truncated — the body of the for loop (and
// everything after it) was lost in a bad merge, and the leftover diff
// markers below were never resolved. Restore from upstream; presumably the
// loop copies each tensor's dims into outputData.dimInfo and its buffer
// into outputData.data — TODO confirm.
void FillOutputResult(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &outputs, tensor_t &outputData)
{
- inference_engine_layer_property property;
- int ret = engine->GetOutputLayerProperty(property);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ inference_engine_layer_property property;
+ int ret = engine->GetOutputLayerProperty(property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
for (int i = 0; i < (int)property.tensor_infos.size(); ++i) {
inference_engine_tensor_info tensor_info = property.tensor_infos[i];
/*
 * Checks a classification output against the expected class index.
 *
 * Scans the float scores of the first output tensor (class count taken
 * from dimInfo[0][1]) for the arg-max and compares it with @answer.
 * Returns 1 on a match, 0 otherwise. If every score is <= 0 the arg-max
 * stays -1 and the check fails.
 */
int VerifyImageClassificationResults(tensor_t &outputData, int answer)
{
	std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
	std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());

	int idx = -1;
	int count = inferDimInfo[0][1];
	float value = 0.0f;

	float *prediction = reinterpret_cast<float*>(inferResults[0]);
	for (int i = 0; i < count; ++i) {
		if (value < prediction[i]) {
			value = prediction[i];
			idx = i;
		}
	}

	return idx == answer;
}
/*
 * Checks a detection output (TFLite_Detection_PostProcess layout) against
 * the expected box.
 *
 * Output tensor 0 holds normalized boxes as [top, left, bottom, right],
 * tensor 2 the scores and tensor 3 the detection count. The box of the
 * highest-scoring detection is scaled to @width/@height pixels and
 * compared with @answers = { left, top, right, bottom }. Returns 1 on an
 * exact match, 0 otherwise.
 */
int VerifyObjectDetectionResults(tensor_t &outputData, std::vector<int> &answers, int height, int width)
{
	std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
	std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
	float *boxes = reinterpret_cast<float *>(inferResults[0]);
	float *scores = reinterpret_cast<float *>(inferResults[2]);

	int num_of_detections = (int)(*reinterpret_cast<float *>(inferResults[3]));
	int left = 0, top = 0, right = 0, bottom = 0;
	float max_score = 0.0f;

	for (int i = 0; i < num_of_detections; ++i) {
		if (max_score < scores[i]) {
			max_score = scores[i];

			// Boxes are stored [y_min, x_min, y_max, x_max], normalized to [0, 1].
			left = (int)(boxes[i * 4 + 1] * width);
			top = (int)(boxes[i * 4 + 0] * height);
			right = (int)(boxes[i * 4 + 3] * width);
			bottom = (int)(boxes[i * 4 + 2] * height);
		}
	}

	return (answers[0] == left && answers[1] == top && answers[2] == right && answers[3] == bottom);
}
/*
 * Checks a pose-estimation heat-map output against expected key points.
 *
 * For each of the num_of_pose key points (dimInfo[0][3]) the peak of its
 * 96x96 heat map is located, scaled to @width/@height pixels, and compared
 * with @answers, laid out as num_of_pose x-coordinates followed by
 * num_of_pose y-coordinates. Returns 1 when every point matches exactly,
 * 0 otherwise.
 */
int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers, int height, int width)
{
	std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
	std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
	std::vector<int> result_x, result_y;

	const int heat_map_width = 96, heat_map_height = 96;
	int num_of_pose = inferDimInfo[0][3];
	float *data = static_cast<float *>(inferResults[0]);

	float ratio_x = (float)width / (float)inferDimInfo[0][2];
	float ratio_y = (float)height / (float)inferDimInfo[0][1];

	for (int idx = 0; idx < num_of_pose; ++idx) {
		float max_score = 0.0f;
		int max_x = 0, max_y = 0;

		for (int y = 0; y < heat_map_height; ++y) {
			for (int x = 0; x < heat_map_width; ++x) {
				// head_map[Yy][Xx][Kidx] = (Yy * heat_map_height * num_of_pose) + (Xx * num_of_pose) + Kidx
				float score = data[(y * heat_map_width * num_of_pose) + (x * num_of_pose) + idx];
				if (score > max_score) {
					max_score = score;
					max_x = x;
					max_y = y;
				}
			}
		}

		result_x.push_back((int)((float)(max_x + 1) * ratio_x));
		result_y.push_back((int)((float)(max_y + 1) * ratio_y));
	}

	int ret = 1;
	for (int i = 0; i < num_of_pose; ++i) {
		if (result_x[i] != answers[i] || result_y[i] != answers[num_of_pose + i]) {
			ret = 0;
			break;
		}
	}

	return ret;
}
/*
 * End-to-end inference test: binds a backend, configures input/output
 * layers, loads the model, feeds a dumped input image and verifies the
 * result against the expected answers for the given test type.
 */
TEST_P(InferenceEngineCommonTest_3, Inference)
{
	std::string backend_name;
	int target_devices;
	int test_type;
	int tensor_type;
	std::vector<std::string> image_paths;
	int height;
	int width;
	int ch;
	std::vector<std::string> input_layers;
	std::vector<std::string> output_layers;
	std::vector<std::string> model_paths;
	std::vector<int> answers;

	std::tie(backend_name, target_devices, test_type, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();

	std::string test_name;
	switch (test_type) {
	case TEST_IMAGE_CLASSIFICATION:
		test_name.append("Image classification");
		break;
	case TEST_OBJECT_DETECTION:
		test_name.append("Object detection");
		break;
	case TEST_FACE_DETECTION:
		test_name.append("Face detection");
		break;
	case TEST_FACILA_LANDMARK_DETECTION:
		test_name.append("Facila landmark detection");
		break;
	case TEST_POSE_ESTIMATION:
		test_name.append("Pose estimation");
		break;
	}

	std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << (target_devices == INFERENCE_TARGET_CPU ? "CPU" : "GPU") << "\n";

	inference_engine_config config = {
		.backend_name = backend_name,
		.target_devices = target_devices
	};

	InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
	ASSERT_TRUE(engine);

	int ret = engine->BindBackend(&config);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	inference_engine_capacity capacity;
	ret = engine->GetBackendCapacity(&capacity);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	ret = engine->SetTargetDevices(target_devices);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	std::vector<std::string> models;
	int model_type = GetModelInfo(model_paths, models);
	ASSERT_NE(model_type, -1);

	inference_engine_layer_property input_property;
	std::vector<std::string>::iterator iter;

	for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
		// NOTE(review): the data-type and size fields of this initializer
		// (and the layer_names push) were lost in a bad merge; reconstructed
		// from the test parameters — confirm against upstream.
		inference_engine_tensor_info tensor_info = {
			{ 1, ch, height, width },
			(inference_tensor_shape_type_e)TENSOR_SHAPE_NCHW,
			(inference_tensor_data_type_e)tensor_type,
			(size_t)(1 * ch * height * width)
		};

		input_property.layer_names.push_back(*iter);
		input_property.tensor_infos.push_back(tensor_info);
	}

	ret = engine->SetInputLayerProperty(input_property);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	inference_engine_layer_property output_property;

	for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
		output_property.layer_names.push_back(*iter);
	}

	ret = engine->SetOutputLayerProperty(output_property);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	ret = engine->Load(models, (inference_model_format_e)model_type);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	std::vector<inference_engine_tensor_buffer> inputs, outputs;
	ret = PrepareTensorBuffers(engine, inputs, outputs);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	// Copy input image tensor data from a given file to input tensor buffer.
	for (int i = 0; i < (int)image_paths.size(); ++i) {
		CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
	}

	ret = engine->Run(inputs, outputs);
	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);

	tensor_t result;
	FillOutputResult(engine, outputs, result);

	switch (test_type) {
	case TEST_IMAGE_CLASSIFICATION:
		ret = VerifyImageClassificationResults(result, answers[0]);
		EXPECT_EQ(ret, 1);
		break;
	case TEST_OBJECT_DETECTION:
		// 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
		ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
		EXPECT_EQ(ret, 1);
		break;
	case TEST_FACE_DETECTION:
		// 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
		ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
		EXPECT_EQ(ret, 1);
		break;
	case TEST_FACILA_LANDMARK_DETECTION:
		// TODO.
		break;
	case TEST_POSE_ESTIMATION:
		// 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
		ret = VerifyPoseEstimationResults(result, answers, 563, 750);
		EXPECT_EQ(ret, 1);
		break;
	}

	CleanupTensorBuffers(inputs, outputs);

	engine->UnbindBackend();

	delete engine;
}
// Bind test parameters: one case per (backend, target device) pair.
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
	testing::Values(
		// parameter order : backend name, target device
		ParamType("armnn", INFERENCE_TARGET_CPU),
		ParamType("armnn", INFERENCE_TARGET_GPU)
		/* TODO */
	)
);
// Load test parameters: (backend, target device, model path/s).
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_2,
	testing::Values(
		// parameter order : backend name, target device, model path/s
		// mobilenet based image classification model loading test
		ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
		ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
		/* TODO */
	)
);
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_3,
- testing::Values(
- // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
- // mobilenet based image classification test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // quantized mobilenet based image classification test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- // object detection test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- // face detection test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- // pose estimation test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 })
- /* TODO */
- )
- );
\ No newline at end of file
+ testing::Values(
+ // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+ // object detection test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+ 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+ 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 })
+ /* TODO */
+ )
+);
\ No newline at end of file