--- /dev/null
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+#include <tuple>
+#include <map>
+#include <fcntl.h>
+#include <unistd.h>
+#include <queue>
+#include <algorithm>
+
+#include "gtest/gtest.h"
+
+#include "inference_engine_error.h"
+#include "inference_engine_common_impl.h"
+
+using namespace InferenceEngineInterface::Common;
+
+typedef std::tuple<std::string, int> ParamType_Bind;
+typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
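+// ParamType_Infer field order : backend name, target device, test type,
+// iteration count, tensor data type, input image path(s), input height,
+// width and channel count, input layer names, output layer names,
+// model path(s), expected answers.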
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
+ int, int, int, std::vector<std::string>, std::vector<std::string>,
+ std::vector<std::string>, std::vector<int>> ParamType_Infer;
+
+class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_Bind> { };
+class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Load> { };
+class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Infer> { };
+
+std::map<std::string, int> Model_Formats = {
+ { "caffemodel", INFERENCE_MODEL_CAFFE },
+ { "pb", INFERENCE_MODEL_TF },
+ { "tflite", INFERENCE_MODEL_TFLITE },
+ { "t7", INFERENCE_MODEL_TORCH },
+ { "weights", INFERENCE_MODEL_DARKNET },
+ { "xml", INFERENCE_MODEL_DLDT },
+ { "onnx", INFERENCE_MODEL_ONNX }
+};
+
+std::map<int, std::string> Target_Formats = {
+ { INFERENCE_TARGET_CPU, "cpu" },
+ { INFERENCE_TARGET_GPU, "gpu" },
+ { INFERENCE_TARGET_CUSTOM, "custom" }
+};
+
+enum {
+ TEST_IMAGE_CLASSIFICATION = 0,
+ TEST_OBJECT_DETECTION,
+ TEST_FACE_DETECTION,
+ TEST_FACIAL_LANDMARK_DETECTION,
+ TEST_POSE_ESTIMATION
+};
+
+TEST_P(InferenceEngineTestCase_G1, Bind_P)
+{
+ std::string backend_name;
+ int target_devices;
+
+ std::tie(backend_name, target_devices) = GetParam();
+
+ std::cout <<"Bind test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+
+ inference_engine_config config = {
+ .backend_name = backend_name,
+ .target_devices = target_devices
+ };
+
+ InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+ ASSERT_TRUE(engine);
+
+ int ret = engine->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ engine->UnbindBackend();
+
+ delete engine;
+}
+
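+// Determines the model format from the extension of the first model path and
+// appends the file(s) the backend needs to load to |models| : weight and
+// topology files for Caffe/TF/DarkNet/DLDT/ONNX, and a single file for
+// TFLite/Torch. Returns the detected format, or -1 for an unknown extension.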
+int GetModelInfo(std::vector<std::string> &model_paths, std::vector<std::string> &models)
+{
+ std::string model_path = model_paths[0];
+ std::string ext_str = model_path.substr(model_path.find_last_of(".") + 1);
+ std::map<std::string, int>::iterator key = Model_Formats.find(ext_str);
+ int ret = key != Model_Formats.end() ? key->second : -1;
+ EXPECT_NE(ret, -1);
+
+ if (ret == -1) {
+ return ret;
+ }
+
+ switch (ret) {
+ case INFERENCE_MODEL_CAFFE:
+ case INFERENCE_MODEL_TF:
+ case INFERENCE_MODEL_DARKNET:
+ case INFERENCE_MODEL_DLDT:
+ case INFERENCE_MODEL_ONNX:
+ models.push_back(model_paths[0]);
+ models.push_back(model_paths[1]);
+ break;
+ case INFERENCE_MODEL_TFLITE:
+ case INFERENCE_MODEL_TORCH:
+ models.push_back(model_paths[0]);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
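+// Prepares input and output tensor buffers for inference. If the backend does
+// not provide its own buffers, FLOAT32 or UINT8 buffers are allocated here
+// from each layer's tensor information; such buffers are owned by the test
+// (owner_is_backend == 0) and must be released with CleanupTensorBuffers().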
+int PrepareTensorBuffers(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &inputs,
+ std::vector<inference_engine_tensor_buffer> &outputs)
+{
+ int ret = engine->GetInputTensorBuffers(inputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ if (inputs.empty()) {
+ inference_engine_layer_property input_property;
+ ret = engine->GetInputLayerProperty(input_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ // If the backend is OpenCV then the buffers will be allocated outside of this function.
+ if (input_property.tensor_infos.empty()) {
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ for (int i = 0; i < (int)input_property.tensor_infos.size(); ++i) {
+ inference_engine_tensor_info tensor_info = input_property.tensor_infos[i];
+ // Zero-initialize so an unsupported data type is caught by the EXPECT below
+ // instead of leaving the buffer pointer uninitialized.
+ inference_engine_tensor_buffer tensor_buffer = {};
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+ tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+ tensor_buffer.size = tensor_info.size * sizeof(float);
+ } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+ tensor_buffer.size = tensor_info.size;
+ }
+
+ EXPECT_TRUE(tensor_buffer.buffer);
+ tensor_buffer.owner_is_backend = 0;
+ tensor_buffer.data_type = tensor_info.data_type;
+ inputs.push_back(tensor_buffer);
+ }
+ }
+
+ ret = engine->GetOutputTensorBuffers(outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ if (outputs.empty()) {
+ inference_engine_layer_property output_property;
+ ret = engine->GetOutputLayerProperty(output_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ // If the backend is OpenCV then the buffers will be allocated outside of this function.
+ if (output_property.tensor_infos.empty()) {
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ for (int i = 0; i < (int)output_property.tensor_infos.size(); ++i) {
+ inference_engine_tensor_info tensor_info = output_property.tensor_infos[i];
+ // Zero-initialize so an unsupported data type is caught by the EXPECT below
+ // instead of leaving the buffer pointer uninitialized.
+ inference_engine_tensor_buffer tensor_buffer = {};
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+ tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+ tensor_buffer.size = tensor_info.size * sizeof(float);
+ } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+ tensor_buffer.size = tensor_info.size;
+ }
+
+ EXPECT_TRUE(tensor_buffer.buffer);
+ tensor_buffer.owner_is_backend = 0;
+ tensor_buffer.data_type = tensor_info.data_type;
+ outputs.push_back(tensor_buffer);
+ }
+ }
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
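+// Releases every tensor buffer allocated by the test and clears both vectors.
+// Buffers owned by a backend are skipped and left for the backend to release.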
+void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs, std::vector<inference_engine_tensor_buffer> &outputs)
+{
+ if (!inputs.empty()) {
+ std::vector<inference_engine_tensor_buffer>::iterator iter;
+ for (iter = inputs.begin(); iter != inputs.end(); iter++) {
+ inference_engine_tensor_buffer tensor_buffer = *iter;
+
+ // If the tensor buffer is owned by a backend then skip releasing it here;
+ // the backend will release it.
+ if (tensor_buffer.owner_is_backend) {
+ continue;
+ }
+
+ if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+ delete[] (float *)tensor_buffer.buffer;
+ else
+ delete[] (unsigned char *)tensor_buffer.buffer;
+ }
+ std::vector<inference_engine_tensor_buffer>().swap(inputs);
+ }
+
+ if (!outputs.empty()) {
+ std::vector<inference_engine_tensor_buffer>::iterator iter;
+ for (iter = outputs.begin(); iter != outputs.end(); iter++) {
+ inference_engine_tensor_buffer tensor_buffer = *iter;
+
+ // If the tensor buffer is owned by a backend then skip releasing it here;
+ // the backend will release it.
+ if (tensor_buffer.owner_is_backend) {
+ continue;
+ }
+
+ if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+ delete[] (float *)tensor_buffer.buffer;
+ else
+ delete[] (unsigned char *)tensor_buffer.buffer;
+ }
+ std::vector<inference_engine_tensor_buffer>().swap(outputs);
+ }
+}
+
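+// Reads up to |size| bytes of raw tensor data from |file_name| into the given
+// tensor buffer, failing the test if the file cannot be opened or read.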
+void CopyFileToMemory(const char *file_name, inference_engine_tensor_buffer &buffer, unsigned int size)
+{
+ int fd = open(file_name, O_RDONLY);
+ if (fd == -1) {
+ ASSERT_NE(fd, -1);
+ return;
+ }
+
+ ssize_t num = read(fd, buffer.buffer, size);
+ if (num == -1) {
+ close(fd);
+ ASSERT_NE(num, -1);
+ return;
+ }
+
+ close(fd);
+}
+
+
+TEST_P(InferenceEngineTestCase_G2, Load_P)
+{
+ std::string backend_name;
+ int target_devices;
+ std::vector<std::string> model_paths;
+
+ std::tie(backend_name, target_devices, model_paths) = GetParam();
+
+ std::cout <<"Load test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+
+ inference_engine_config config = {
+ .backend_name = backend_name,
+ .target_devices = target_devices
+ };
+
+ InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+ ASSERT_TRUE(engine);
+
+ int ret = engine->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector<std::string> models;
+ int model_type = GetModelInfo(model_paths, models);
+ if (model_type == -1) {
+ delete engine;
+ ASSERT_NE(model_type, -1);
+ return;
+ }
+
+ ret = engine->Load(models, (inference_model_format_e)model_type);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ engine->UnbindBackend();
+
+ delete engine;
+}
+
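+// Collects the shape and data of every output layer into |outputData|.
+// Quantized UINT8 outputs are converted to float buffers normalized to
+// the [0, 1] range before being stored.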
+void FillOutputResult(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &outputs, tensor_t &outputData)
+{
+ inference_engine_layer_property property;
+ int ret = engine->GetOutputLayerProperty(property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ for (int i = 0; i < (int)property.tensor_infos.size(); ++i) {
+ inference_engine_tensor_info tensor_info = property.tensor_infos[i];
+
+ std::vector<int> tmpDimInfo;
+ for (int j = 0; j < (int)tensor_info.shape.size(); j++) {
+ tmpDimInfo.push_back(tensor_info.shape[j]);
+ }
+
+ outputData.dimInfo.push_back(tmpDimInfo);
+
+ // Normalize the output tensor data, converting it to float, in the case of a quantized model.
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ unsigned char *ori_buf = (unsigned char *)outputs[i].buffer;
+ float *new_buf = new float[tensor_info.size];
+ ASSERT_TRUE(new_buf);
+
+ for (int j = 0; j < (int)tensor_info.size; j++) {
+ new_buf[j] = (float)ori_buf[j] / 255.0f;
+ }
+
+ // Replace the original buffer with the new one, and release the original one.
+ outputs[i].buffer = new_buf;
+ if (!outputs[i].owner_is_backend) {
+ delete[] ori_buf;
+ }
+ }
+
+ outputData.data.push_back((void *)outputs[i].buffer);
+ }
+}
+
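+// Returns 1 if the index of the highest-scoring class in the first output
+// tensor matches |answer|, and 0 otherwise.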
+int VerifyImageClassificationResults(tensor_t &outputData, int answer)
+{
+ std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
+ std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
+
+ int idx = -1;
+ int count = inferDimInfo[0][1];
+ float value = 0.0f;
+
+ float *prediction = reinterpret_cast<float*>(inferResults[0]);
+ for (int i = 0; i < count; ++i) {
+ if (value < prediction[i]) {
+ value = prediction[i];
+ idx = i;
+ }
+ }
+
+ return idx == answer;
+}
+
+TEST_P(InferenceEngineTestCase_G3, Inference)
+{
+ std::string backend_name;
+ int target_devices;
+ int test_type;
+ int iteration;
+ int tensor_type;
+ std::vector<std::string> image_paths;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> input_layers;
+ std::vector<std::string> output_layers;
+ std::vector<std::string> model_paths;
+ std::vector<int> answers;
+
+ std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+
+ if (iteration < 1) {
+ iteration = 1;
+ }
+
+ std::string test_name;
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ test_name.append("Image classification");
+ break;
+ default:
+ return;
+ }
+
+ std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
+ inference_engine_config config = {
+ .backend_name = backend_name,
+ .target_devices = target_devices
+ };
+
+ InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+ ASSERT_TRUE(engine);
+
+ int ret = engine->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector<std::string> models;
+ int model_type = GetModelInfo(model_paths, models);
+ if (model_type == -1) {
+ delete engine;
+ ASSERT_NE(model_type, -1);
+ return;
+ }
+
+ inference_engine_layer_property input_property;
+ std::vector<std::string>::iterator iter;
+
+ for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e)tensor_type,
+ (size_t)(1 * ch * height * width)
+ };
+
+ input_property.layer_names.push_back(*iter);
+ input_property.tensor_infos.push_back(tensor_info);
+ }
+
+ ret = engine->SetInputLayerProperty(input_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ inference_engine_layer_property output_property;
+
+ for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
+ output_property.layer_names.push_back(*iter);
+ }
+
+ ret = engine->SetOutputLayerProperty(output_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ ret = engine->Load(models, (inference_model_format_e)model_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ ret = PrepareTensorBuffers(engine, inputs, outputs);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete engine;
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ return;
+ }
+
+ // Copy input image tensor data from a given file to input tensor buffer.
+ for (int i = 0; i < (int)image_paths.size(); ++i) {
+ CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ }
+
+ for (int repeat = 0; repeat < iteration; ++repeat) {
+ ret = engine->Run(inputs, outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ }
+
+ tensor_t result;
+ FillOutputResult(engine, outputs, result);
+
+ ret = VerifyImageClassificationResults(result, answers[0]);
+ EXPECT_EQ(ret, 1);
+
+ CleanupTensorBuffers(inputs, outputs);
+
+ engine->UnbindBackend();
+ models.clear();
+
+ delete engine;
+}
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G1,
+ testing::Values(
+ // parameter order : backend name, target device
+ // ARMNN.
+ ParamType_Bind("armnn", INFERENCE_TARGET_CPU),
+ // TFLITE.
+ ParamType_Bind("tflite", INFERENCE_TARGET_CPU)
+ /* TODO */
+
+ )
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G2,
+ testing::Values(
+ // parameter order : backend name, target device, model path/s
+ // mobilenet based image classification model loading test
+ // ARMNN.
+ ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+ // TFLITE.
+ ParamType_Load("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
+ /* TODO */
+ )
+);
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3,
+ testing::Values(
+ // parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, expected answers
+ // mobilenet based image classification test
+ // ARMNN.
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ // TFLITE.
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 })
+ /* TODO */
+ )
+);
using namespace InferenceEngineInterface::Common;
-typedef std::tuple<std::string, int> ParamType;
-typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
-class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
-class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer> { };
class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer> { };
class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer> { };
TEST_POSE_ESTIMATION
};
-TEST_P(InferenceEngineCommonTest, Bind)
-{
- std::string backend_name;
- int target_devices;
-
- std::tie(backend_name, target_devices) = GetParam();
-
- std::cout <<"Bind test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-
- inference_engine_config config = {
- .backend_name = backend_name,
- .target_devices = target_devices
- };
-
- InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
- ASSERT_TRUE(engine);
-
- int ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_capacity capacity;
- ret = engine->GetBackendCapacity(&capacity);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- engine->UnbindBackend();
-
- delete engine;
-}
-
int GetModelInfo(std::vector <std::string> &model_paths, std::vector<std::string> &models)
{
std::string model_path = model_paths[0];
close(fd);
}
-
-TEST_P(InferenceEngineCommonTest_2, Load)
-{
- std::string backend_name;
- int target_devices;
- std::vector<std::string> model_paths;
-
- std::tie(backend_name, target_devices, model_paths) = GetParam();
-
- std::cout <<"Load test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-
- inference_engine_config config = {
- .backend_name = backend_name,
- .target_devices = target_devices
- };
-
- InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
- ASSERT_TRUE(engine);
-
- int ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete engine;
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_capacity capacity;
- ret = engine->GetBackendCapacity(&capacity);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- ret = engine->SetTargetDevices(target_devices);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- std::vector <std::string> models;
- int model_type = GetModelInfo(model_paths, models);
- if (model_type == -1) {
- delete engine;
- ASSERT_NE(model_type, -1);
- return;
- }
-
- ret = engine->Load(models, (inference_model_format_e)model_type);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- engine->UnbindBackend();
-
- delete engine;
-}
-
void FillOutputResult(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &outputs, tensor_t &outputData)
{
inference_engine_layer_property property;
// replace original buffer with new one, and release origin one.
outputs[i].buffer = new_buf;
if (!outputs[i].owner_is_backend) {
- delete[] ori_buf;
+ delete[] ori_buf;
}
}
delete engine;
}
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
- testing::Values(
- // parameter order : backend name, target device
- // ARMNN.
- ParamType("armnn", INFERENCE_TARGET_CPU),
- ParamType("armnn", INFERENCE_TARGET_GPU),
- // TFLITE.
- ParamType("tflite", INFERENCE_TARGET_CPU),
- ParamType("tflite", INFERENCE_TARGET_GPU),
- // DLDT.
- ParamType("dldt", INFERENCE_TARGET_CUSTOM),
- // OPENCV.
- ParamType("opencv", INFERENCE_TARGET_CPU),
- ParamType("opencv", INFERENCE_TARGET_GPU)
- /* TODO */
-
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_2,
- testing::Values(
- // parameter order : backend name, target device, model path/s
- // mobilenet based image classification model loading test
- // ARMNN.
- ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
- ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
- // TFLITE.
- ParamType_Load("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
- ParamType_Load("tflite", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
- // DLDT.
- ParamType_Load("dldt", INFERENCE_TARGET_CUSTOM, { "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml", "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" }),
- // OPENCV.
- ParamType_Load("opencv", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }),
- ParamType_Load("opencv", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" })
- /* TODO */
- )
-);
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTfliteTest,
testing::Values(