#include "inference_engine_common_impl.h"
#include "inference_engine_test_common.h"
+static std::map<std::string, int> BackendTypes
+ = {
+ { "opencv", INFERENCE_BACKEND_OPENCV },
+ { "tflite-native", INFERENCE_BACKEND_TFLITE_NATIVE },
+ { "tflite", INFERENCE_BACKEND_TFLITE },
+ { "armnn", INFERENCE_BACKEND_ARMNN },
+ { "one", INFERENCE_BACKEND_ONE },
+ { "vivante", INFERENCE_BACKEND_VIVANTE }
+ };
+
typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
int, int, int, std::vector<std::string>,
std::vector<std::string>, std::vector<std::string>,
return;
}
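+ // Resolve the backend name from the test parameter to its backend type id
+ // and fail the test if the name is not registered in BackendTypes.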
+ std::map<std::string, int>::iterator type = BackendTypes.find(backend_name);
+ if (type == BackendTypes.end()) {
+ ASSERT_TRUE(0) << "Invalid backend name: " << backend_name;
+ return;
+ }
+
+ config.backend_type = type->second;
+
ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
}
+
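+ // Resolve the backend name to its backend type id, as above.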
+ std::map<std::string, int>::iterator type = BackendTypes.find(backend_name);
+ if (type == BackendTypes.end()) {
+ ASSERT_TRUE(0) << "Invalid backend name: " << backend_name;
+ return;
+ }
+
+ config.backend_type = type->second;
+
ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
}
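+ // Resolve the backend name to its backend type id, as above.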
+ std::map<std::string, int>::iterator type = BackendTypes.find(backend_name);
+ if (type == BackendTypes.end()) {
+ ASSERT_TRUE(0) << "Invalid backend name: " << backend_name;
+ return;
+ }
+
+ config.backend_type = type->second;
+
ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
// parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, expected inference result
// mobilenet based image classification test
// TFLITE.
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer(
+ "tflite-native", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ // parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, expected inference result
+ // mobilenet based image classification test
+ // TFLITE(through MLAPI).
ParamType_Infer(
"tflite", INFERENCE_TARGET_CPU,
TEST_IMAGE_CLASSIFICATION, 10,
{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
{ 3 }),
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU,
+ "tflite", INFERENCE_TARGET_CPU,
TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU,
+ "tflite", INFERENCE_TARGET_CPU,
TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
{ 451, 474, 714, 969 }),
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
{ 727, 225, 960, 555 }),
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
INFERENCE_ENGINE_PROFILER_MAX
};
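+// Lookup table mapping a backend name to the backend type id passed to
+// BindBackend() through inference_engine_config.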
+static std::map<std::string, int> BackendTypes
+ = {
+ { "opencv", INFERENCE_BACKEND_OPENCV },
+ { "tflite-native", INFERENCE_BACKEND_TFLITE_NATIVE },
+ { "armnn", INFERENCE_BACKEND_ARMNN },
+ { "tflite", INFERENCE_BACKEND_TFLITE },
+ { "one", INFERENCE_BACKEND_ONE },
+ { "vivante", INFERENCE_BACKEND_VIVANTE }
+ };
+
typedef std::tuple<std::string> ParamType_One;
typedef std::tuple<std::string, int> ParamType_Two;
typedef std::tuple<std::string, int, std::vector<std::string> > ParamType_Three;
int &target_devices) -> int {
inference_engine_config config = { backend_name, 0, target_devices };
- // backend_type is valid only in case backend_name is "mlapi".
- if (!backend_name.compare("mlapi")) {
- if (!(target_devices & INFERENCE_TARGET_CUSTOM))
- config.backend_type = INFERENCE_BACKEND_ONE;
- else
- config.backend_type = INFERENCE_BACKEND_MLAPI;
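+ // Resolve the backend name through BackendTypes instead of special-casing "mlapi".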
+ std::map<std::string, int>::iterator type = BackendTypes.find(backend_name);
+ if (type == BackendTypes.end()) {
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
+ config.backend_type = type->second;
+
int ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE)
return ret;
// OPENCV
ParamType_One("opencv"),
// ML Single API for NNStreamer.
- ParamType_One("mlapi")
+ ParamType_One("one")
/* TODO */
));
// OPENCV.
ParamType_Two("opencv", INFERENCE_TARGET_CPU),
// ML Single API for NNStreamer.
- ParamType_Two("mlapi", INFERENCE_TARGET_CPU)
+ ParamType_Two("one", INFERENCE_TARGET_CPU)
/* TODO */
));
"/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }),
// ML Single API for NNStreamer.
ParamType_Three(
- "mlapi", INFERENCE_TARGET_CPU,
+ "one", INFERENCE_TARGET_CPU,
{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" })
/* TODO */
));
ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
224, 3, { "test_name" }),
// ML Single API for NNStreamer.
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
224, 3, { "test_name" })
/* TODO */
));
ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
224, 3, { "" }),
// ML Single API for NNStreamer.
- ParamType_Six("mlapi", -1, 224, 224, 3, { "test_name" }),
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 0,
+ ParamType_Six("one", -1, 224, 224, 3, { "test_name" }),
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 0,
224, 3, { "test_name" }),
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
0, 3, { "test_name" }),
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
224, 0, { "test_name" }),
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
224, 3, { "" })
/* TODO */
));
{ 281 }),
// ML Single API for NNStreamer.
ParamType_Many(
- "mlapi", INFERENCE_ENGINE_PROFILER_OFF,
+ "one", INFERENCE_ENGINE_PROFILER_OFF,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },
{ 281 }),
// ML Single API for NNStreamer.
ParamType_Many(
- "mlapi", INFERENCE_ENGINE_PROFILER_FILE,
+ "one", INFERENCE_ENGINE_PROFILER_FILE,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },
{ 281 }),
// ML Single API for NNStreamer.
ParamType_Many(
- "mlapi", INFERENCE_ENGINE_PROFILER_CONSOLE,
+ "one", INFERENCE_ENGINE_PROFILER_CONSOLE,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },
// ML Single API for NNStreamer with On-device Neural Engine tensor filter.
ParamType_One_Int(INFERENCE_BACKEND_ONE),
// ML Single API for NNStreamer with Vivante NPU.
- ParamType_One_Int(INFERENCE_BACKEND_MLAPI)
+ ParamType_One_Int(INFERENCE_BACKEND_VIVANTE)
/* TODO */
));