MV_INFERENCE_BACKEND_NONE = -1, /**< None */
MV_INFERENCE_BACKEND_OPENCV, /**< OpenCV */
MV_INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
+ MV_INFERENCE_BACKEND_ARMNN, /**< ARMNN */
MV_INFERENCE_BACKEND_MAX /**< Backend MAX */
} mv_inference_backend_type_e;
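With MV_INFERENCE_BACKEND_ARMNN added to the enum, an application can request the Arm NN backend through its engine configuration. A minimal sketch, assuming the public Tizen media vision C API (mv_engine_config_create(), mv_engine_config_set_int_attribute(), mv_engine_config_destroy(), and the MV_INFERENCE_BACKEND_TYPE attribute key); the helper name select_armnn_backend is illustrative only:

#include <mv_common.h>
#include <mv_inference.h>

/* Illustrative helper: create an engine config that asks the
 * inference module to use the ARMNN backend added above. */
static int select_armnn_backend(mv_engine_config_h *out_cfg)
{
	mv_engine_config_h cfg = NULL;
	int ret = mv_engine_config_create(&cfg);
	if (ret != MEDIA_VISION_ERROR_NONE)
		return ret;

	ret = mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_BACKEND_TYPE,
						 MV_INFERENCE_BACKEND_ARMNN);
	if (ret != MEDIA_VISION_ERROR_NONE) {
		mv_engine_config_destroy(cfg);
		return ret;
	}

	*out_cfg = cfg;
	return MEDIA_VISION_ERROR_NONE;
}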
mBackend = NULL;
mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_OPENCV, std::make_pair("opencv", false)));
mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_TFLITE, std::make_pair("tflite", false)));
+ mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_ARMNN, std::make_pair("armnn", false)));
CheckSupportedInferenceBackend();
}
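For reference, mSupportedInferenceBackend maps each backend enumerator to a pair of the backend's library name and a "supported" flag, which CheckSupportedInferenceBackend() presumably flips to true for backends actually available on the platform. A self-contained sketch of that structure and a lookup; the standalone declaration and IsBackendSupported() are written out here only for illustration:

#include <map>
#include <string>
#include <utility>

// Mirror of the member: backend type -> (backend name, supported?)
std::map<int, std::pair<std::string, bool>> mSupportedInferenceBackend;

// Returns true only if the backend is registered and marked supported.
bool IsBackendSupported(int backendType)
{
	auto it = mSupportedInferenceBackend.find(backendType);
	return it != mSupportedInferenceBackend.end() && it->second.second;
}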
if ( (backendType > MV_INFERENCE_BACKEND_NONE && backendType < MV_INFERENCE_BACKEND_MAX)
- && (backendType != MV_INFERENCE_BACKEND_TFLITE)) {
+ && (backendType != MV_INFERENCE_BACKEND_TFLITE) && (backendType != MV_INFERENCE_BACKEND_ARMNN)) {
/* access() returns 0 when the file exists, so a non-zero result
   means the given configuration file path is invalid. */
if (access(modelConfigFilePath, F_OK)) {
LOGE("modelConfigFilePath in [%s] ", modelConfigFilePath);
ret = MEDIA_VISION_ERROR_INVALID_PATH;
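The guard above validates modelConfigFilePath only for backends that load the network topology from a separate configuration file (e.g. OpenCV); TFLITE and ARMNN are excluded because both load a single self-contained model file such as a .tflite model. A hypothetical predicate capturing the same intent more readably (NeedsModelConfigFile is not part of the patch):

/* Hypothetical helper, equivalent to the compound condition above:
 * true for backends that read a separate model configuration file,
 * false for TFLITE and ARMNN, whose model file is self-contained. */
static bool NeedsModelConfigFile(int backendType)
{
	switch (backendType) {
	case MV_INFERENCE_BACKEND_TFLITE:
	case MV_INFERENCE_BACKEND_ARMNN:
		return false;
	default:
		return backendType > MV_INFERENCE_BACKEND_NONE &&
		       backendType < MV_INFERENCE_BACKEND_MAX;
	}
}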