MV_INFERENCE_BACKEND_ARMNN, /**< ARMNN (Since 6.0) */
MV_INFERENCE_BACKEND_MLAPI, /**< ML Single API of NNStreamer (Since 6.0) */
MV_INFERENCE_BACKEND_ONE, /**< On-device Neural Engine (Since 6.0) */
+ MV_INFERENCE_BACKEND_NNTRAINER, /**< NNTrainer (Since 7.0) */
+ MV_INFERENCE_BACKEND_SNPE, /**< SNPE Engine (Since 7.0) */
MV_INFERENCE_BACKEND_MAX /**< Backend MAX */
} mv_inference_backend_type_e;
{
LOGI("ENTER");
// Builds the table of inference backends known to this build.
// Each entry maps a backend enum to (engine library name, supported flag);
// the flag is initialized false here — presumably flipped to true later by
// CheckSupportedInferenceBackend() below (TODO confirm, its body is not
// visible in this hunk).
// NOTE(review): several lines in this chunk carry literal '+' diff markers
// and one statement is truncated (see below) — this is a collapsed patch
// hunk, not compilable C++; restore the exact text from the original patch.
+ // Mediavision can support several inference engines via ML Single API
+ // "mlapi" means that the inference backend is used via ML Single API.
mSupportedInferenceBackend.insert(std::make_pair(
MV_INFERENCE_BACKEND_OPENCV, std::make_pair("opencv", false)));
mSupportedInferenceBackend.insert(std::make_pair(
MV_INFERENCE_BACKEND_MLAPI, std::make_pair("mlapi", false)));
// ONE and SNPE are both routed through the common "mlapi" engine name.
mSupportedInferenceBackend.insert(std::make_pair(
MV_INFERENCE_BACKEND_ONE, std::make_pair("mlapi", false)));
+ mSupportedInferenceBackend.insert(std::make_pair(
+ MV_INFERENCE_BACKEND_SNPE, std::make_pair("mlapi", false)));
// NOTE(review): this patch adds MV_INFERENCE_BACKEND_NNTRAINER to the enum
// but no mSupportedInferenceBackend entry for it is visible here — confirm
// it is registered elsewhere, or intentionally unsupported at this layer.
CheckSupportedInferenceBackend();
// Maps model file extensions to INFERENCE_MODEL_* format enums.
// NOTE(review): the "mModelFormats.insert(" opening of the next statement
// was dropped when this hunk was collapsed — the line below is only the
// tail of it; restore the full statement from the original source.
std::make_pair<std::string, int>("onnx", INFERENCE_MODEL_ONNX));
mModelFormats.insert(std::make_pair<std::string, int>(
"nb", INFERENCE_MODEL_VIVANTE));
+ mModelFormats.insert(std::make_pair<std::string, int>(
+ "dlc", INFERENCE_MODEL_SNPE));
LOGI("LEAVE");
}
break;
case INFERENCE_MODEL_TFLITE:
case INFERENCE_MODEL_TORCH:
+ case INFERENCE_MODEL_SNPE:
models.push_back(mConfig.mWeightFilePath);
break;
default: