*/
#include <inference_engine_error.h>
+#include "inference_engine_private_type.h"
#include "inference_engine_mlapi_private.h"
#include <fstream>
inference_backend_type_e type =
*(static_cast<inference_backend_type_e *>(data));
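+ // This plugin only wraps NNStreamer tensor filters, so the OpenCV backend type is rejected here along with out-of-range values.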
- if (INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type) {
+ if (INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type ||
+ INFERENCE_BACKEND_OPENCV == type) {
LOGE("Invalid backend type.");
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
ml_nnfw_hw_e nnfw_hw;
switch (mPluginType) {
- case INFERENCE_BACKEND_MLAPI:
- // For now, backend type is MLAPI and target device type is CUSTOM then
- // we will use Vivante NPU.
- // TODO. other NPU should be considered later. I.e., SRNPU.
- if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) ==
- INFERENCE_TARGET_CUSTOM) {
- nnfw_type = ML_NNFW_TYPE_VIVANTE;
- nnfw_hw = ML_NNFW_HW_ANY;
- LOGI("Vivante tensor filter will be used.");
- } else {
- LOGE("Invalid target device type.");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
- }
+ case INFERENCE_BACKEND_NPU_VIVANTE:
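+ // Use the dedicated Vivante tensor filter; ML_NNFW_HW_ANY leaves the hardware choice to ML Single API.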
+ nnfw_type = ML_NNFW_TYPE_VIVANTE;
+ nnfw_hw = ML_NNFW_HW_ANY;
+ LOGI("Vivante tensor filter will be used.");
if (access(model_str.c_str(), R_OK) ||
access(model_paths[1].c_str(), R_OK)) {
LOGE("model file path in [%s,%s]", model_str.c_str(), model_paths[1].c_str());
return INFERENCE_ENGINE_ERROR_INVALID_PATH;
}
break;
case INFERENCE_BACKEND_ONE:
case INFERENCE_BACKEND_ARMNN:
case INFERENCE_BACKEND_TFLITE:
- if (mPluginType == INFERENCE_BACKEND_ONE)
+ if (mPluginType == INFERENCE_BACKEND_ONE) {
nnfw_type = ML_NNFW_TYPE_NNFW;
- if (mPluginType == INFERENCE_BACKEND_ARMNN)
+
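+ // Map the requested inference target device to an NNFW hardware type.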
+ if (mTargetDevice == INFERENCE_TARGET_CPU) {
+ nnfw_hw = ML_NNFW_HW_CPU_NEON;
+ LOGI("Target device is NEON.");
+ } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+ nnfw_hw = ML_NNFW_HW_GPU;
+ LOGI("Target device is GPU");
+ } else {
+ LOGE("Invalid inference target device type.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ LOGI("NNFW tensor filter will be used.");
+ }
+
+ if (mPluginType == INFERENCE_BACKEND_ARMNN) {
nnfw_type = ML_NNFW_TYPE_ARMNN;
- if (mPluginType == INFERENCE_BACKEND_TFLITE)
+ LOGI("ARMNN tensor filter will be used.");
+ }
+
+ if (mPluginType == INFERENCE_BACKEND_TFLITE) {
nnfw_type = ML_NNFW_TYPE_TENSORFLOW_LITE;
+ LOGI("TFLITE tensor filter will be used.");
+ }
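+ // Common to ONE, ARMNN and TFLITE: the given model file must exist and be readable.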
if (access(model_str.c_str(), R_OK)) {
LOGE("model file path in [%s]", model_str.c_str());
return INFERENCE_ENGINE_ERROR_INVALID_PATH;
}
- if (mTargetDevice == INFERENCE_TARGET_CPU) {
- nnfw_hw = ML_NNFW_HW_CPU_NEON;
- LOGI("Target device is NEON.");
- } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
- nnfw_hw = ML_NNFW_HW_GPU;
- LOGI("Target device is GPU");
- } else {
- LOGE("Invalid inference target device type.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
-
- LOGI("NNFW tensor filter will be used.");
break;
// TODO.
default:
LOGE("Invalid plugin type.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
// TODO. flag supported accel device types according to a given ML Single API of nnstreamer backend.
- if (mPluginType == INFERENCE_BACKEND_MLAPI) {
+ if (mPluginType == INFERENCE_BACKEND_NPU_VIVANTE) {
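+ // For now the Vivante NPU is only exposed to callers through the CUSTOM accel device type.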
capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
} else {
capacity->supported_accel_devices = INFERENCE_TARGET_GPU |