From: Inki Dae
Date: Wed, 14 Oct 2020 06:38:48 +0000 (+0900)
Subject: Update various tensor filters support
X-Git-Tag: submit/tizen/20201104.021236~2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d7871b14f803a8d44e1ac835e49ce17131b05275;p=platform%2Fcore%2Fmultimedia%2Finference-engine-mlapi.git

Update various tensor filters support

Change-Id: I2ea104cae60ba5a9049fcc8eaa1b0ec78a220112
Signed-off-by: Inki Dae
---

diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index d683b0e..7f10204 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -15,6 +15,7 @@
  */
 
 #include
+#include "inference_engine_private_type.h"
 #include "inference_engine_mlapi_private.h"
 
 #include
@@ -70,7 +71,8 @@ namespace MLAPIImpl
 
 		inference_backend_type_e type = *(static_cast<inference_backend_type_e *>(data));
 
-		if (INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type) {
+		if (INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type ||
+			INFERENCE_BACKEND_OPENCV == type) {
 			LOGE("Invalid backend type.");
 			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
 		}
@@ -120,19 +122,10 @@
 		ml_nnfw_hw_e nnfw_hw;
 
 		switch (mPluginType) {
-		case INFERENCE_BACKEND_MLAPI:
-			// For now, backend type is MLAPI and target device type is CUSTOM then
-			// we will use Vivante NPU.
-			// TODO. other NPU should be considered later. I.e., SRNPU.
-			if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) ==
-				INFERENCE_TARGET_CUSTOM) {
-				nnfw_type = ML_NNFW_TYPE_VIVANTE;
-				nnfw_hw = ML_NNFW_HW_ANY;
-				LOGI("Vivante tensor filter will be used.");
-			} else {
-				LOGE("Invalid target device type.");
-				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
-			}
+		case INFERENCE_BACKEND_NPU_VIVANTE:
+			nnfw_type = ML_NNFW_TYPE_VIVANTE;
+			nnfw_hw = ML_NNFW_HW_ANY;
+			LOGI("Vivante tensor filter will be used.");
 
 			if (access(model_str.c_str(), R_OK) ||
 				access(model_paths[1].c_str(), R_OK)) {
@@ -148,30 +141,38 @@ namespace MLAPIImpl
 		case INFERENCE_BACKEND_ONE:
 		case INFERENCE_BACKEND_ARMNN:
 		case INFERENCE_BACKEND_TFLITE:
-			if (mPluginType == INFERENCE_BACKEND_ONE)
+			if (mPluginType == INFERENCE_BACKEND_ONE) {
 				nnfw_type = ML_NNFW_TYPE_NNFW;
-			if (mPluginType == INFERENCE_BACKEND_ARMNN)
+
+				if (mTargetDevice == INFERENCE_TARGET_CPU) {
+					nnfw_hw = ML_NNFW_HW_CPU_NEON;
+					LOGI("Target device is NEON.");
+				} else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+					nnfw_hw = ML_NNFW_HW_GPU;
+					LOGI("Target device is GPU");
+				} else {
+					LOGE("Invalid inference target device type.");
+					return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+				}
+
+				LOGI("NNFW tensor filter will be used.");
+			}
+
+			if (mPluginType == INFERENCE_BACKEND_ARMNN) {
 				nnfw_type = ML_NNFW_TYPE_ARMNN;
-			if (mPluginType == INFERENCE_BACKEND_TFLITE)
+				LOGI("ARMNN tensor filter will be used.");
+			}
+
+			if (mPluginType == INFERENCE_BACKEND_TFLITE) {
 				nnfw_type = ML_NNFW_TYPE_TENSORFLOW_LITE;
+				LOGI("TFLITE tensor filter will be used.");
+			}
 
 			if (access(model_str.c_str(), R_OK)) {
 				LOGE("model file path in [%s]", model_str.c_str());
 				return INFERENCE_ENGINE_ERROR_INVALID_PATH;
 			}
 
-			if (mTargetDevice == INFERENCE_TARGET_CPU) {
-				nnfw_hw = ML_NNFW_HW_CPU_NEON;
-				LOGI("Target device is NEON.");
-			} else if (mTargetDevice == INFERENCE_TARGET_GPU) {
-				nnfw_hw = ML_NNFW_HW_GPU;
-				LOGI("Target device is GPU");
-			} else {
-				LOGE("Invalid inference target device type.");
-				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-			}
-
-			LOGI("NNFW tensor filter will be used.");
 			break;
 		// TODO.
 		default:
@@ -591,7 +592,7 @@ namespace MLAPIImpl
 		}
 
 		// TODO. flag supported accel device types according to a given ML Single API of nnstreamer backend.
-		if (mPluginType == INFERENCE_BACKEND_MLAPI) {
+		if (mPluginType == INFERENCE_BACKEND_NPU_VIVANTE) {
 			capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
 		} else {
 			capacity->supported_accel_devices = INFERENCE_TARGET_GPU |
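
For context, below is a minimal sketch of the backend-selection logic this patch arrives at, pulled out into a free-standing helper. It is illustrative only, not part of the patch: the helper name MapBackendToNNFW and its free-function form are hypothetical, the enum and error values are the ones visible in the diff above, and the types are assumed to come from inference_engine_private_type.h and the ML Single API headers.

	// Illustrative sketch only. Maps the plugin's backend type (and, for
	// the ONE runtime, the requested target device) to the ML Single API
	// filter type and hardware hint, mirroring the switch in Load().
	static int MapBackendToNNFW(inference_backend_type_e plugin_type,
	                            inference_target_type_e target_device,
	                            ml_nnfw_type_e *nnfw_type,
	                            ml_nnfw_hw_e *nnfw_hw)
	{
		// Default the hw hint to ANY. The patch only selects a specific
		// hw for INFERENCE_BACKEND_ONE and leaves nnfw_hw unset for the
		// ARMNN/TFLITE paths, so this default is an assumption here.
		*nnfw_hw = ML_NNFW_HW_ANY;

		switch (plugin_type) {
		case INFERENCE_BACKEND_NPU_VIVANTE:
			// Vivante NPU is now requested directly by backend type,
			// no longer inferred from INFERENCE_TARGET_CUSTOM.
			*nnfw_type = ML_NNFW_TYPE_VIVANTE;
			return INFERENCE_ENGINE_ERROR_NONE;
		case INFERENCE_BACKEND_ONE:
			*nnfw_type = ML_NNFW_TYPE_NNFW;
			if (target_device == INFERENCE_TARGET_CPU)
				*nnfw_hw = ML_NNFW_HW_CPU_NEON;
			else if (target_device == INFERENCE_TARGET_GPU)
				*nnfw_hw = ML_NNFW_HW_GPU;
			else
				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
			return INFERENCE_ENGINE_ERROR_NONE;
		case INFERENCE_BACKEND_ARMNN:
			*nnfw_type = ML_NNFW_TYPE_ARMNN;
			return INFERENCE_ENGINE_ERROR_NONE;
		case INFERENCE_BACKEND_TFLITE:
			*nnfw_type = ML_NNFW_TYPE_TENSORFLOW_LITE;
			return INFERENCE_ENGINE_ERROR_NONE;
		default:
			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
		}
	}

The design point the sketch highlights: switching on an explicit per-runtime backend enum (INFERENCE_BACKEND_NPU_VIVANTE, _ONE, _ARMNN, _TFLITE) removes the overloaded meaning INFERENCE_BACKEND_MLAPI plus INFERENCE_TARGET_CUSTOM previously carried, and lets unsupported types such as INFERENCE_BACKEND_OPENCV be rejected up front in SetPrivateData.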