From: Inki Dae Date: Tue, 2 Jun 2020 09:13:04 +0000 (+0900) Subject: Change a backend type from VIVANTE to MLAPI X-Git-Tag: submit/tizen/20200626.050805~9 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a460f46a80290d6419b4bea74843fff35a9ea98c;p=platform%2Fcore%2Fmultimedia%2Finference-engine-mlapi.git Change a backend type from VIVANTE to MLAPI Signed-off-by: Inki Dae --- diff --git a/src/inference_engine_nnstreamer.cpp b/src/inference_engine_nnstreamer.cpp index e3ad84a..ddfb784 100644 --- a/src/inference_engine_nnstreamer.cpp +++ b/src/inference_engine_nnstreamer.cpp @@ -57,7 +57,7 @@ int InferenceMLAPI::SetPluginType(int type) { LOGI("ENTER"); - if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_VIVANTE != type) { + if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_MLAPI != type) { LOGE("Invalid backend type."); return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; } @@ -111,10 +111,18 @@ int InferenceMLAPI::Load(std::vector model_paths, inference_model_f ml_nnfw_hw_e nnfw_hw; switch (mPluginType) { - case INFERENCE_BACKEND_VIVANTE: - nnfw_type = ML_NNFW_TYPE_VIVANTE; - nnfw_hw = ML_NNFW_HW_ANY; - LOGI("Vivante tensor filter will be used."); + case INFERENCE_BACKEND_MLAPI: + // For now, if the backend type is MLAPI and the target device type is + // CUSTOM, then we will use the Vivante NPU. + // TODO: other NPUs should be considered later, e.g., SRNPU. + if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) == INFERENCE_TARGET_CUSTOM) { + nnfw_type = ML_NNFW_TYPE_VIVANTE; + nnfw_hw = ML_NNFW_HW_ANY; + LOGI("Vivante tensor filter will be used."); + } else { + LOGE("Invalid target device type."); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; + } break; case INFERENCE_BACKEND_NNFW: nnfw_type = ML_NNFW_TYPE_NNFW;