From a460f46a80290d6419b4bea74843fff35a9ea98c Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Tue, 2 Jun 2020 18:13:04 +0900 Subject: [PATCH] Change a backend type from VIVANTE to MLAPI Signed-off-by: Inki Dae --- src/inference_engine_nnstreamer.cpp | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/inference_engine_nnstreamer.cpp b/src/inference_engine_nnstreamer.cpp index e3ad84a..ddfb784 100644 --- a/src/inference_engine_nnstreamer.cpp +++ b/src/inference_engine_nnstreamer.cpp @@ -57,7 +57,7 @@ int InferenceMLAPI::SetPluginType(int type) { LOGI("ENTER"); - if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_VIVANTE != type) { + if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_MLAPI != type) { LOGE("Invalid backend type."); return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; } @@ -111,10 +111,18 @@ int InferenceMLAPI::Load(std::vector model_paths, inference_model_f ml_nnfw_hw_e nnfw_hw; switch (mPluginType) { - case INFERENCE_BACKEND_VIVANTE: - nnfw_type = ML_NNFW_TYPE_VIVANTE; - nnfw_hw = ML_NNFW_HW_ANY; - LOGI("Vivante tensor filter will be used."); + case INFERENCE_BACKEND_MLAPI: + // For now, if the backend type is MLAPI and the target device type is CUSTOM, + // then we will use the Vivante NPU. + // TODO: other NPUs should be considered later, e.g., SRNPU. + if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) == INFERENCE_TARGET_CUSTOM) { + nnfw_type = ML_NNFW_TYPE_VIVANTE; + nnfw_hw = ML_NNFW_HW_ANY; + LOGI("Vivante tensor filter will be used."); + } else { + LOGE("Invalid target device type."); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; + } break; case INFERENCE_BACKEND_NNFW: nnfw_type = ML_NNFW_TYPE_NNFW; -- 2.34.1