Change the backend type from VIVANTE to MLAPI
author    Inki Dae <inki.dae@samsung.com>
Tue, 2 Jun 2020 09:13:04 +0000 (18:13 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 2 Jun 2020 09:13:04 +0000 (18:13 +0900)
Signed-off-by: Inki Dae <inki.dae@samsung.com>
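
With this change, MLAPI becomes a generic backend type and the concrete
NNStreamer tensor filter is chosen from the target device bits rather than
from the backend type itself. Below is a minimal standalone sketch of the new
selection path; the numeric enum values and the helper select_nnfw_type() are
illustration-only stubs, not the real definitions from the inference-engine
and ML API headers:

    #include <cstdio>

    // Stub values for illustration only; the real definitions come from
    // the inference-engine-interface and ML API headers.
    enum {
        INFERENCE_BACKEND_NNFW  = 1,
        INFERENCE_BACKEND_MLAPI = 2,
    };
    enum {
        INFERENCE_TARGET_CPU    = 1 << 0,
        INFERENCE_TARGET_CUSTOM = 1 << 2,
    };
    enum ml_nnfw_type_e { ML_NNFW_TYPE_NNFW, ML_NNFW_TYPE_VIVANTE };

    // Mirrors the dispatch added by this patch: returns 0 on success,
    // -1 when the backend/device combination is not supported.
    static int select_nnfw_type(int plugin_type, int target_device,
                                ml_nnfw_type_e *out)
    {
        switch (plugin_type) {
        case INFERENCE_BACKEND_MLAPI:
            // MLAPI plus a CUSTOM target device currently maps to the
            // Vivante NPU tensor filter.
            if ((target_device & INFERENCE_TARGET_CUSTOM) ==
                    INFERENCE_TARGET_CUSTOM) {
                *out = ML_NNFW_TYPE_VIVANTE;
                return 0;
            }
            return -1;
        case INFERENCE_BACKEND_NNFW:
            *out = ML_NNFW_TYPE_NNFW;
            return 0;
        default:
            return -1;
        }
    }

    int main()
    {
        ml_nnfw_type_e type;
        if (select_nnfw_type(INFERENCE_BACKEND_MLAPI,
                             INFERENCE_TARGET_CUSTOM, &type) == 0)
            std::printf("selected filter: %d (Vivante)\n", type);
        return 0;
    }

Supporting another NPU behind the MLAPI type (e.g., the SRNPU mentioned in
the patch's TODO) would then only require another device-type branch inside
the MLAPI case.
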
src/inference_engine_nnstreamer.cpp

index e3ad84a..ddfb784 100644
@@ -57,7 +57,7 @@ int InferenceMLAPI::SetPluginType(int type)
 {
     LOGI("ENTER");
 
-       if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_VIVANTE != type) {
+       if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_MLAPI != type) {
                LOGE("Invalid backend type.");
                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
        }
@@ -111,10 +111,18 @@ int InferenceMLAPI::Load(std::vector<std::string> model_paths, inference_model_f
        ml_nnfw_hw_e nnfw_hw;
 
        switch (mPluginType) {
-       case INFERENCE_BACKEND_VIVANTE:
-               nnfw_type = ML_NNFW_TYPE_VIVANTE;
-               nnfw_hw = ML_NNFW_HW_ANY;
-               LOGI("Vivante tensor filter will be used.");
+       case INFERENCE_BACKEND_MLAPI:
+               // For now, when the backend type is MLAPI and the target
+               // device type is CUSTOM, use the Vivante NPU.
+               // TODO: consider other NPUs later, e.g., SRNPU.
+               if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) == INFERENCE_TARGET_CUSTOM) {
+                       nnfw_type = ML_NNFW_TYPE_VIVANTE;
+                       nnfw_hw = ML_NNFW_HW_ANY;
+                       LOGI("Vivante tensor filter will be used.");
+               } else {
+                       LOGE("Invalid target device type.");
+                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+               }
                break;
        case INFERENCE_BACKEND_NNFW:
                nnfw_type = ML_NNFW_TYPE_NNFW;