Update support for various tensor filters 50/246250/2
author Inki Dae <inki.dae@samsung.com>
Wed, 14 Oct 2020 06:38:48 +0000 (15:38 +0900)
committer Inki Dae <inki.dae@samsung.com>
Wed, 28 Oct 2020 23:04:52 +0000 (08:04 +0900)
Change-Id: I2ea104cae60ba5a9049fcc8eaa1b0ec78a220112
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_mlapi.cpp

index d683b0e..7f10204 100644
@@ -15,6 +15,7 @@
  */
 
 #include <inference_engine_error.h>
+#include "inference_engine_private_type.h"
 #include "inference_engine_mlapi_private.h"
 
 #include <fstream>
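
The newly included inference_engine_private_type.h is presumably where the refined backend enumeration used below is declared. A minimal sketch of what it might contain; every enumerator named here appears somewhere in this change, but their grouping into the private header, their ordering, and their values are assumptions:

// Hypothetical sketch of the private backend enum; only the names are
// confirmed by this diff, values and ordering are assumed.
typedef enum {
	INFERENCE_BACKEND_NONE = -1,
	INFERENCE_BACKEND_OPENCV,
	INFERENCE_BACKEND_TFLITE,
	INFERENCE_BACKEND_ARMNN,
	INFERENCE_BACKEND_MLAPI,
	INFERENCE_BACKEND_ONE,
	INFERENCE_BACKEND_NPU_VIVANTE, /* introduced by this change */
	INFERENCE_BACKEND_MAX
} inference_backend_type_e;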
@@ -70,7 +71,8 @@ namespace MLAPIImpl
                inference_backend_type_e type =
                                *(static_cast<inference_backend_type_e *>(data));
 
-               if (INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type) {
+               if (INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type ||
+                               INFERENCE_BACKEND_OPENCV == type) {
                        LOGE("Invalid backend type.");
                        return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                }
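
Rejecting INFERENCE_BACKEND_OPENCV here is consistent with this plugin only fronting the ML Single API, which OpenCV is not served through. A hedged caller-side sketch, assuming this validation sits in the plugin's SetPrivateData-style entry point (the enclosing method name is not visible in this hunk):

// Hypothetical caller: the backend type travels as opaque private data.
inference_backend_type_e backend = INFERENCE_BACKEND_ONE;
int ret = engine->SetPrivateData(static_cast<void *>(&backend));
// Passing INFERENCE_BACKEND_OPENCV (or an out-of-range value) now
// yields INFERENCE_ENGINE_ERROR_NOT_SUPPORTED.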
@@ -120,19 +122,10 @@ namespace MLAPIImpl
                ml_nnfw_hw_e nnfw_hw;
 
                switch (mPluginType) {
-               case INFERENCE_BACKEND_MLAPI:
-                       // For now, backend type is MLAPI and target device type is CUSTOM then
-                       // we will use Vivante NPU.
-                       // TODO. other NPU should be considered later. I.e., SRNPU.
-                       if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) ==
-                               INFERENCE_TARGET_CUSTOM) {
-                               nnfw_type = ML_NNFW_TYPE_VIVANTE;
-                               nnfw_hw = ML_NNFW_HW_ANY;
-                               LOGI("Vivante tensor filter will be used.");
-                       } else {
-                               LOGE("Invalid target device type.");
-                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
-                       }
+               case INFERENCE_BACKEND_NPU_VIVANTE:
+                       nnfw_type = ML_NNFW_TYPE_VIVANTE;
+                       nnfw_hw = ML_NNFW_HW_ANY;
+                       LOGI("Vivante tensor filter will be used.");
 
                        if (access(model_str.c_str(), R_OK) ||
                                        access(model_paths[1].c_str(), R_OK)) {
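
The Vivante path requires both the compiled network and a companion library (model_paths[1]) to be readable. A sketch of how the selected nnfw_type/nnfw_hw pair would then reach NNStreamer, assuming model_str is the comma-joined file list this backend hands to the single-shot API:

#include <nnstreamer-single.h>

ml_single_h single = NULL;
// Tensor info is left NULL so the tensor filter derives it from the model.
int err = ml_single_open(&single, model_str.c_str(), NULL, NULL,
		nnfw_type, nnfw_hw);
if (err != ML_ERROR_NONE)
	return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;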
@@ -148,30 +141,38 @@ namespace MLAPIImpl
                case INFERENCE_BACKEND_ONE:
                case INFERENCE_BACKEND_ARMNN:
                case INFERENCE_BACKEND_TFLITE:
-                       if (mPluginType == INFERENCE_BACKEND_ONE)
+                       if (mPluginType == INFERENCE_BACKEND_ONE) {
                                nnfw_type = ML_NNFW_TYPE_NNFW;
-                       if (mPluginType == INFERENCE_BACKEND_ARMNN)
+
+                               if (mTargetDevice == INFERENCE_TARGET_CPU) {
+                                       nnfw_hw = ML_NNFW_HW_CPU_NEON;
+                                       LOGI("Target device is NEON.");
+                               } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+                                       nnfw_hw = ML_NNFW_HW_GPU;
+					LOGI("Target device is GPU.");
+                               } else {
+                                       LOGE("Invalid inference target device type.");
+                                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                               }
+
+                               LOGI("NNFW tensor filter will be used.");
+                       }
+
+                       if (mPluginType == INFERENCE_BACKEND_ARMNN) {
                                nnfw_type = ML_NNFW_TYPE_ARMNN;
-                       if (mPluginType == INFERENCE_BACKEND_TFLITE)
+                               LOGI("ARMNN tensor filter will be used.");
+                       }
+
+                       if (mPluginType == INFERENCE_BACKEND_TFLITE) {
                                nnfw_type = ML_NNFW_TYPE_TENSORFLOW_LITE;
+                               LOGI("TFLITE tensor filter will be used.");
+                       }
 
                        if (access(model_str.c_str(), R_OK)) {
                                LOGE("model file path in [%s]", model_str.c_str());
                                return INFERENCE_ENGINE_ERROR_INVALID_PATH;
                        }
 
-                       if (mTargetDevice == INFERENCE_TARGET_CPU) {
-                               nnfw_hw = ML_NNFW_HW_CPU_NEON;
-                               LOGI("Target device is NEON.");
-                       } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
-                               nnfw_hw = ML_NNFW_HW_GPU;
-                               LOGI("Target device is GPU");
-                       } else {
-                               LOGE("Invalid inference target device type.");
-                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-                       }
-
-                       LOGI("NNFW tensor filter will be used.");
                        break;
                // TODO.
                default:
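
Taken together, the switch now implements a per-backend mapping to ML Single API enums, with only the ONE branch deriving nnfw_hw from the target device. A condensed sketch of that mapping (MapBackend is a hypothetical helper; ML_NNFW_HW_ANY for the ARMNN and TFLITE rows is an assumption, since the hunk above does not set nnfw_hw for them explicitly):

#include <utility>

static std::pair<ml_nnfw_type_e, ml_nnfw_hw_e>
MapBackend(inference_backend_type_e plugin, inference_target_type_e device)
{
	switch (plugin) {
	case INFERENCE_BACKEND_NPU_VIVANTE:
		return { ML_NNFW_TYPE_VIVANTE, ML_NNFW_HW_ANY };
	case INFERENCE_BACKEND_ONE:
		// CPU maps to NEON, GPU to GPU; other devices are rejected above.
		return { ML_NNFW_TYPE_NNFW,
				 device == INFERENCE_TARGET_GPU ? ML_NNFW_HW_GPU
				                                : ML_NNFW_HW_CPU_NEON };
	case INFERENCE_BACKEND_ARMNN:
		return { ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_ANY };
	case INFERENCE_BACKEND_TFLITE:
		return { ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY };
	default:
		return { ML_NNFW_TYPE_ANY, ML_NNFW_HW_ANY };
	}
}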
@@ -591,7 +592,7 @@ namespace MLAPIImpl
                }
 
                // TODO. flag supported accel device types according to a given ML Single API of nnstreamer backend.
-               if (mPluginType == INFERENCE_BACKEND_MLAPI) {
+               if (mPluginType == INFERENCE_BACKEND_NPU_VIVANTE) {
                        capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
                } else {
                        capacity->supported_accel_devices = INFERENCE_TARGET_GPU |
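
Downstream, a caller can inspect the advertised devices; a hedged usage sketch, assuming the interface's GetBackendCapacity() entry point and that the Vivante NPU is exposed through the CUSTOM device bit, as the hunk above sets:

inference_engine_capacity capacity = {};
if (engine->GetBackendCapacity(&capacity) == INFERENCE_ENGINE_ERROR_NONE &&
		(capacity.supported_accel_devices & INFERENCE_TARGET_CUSTOM)) {
	// The Vivante NPU tensor filter path is available.
}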