mlapi backend path refactoring (tizen_refactoring)
author    Inki Dae <inki.dae@samsung.com>
Thu, 18 Mar 2021 12:04:25 +0000 (21:04 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 23 Mar 2021 06:51:18 +0000 (15:51 +0900)
Change-Id: If2f310171671e911717792538cc6977c46f2bcd8
Signed-off-by: Inki Dae <inki.dae@samsung.com>
include/inference_engine_common_impl.h
src/inference_engine_common_impl.cpp
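
Note: this change replaces the MLAPI-only configuration file with a generic backend path configuration, /etc/inference/inference_engine_backend_path.ini. The sketch below shows the layout that file is expected to have, derived from the section:key names parsed in the diff; the concrete values are illustrative only (VIVANTE and TRIV2 are the recognized NPU types, and MLAPI is the only recognized API framework name).

; /etc/inference/inference_engine_backend_path.ini (illustrative example)
[NPU backend]
; one of: VIVANTE, TRIV2
type = TRIV2

[TFLITE]
; only MLAPI is recognized; omit the key to keep the internal API framework
API framework = MLAPI

[ARMNN]
API framework = MLAPI

[ONE]
API framework = MLAPI

[OPENCV]
API framework = MLAPI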

include/inference_engine_common_impl.h
index c927ee62a5174d8aa8e03c2d7662592d1f974d03..07183f717b61da6dd6f539e05483a9f4202ec9e4 100644 (file)
@@ -23,6 +23,7 @@
 #include "inference_engine_common.h"
 #include "inference_engine_type.h"
 #include "inference_engine_common_profiler.h"
+#include <iniparser.h>
 
 using namespace InferenceEngineInterface::Profiler;
 
@@ -245,6 +246,8 @@ namespace Common
                int DumpProfileToFile(const std::string filename = "dump.txt");
 
        private:
+               int GetApiFrameworkType(dictionary *dict, const char *section_name);
+               int GetNpuBackendType(dictionary *dict, const char *section_name);
                int InitBackendEngine(const std::string &backend_path,
                                                          int backend_type, int device_type);
                int CheckTensorBuffers(
@@ -258,9 +261,6 @@ namespace Common
                // In default, we use profiler.
                bool mUseProfiler;
                unsigned int mProfilerDumpType;
-               // 0 : default tensor filter type for MLAPI with CPU and GPU.
-               // 1 : default tensor filter type for MLAPI with NPU.
-               uint32_t mDefault_MLAPI_Backend[2];
 
        protected:
                void *mBackendModule;
src/inference_engine_common_impl.cpp
index d1997a90117e4c36afd482f93f48e1ace5ebc7f5..545211790f5f34bae9d6442afd46feee2e45c77e 100644 (file)
@@ -48,8 +48,30 @@ namespace InferenceEngineInterface
 {
 namespace Common
 {
-       const char *INFERENCE_MLAPI_INI_FILENAME =
-                               "/etc/inference/inference_engine_mlapi_backend.ini";
+       const char *BACKEND_PATH_INI_FILENAME =
+                               "/etc/inference/inference_engine_backend_path.ini";
+       std::map<std::string, inference_backend_type_e> sApiFw =
+       {
+               { "MLAPI", INFERENCE_BACKEND_MLAPI }
+       };
+
+       std::map<std::string, inference_backend_type_e> sBackend =
+       {
+               { "TFLITE", INFERENCE_BACKEND_TFLITE },
+               { "ARMNN", INFERENCE_BACKEND_ARMNN },
+               { "ONE", INFERENCE_BACKEND_ONE },
+               { "OPENCV", INFERENCE_BACKEND_OPENCV }
+       };
+
+       std::map<std::string, inference_backend_npu_type_e> sNpuBackend =
+       {
+               { "VIVANTE", INFERENCE_BACKEND_NPU_VIVANTE },
+               { "TRIV2", INFERENCE_BACKEND_NPU_TRIV2}
+       };
+
+       int sApiFwForTFLITE = -1, sApiFwForARMNN = -1;
+       int sApiFwForONE = -1, sApiFwForOPENCV = -1;
+       int sBackendForNpu = -1;
 
        InferenceEngineCommon::InferenceEngineCommon() :
                        mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
@@ -74,67 +96,99 @@ namespace Common
                LOGW("LEAVE");
        }
 
+       int InferenceEngineCommon::GetNpuBackendType(dictionary *dict, const char *section_name)
+       {
+               // Parse the NPU backend type from the given ini section.
+               const char *parsed_str = iniparser_getstring(dict, section_name, NULL);
+               if (parsed_str == NULL) {
+                       LOGI("No type is defined for '%s'.", section_name);
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               std::map<std::string, inference_backend_npu_type_e>::iterator it = sNpuBackend.find(parsed_str);
+               if (it == sNpuBackend.end()) {
+                       LOGE("Invalid NPU backend name.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               return it->second;
+       }
+
+       int InferenceEngineCommon::GetApiFrameworkType(dictionary *dict, const char *section_name)
+       {
+               // Parse the API framework type from the given ini section.
+               const char *parsed_str = iniparser_getstring(dict, section_name, NULL);
+               if (parsed_str == NULL) {
+                       LOGI("No type is defined for '%s'.", section_name);
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               std::map<std::string, inference_backend_type_e>::iterator it = sApiFw.find(parsed_str);
+               if (it == sApiFw.end()) {
+                       LOGE("Invalid API framework name.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               return it->second;
+       }
+
        int InferenceEngineCommon::LoadConfigFile(std::string ini_file_path)
        {
                int ret = INFERENCE_ENGINE_ERROR_NONE;
-               int npu_type = -1, cpu_and_gpu_type = -1;
-               char *default_type_for_npu = NULL, *default_type_for_cpu_gpu = NULL;
+               std::string strNpuBackend = "", strApiFwName = "";
 
                if (ini_file_path.empty())
-                       ini_file_path = INFERENCE_MLAPI_INI_FILENAME;
+                       ini_file_path = BACKEND_PATH_INI_FILENAME;
 
                LOGI("%s configuration file will be used.\n", ini_file_path.c_str());
 
                dictionary *dict = iniparser_load(ini_file_path.c_str());
                if (dict == NULL) {
-                       LOGE("Fail to load %s file.\n", ini_file_path.c_str());
+                       LOGW("Failed to load %s file.\n", ini_file_path.c_str());
+                       LOGW("So the default backend path will not be used.\n");
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               default_type_for_npu = (char *)iniparser_getstring(dict,
-                                       "inference mlapi npu backend:default tensor filter type",
-                                       NULL);
-               if (default_type_for_npu == NULL) {
-                       LOGE("Fail to load default tensor filter type for MLAPI with NPU.");
-                       ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-                       goto out;
+               sBackendForNpu = GetNpuBackendType(dict, "NPU backend:type");
+               if (sBackendForNpu < 0) {
+                       LOGI("No NPU backend type is defined in the ini file.");
+                       LOGI("So this platform cannot use NPU acceleration for inference.");
                }
 
-               default_type_for_cpu_gpu = (char *)iniparser_getstring(dict,
-                                       "inference mlapi cpu and gpu backend:default tensor filter type",
-                                       NULL);
-               if (default_type_for_cpu_gpu == NULL) {
-                       LOGE("Fail to load default tensor filter type for MLAPI with CPU and GPU.");
-                       ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-                       goto out;
+               LOGI("API FW = %s, NPU = %d", sBackendForNpu > 0 ? "MLAPI" : "Internal", sBackendForNpu);
+
+               sApiFwForTFLITE = GetApiFrameworkType(dict, "TFLITE:API framework");
+               if (sApiFwForTFLITE < 0) {
+                       LOGI("No API framework type is defined in the ini file.");
+                       LOGI("So the internal API will be used for TFLITE by default.");
                }
 
-               npu_type = atoi(default_type_for_npu);
-               cpu_and_gpu_type = atoi(default_type_for_cpu_gpu);
+               LOGI("API FW = %s for TFLITE.", sApiFwForTFLITE > 0 ? "MLAPI" : "Internal");
 
-               // Check if loaded configuration value is valid or not.
-               if (npu_type != INFERENCE_BACKEND_NPU_VIVANTE &&
-                       npu_type != INFERENCE_BACKEND_NPU_TRIV2) {
-                       LOGE("Invalid tensor filter type for MLAPI with NPU.");
-                       ret = INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-                       goto out;
+               sApiFwForARMNN = GetApiFrameworkType(dict, "ARMNN:API framework");
+               if (sApiFwForARMNN < 0) {
+                       LOGI("No API framework type is defined in the ini file.");
+                       LOGI("So the internal API will be used for ARMNN by default.");
                }
 
-               if (cpu_and_gpu_type != INFERENCE_BACKEND_TFLITE) {
-                       LOGE("Invalid tensor filter type for MLAPI with CPU and GPU.");
-                       ret = INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-                       goto out;
+               LOGI("API FW = %s for ARMNN.", sApiFwForARMNN > 0 ? "MLAPI" : "Internal");
+
+               sApiFwForONE = GetApiFrameworkType(dict, "ONE:API framework");
+               if (sApiFwForONE < 0) {
+                       LOGI("No API framework type is defined in the ini file.");
+                       LOGI("So the internal API will be used for ONE by default.");
                }
 
-               LOGI("npu = %d, gpu = %d", npu_type, cpu_and_gpu_type);
+               LOGI("API FW = %s for ONE.", sApiFwForONE > 0 ? "MLAPI" : "Internal");
 
-               // TODO. Update mDefault_MLAPI_Backend type using ini config file.
-               // 0 : default tensor filter type for MLAPI with CPU and GPU.
-               // 1 : default tensor filter type for MLAPI with NPU.
-               mDefault_MLAPI_Backend[0] = cpu_and_gpu_type;
-               mDefault_MLAPI_Backend[1] = npu_type;
+               sApiFwForOPENCV = GetApiFrameworkType(dict, "OPENCV:API framework");
+               if (sApiFwForOPENCV < 0) {
+                       LOGI("No API framework type is defined in the ini file.");
+                       LOGI("So the internal API will be used for OPENCV by default.");
+               }
+
+               LOGI("API FW = %s for OPENCV.", sApiFwForOPENCV > 0 ? "MLAPI" : "Internal");
 
-out:
                iniparser_freedict(dict);
 
                return ret;
@@ -281,16 +335,17 @@ out:
 
                LOGI("backend_type = %d, device_type = %d", backend_type, device_type);
 
-               // Update tensor filter type for MLAPI if a given backend type is MLAPI.
+                       // If the user set MLAPI as the backend type then the TFLITE tensor filter of NNStreamer will be used by default.
                if (backend_type == INFERENCE_BACKEND_MLAPI) {
-                       if (device_type & (INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU))
-                               backend_type = mDefault_MLAPI_Backend[0];
-                       if (device_type & INFERENCE_TARGET_CUSTOM)
-                               backend_type = mDefault_MLAPI_Backend[1];
-
-                       LOGI("tensor filter type is %d\n", backend_type);
+                       backend_type = INFERENCE_BACKEND_TFLITE;
+                       LOGI("API framework is MLAPI with TFLITE tensor filter.\n");
                }
 
+               // If an NPU type is declared in the ini file then pass that type to
+               // the given inference engine backend.
+               if (sBackendForNpu > 0)
+                       backend_type = sBackendForNpu;
+
                int ret = mBackendHandle->SetPrivateData(&backend_type);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                        LOGE("Failed to set a tensor filter plugin type for MLAPI.");
@@ -325,9 +380,6 @@ out:
                        mProfiler.Start(IE_PROFILER_MEMORY);
                }
 
-               std::string backendLibName =
-                               "libinference-engine-" + config->backend_name + ".so";
-
                // If backend_type of config is -1 then update it according to backend_name.
                if (config->backend_type == -1) {
                        std::map<std::string,int> BackendTable;
@@ -340,6 +392,21 @@ out:
                        config->backend_type = BackendTable.find(config->backend_name)->second;
                }
 
+               std::string backendLibName;
+
+               // For the two backends below, MLAPI will be used as the API framework:
+               //     MLAPI, ONE
+               // MLAPI is also used for all NPU devices (INFERENCE_TARGET_CUSTOM type) and when the ini file selects MLAPI for TFLITE or ARMNN.
+               if (config->backend_type == INFERENCE_BACKEND_MLAPI ||
+                               config->target_devices == INFERENCE_TARGET_CUSTOM ||
+                               config->backend_type == INFERENCE_BACKEND_ONE ||
+                               (config->backend_type == INFERENCE_BACKEND_ONE && sApiFwForONE == INFERENCE_BACKEND_MLAPI) ||
+                               (config->backend_type == INFERENCE_BACKEND_TFLITE && sApiFwForTFLITE == INFERENCE_BACKEND_MLAPI) ||
+                               (config->backend_type == INFERENCE_BACKEND_ARMNN && sApiFwForARMNN == INFERENCE_BACKEND_MLAPI))
+                       backendLibName = "libinference-engine-mlapi.so";
+               else
+                       backendLibName = "libinference-engine-" + config->backend_name + ".so";
+
                int ret = InitBackendEngine(backendLibName, config->backend_type, config->target_devices);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                        return ret;
@@ -387,8 +454,23 @@ out:
                        [INFERENCE_BACKEND_ONE] = "mlapi"
                };
 
+               int api_fw_type;
+
+               // For the two backends below, MLAPI will be used as the API framework:
+               //     MLAPI, ONE
+               // MLAPI is also used for all NPU devices (INFERENCE_TARGET_CUSTOM type) and when the ini file selects MLAPI for TFLITE or ARMNN.
+               if (backend_type == INFERENCE_BACKEND_MLAPI ||
+                               device_type == INFERENCE_TARGET_CUSTOM ||
+                               backend_type == INFERENCE_BACKEND_ONE ||
+                               (backend_type == INFERENCE_BACKEND_ONE && sApiFwForONE == INFERENCE_BACKEND_MLAPI) ||
+                               (backend_type == INFERENCE_BACKEND_TFLITE && sApiFwForTFLITE == INFERENCE_BACKEND_MLAPI) ||
+                               (backend_type == INFERENCE_BACKEND_ARMNN && sApiFwForARMNN == INFERENCE_BACKEND_MLAPI))
+                       api_fw_type = INFERENCE_BACKEND_MLAPI;
+               else
+                       api_fw_type = backend_type;
+
                std::string backendLibName =
-                               "libinference-engine-" + backendNameTable[backend_type] + ".so";
+                               "libinference-engine-" + backendNameTable[api_fw_type] + ".so";
 
                int ret = InitBackendEngine(backendLibName, backend_type, device_type);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
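
Note: the check that selects libinference-engine-mlapi.so over the backend-specific library appears twice in this diff. The sketch below is not part of the commit; it only illustrates how the same decision could be written once. The helper names and the idea of passing the ini-driven results as parameters are hypothetical, and the enum values are assumed to come from inference_engine_type.h.

// Sketch only: consolidates the duplicated "use MLAPI as the API framework?" decision.
#include <string>

#include "inference_engine_type.h"

static bool UseMlapiFramework(int backend_type, int device_type,
                              int api_fw_for_tflite, int api_fw_for_armnn)
{
        // MLAPI and ONE backends, and any NPU device (INFERENCE_TARGET_CUSTOM),
        // always go through the MLAPI backend library.
        if (backend_type == INFERENCE_BACKEND_MLAPI ||
                        backend_type == INFERENCE_BACKEND_ONE ||
                        device_type == INFERENCE_TARGET_CUSTOM)
                return true;

        // TFLITE and ARMNN go through MLAPI only when the ini file selected it.
        if (backend_type == INFERENCE_BACKEND_TFLITE &&
                        api_fw_for_tflite == INFERENCE_BACKEND_MLAPI)
                return true;

        return backend_type == INFERENCE_BACKEND_ARMNN &&
                        api_fw_for_armnn == INFERENCE_BACKEND_MLAPI;
}

static std::string GetBackendLibName(const std::string &backend_name,
                                     int backend_type, int device_type,
                                     int api_fw_for_tflite, int api_fw_for_armnn)
{
        if (UseMlapiFramework(backend_type, device_type,
                              api_fw_for_tflite, api_fw_for_armnn))
                return "libinference-engine-mlapi.so";

        return "libinference-engine-" + backend_name + ".so";
}

Passing the ini results as parameters rather than reading the file-scope sApiFwFor* variables directly would also keep such a helper testable in isolation.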