#include <time.h>
#include <dlfcn.h>
#include <experimental/filesystem>
#include <iniparser.h>
extern "C"
{
{
namespace Common
{
- const char *INFERENCE_MLAPI_INI_FILENAME =
- "/etc/inference/inference_engine_mlapi_backend.ini";
+ const char *BACKEND_PATH_INI_FILENAME =
+ "/etc/inference/inference_engine_backend_path.ini";
+ std::map<std::string, inference_backend_type_e> sApiFw =
+ {
+ { "MLAPI", INFERENCE_BACKEND_MLAPI }
+ };
+
+ std::map<std::string, inference_backend_type_e> sBackend =
+ {
+ { "TFLITE", INFERENCE_BACKEND_TFLITE },
+ { "ARMNN", INFERENCE_BACKEND_ARMNN },
+ { "ONE", INFERENCE_BACKEND_ONE },
+ { "OPENCV", INFERENCE_BACKEND_OPENCV }
+ };
+
+ std::map<std::string, inference_backend_npu_type_e> sNpuBackend =
+ {
+ { "VIVANTE", INFERENCE_BACKEND_NPU_VIVANTE },
+ { "TRIV2", INFERENCE_BACKEND_NPU_TRIV2}
+ };
+
+ int sApiFwForTFLITE = -1, sApiFwForARMNN = -1, sApiFwForOPENCV = -1;
+ int sBackendForNpu = -1;
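+
+ // A sketch of the ini layout implied by the "section:key" names used in
+ // LoadConfigFile() below (hypothetical contents; the real
+ // /etc/inference/inference_engine_backend_path.ini is platform-specific).
+ // Valid values are the keys of sNpuBackend and sApiFw above:
+ //
+ // [NPU backend]
+ // type = TRIV2
+ //
+ // [TFLITE]
+ // API framework = MLAPI
+ //
+ // [ARMNN]
+ // API framework = MLAPI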
InferenceEngineCommon::InferenceEngineCommon() :
mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
LOGW("LEAVE");
}
+ int InferenceEngineCommon::UseMLAPI(const int backend_type, const int device_type)
+ {
+ if (backend_type == INFERENCE_BACKEND_MLAPI ||
+ device_type == INFERENCE_TARGET_CUSTOM ||
+ backend_type == INFERENCE_BACKEND_ONE ||
+ (backend_type == INFERENCE_BACKEND_TFLITE && sApiFwForTFLITE == INFERENCE_BACKEND_MLAPI) ||
+ (backend_type == INFERENCE_BACKEND_ARMNN && sApiFwForARMNN == INFERENCE_BACKEND_MLAPI))
+ return 1;
+
+ return 0;
+ }
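+
+ // A few example evaluations (a sketch, assuming the cached sApiFw* values are
+ // still at their initial -1):
+ // UseMLAPI(INFERENCE_BACKEND_ONE, INFERENCE_TARGET_CPU) -> 1 (ONE always goes through MLAPI)
+ // UseMLAPI(INFERENCE_BACKEND_TFLITE, INFERENCE_TARGET_CPU) -> 0 (internal TFLITE backend)
+ // UseMLAPI(INFERENCE_BACKEND_TFLITE, INFERENCE_TARGET_CUSTOM) -> 1 (NPU devices go through MLAPI)
+ // Once LoadConfigFile() has set sApiFwForTFLITE to INFERENCE_BACKEND_MLAPI,
+ // the second call returns 1 as well.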
+
+ int InferenceEngineCommon::GetNpuBackendType(dictionary *dict, const char *section_name)
+ {
+ // Parse the NPU backend type from the ini file.
+ const char *parsed_str = iniparser_getstring(dict, section_name, NULL);
+ if (parsed_str == NULL) {
+ LOGI("No type parsed for %s section name.", section_name);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ std::map<std::string, inference_backend_npu_type_e>::iterator it = sNpuBackend.find(parsed_str);
+ if (it == sNpuBackend.end()) {
+ LOGE("Invalid NPU backend name.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ return it->second;
+ }
+
+ int InferenceEngineCommon::GetApiFrameworkType(dictionary *dict, const char *section_name)
+ {
+ // Parse the API framework type from the ini file.
+ const char *parsed_str = iniparser_getstring(dict, section_name, NULL);
+ if (parsed_str == NULL) {
+ LOGI("No type parsed for %s section name.", section_name);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ std::map<std::string, inference_backend_type_e>::iterator it = sApiFw.find(parsed_str);
+ if (it == sApiFw.end()) {
+ LOGE("Invalid API framework name.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ return it->second;
+ }
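+
+ // Both helpers look a value up with iniparser's "section:key" naming.
+ // For example, with a dictionary loaded from the hypothetical ini sketched above:
+ // GetNpuBackendType(dict, "NPU backend:type") -> INFERENCE_BACKEND_NPU_TRIV2
+ // GetApiFrameworkType(dict, "TFLITE:API framework") -> INFERENCE_BACKEND_MLAPI
+ // A missing key or an unknown name yields INFERENCE_ENGINE_ERROR_INVALID_PARAMETER,
+ // which the callers below only check as a negative "not configured" result.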
+
int InferenceEngineCommon::LoadConfigFile(std::string ini_file_path)
{
int ret = INFERENCE_ENGINE_ERROR_NONE;
- int npu_type = -1, cpu_and_gpu_type = -1;
- char *default_type_for_npu = NULL, *default_type_for_cpu_gpu = NULL;
+ std::string strNpuBackend = "", strApiFwName = "";
if (ini_file_path.empty())
- ini_file_path = INFERENCE_MLAPI_INI_FILENAME;
+ ini_file_path = BACKEND_PATH_INI_FILENAME;
LOGI("%s configuration file will be used.\n", ini_file_path.c_str());
dictionary *dict = iniparser_load(ini_file_path.c_str());
if (dict == NULL) {
- LOGE("Fail to load %s file.\n", ini_file_path.c_str());
+ LOGW("Fail to load %s file.\n", ini_file_path.c_str());
+ LOGW("so it will not use default backend path.\n");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- default_type_for_npu = (char *)iniparser_getstring(dict,
- "inference mlapi npu backend:default tensor filter type",
- NULL);
- if (default_type_for_npu == NULL) {
- LOGE("Fail to load default tensor filter type for MLAPI with NPU.");
- ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- goto out;
+ sBackendForNpu = GetNpuBackendType(dict, "NPU backend:type");
+ if (sBackendForNpu < 0) {
+ LOGI("No NPU backend type from ini file.");
+ LOGI("This platform cannot use NPU acceleration for inference.");
}
- default_type_for_cpu_gpu = (char *)iniparser_getstring(dict,
- "inference mlapi cpu and gpu backend:default tensor filter type",
- NULL);
- if (default_type_for_cpu_gpu == NULL) {
- LOGE("Fail to load default tensor filter type for MLAPI with CPU and GPU.");
- ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- goto out;
- }
+ LOGI("API FW = %s, NPU = %d", sBackendForNpu > 0 ? "MLAPI" : "Internal", sBackendForNpu);
- npu_type = atoi(default_type_for_npu);
- cpu_and_gpu_type = atoi(default_type_for_cpu_gpu);
-
- // Check if loaded configuration value is valid or not.
- if (npu_type != INFERENCE_BACKEND_NPU_VIVANTE &&
- npu_type != INFERENCE_BACKEND_NPU_TRIV2) {
- LOGE("Invalid tensor filter type for MLAPI with NPU.");
- ret = INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- goto out;
+ sApiFwForTFLITE = GetApiFrameworkType(dict, "TFLITE:API framework");
+ if (sApiFwForTFLITE < 0) {
+ LOGI("No API framework type from ini file.");
+ LOGI("So in default, internal API will be used for TFLITE.");
}
- if (cpu_and_gpu_type != INFERENCE_BACKEND_TFLITE) {
- LOGE("Invalid tensor filter type for MLAPI with CPU and GPU.");
- ret = INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- goto out;
- }
+ LOGI("API FW = %s for TFLITE.", sApiFwForTFLITE > 0 ? "MLAPI" : "Internal");
- LOGI("npu = %d, gpu = %d", npu_type, cpu_and_gpu_type);
+ sApiFwForARMNN = GetApiFrameworkType(dict, "ARMNN:API framework");
+ if (sApiFwForARMNN < 0) {
+ LOGI("No API framework type from ini file.");
+ LOGI("So in default, internal API will be used for ARMNN.");
+ }
- // TODO. Update mDefault_MLAPI_Backend type using ini config file.
- // 0 : default tensor filter type for MLAPI with CPU and GPU.
- // 1 : default tensor filter type for MLAPI with NPU.
- mDefault_MLAPI_Backend[0] = cpu_and_gpu_type;
- mDefault_MLAPI_Backend[1] = npu_type;
+ LOGI("API FW = %s for ARMNN.", sApiFwForARMNN > 0 ? "MLAPI" : "Internal");
-out:
iniparser_freedict(dict);
return ret;
-
}
int InferenceEngineCommon::CheckTensorBuffers(
LOGI("backend_type = %d, device_type = %d", backend_type, device_type);
- // Update tensor filter type for MLAPI if a given backend type is MLAPI.
- if (backend_type == INFERENCE_BACKEND_MLAPI) {
- if (device_type & (INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU))
- backend_type = mDefault_MLAPI_Backend[0];
- if (device_type & INFERENCE_TARGET_CUSTOM)
- backend_type = mDefault_MLAPI_Backend[1];
-
- LOGI("tensor filter type is %d\n", backend_type);
+ // If the user sets MLAPI as the backend type and the device type is CPU or GPU,
+ // then the TFLITE tensor filter of NNStreamer will be used by default.
+ if (backend_type == INFERENCE_BACKEND_MLAPI &&
+ (device_type & INFERENCE_TARGET_CPU || device_type & INFERENCE_TARGET_GPU)) {
+ backend_type = INFERENCE_BACKEND_TFLITE;
+ LOGI("API framework is MLAPI with TFLITE tensor filter.\n");
}
+ // If an NPU type is declared in the ini file then pass that type to
+ // the given inference engine backend.
+ if (backend_type == INFERENCE_BACKEND_MLAPI &&
+ device_type == INFERENCE_TARGET_CUSTOM && sBackendForNpu > 0)
+ backend_type = sBackendForNpu;
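+ // e.g. a (INFERENCE_BACKEND_MLAPI, INFERENCE_TARGET_CUSTOM) request with
+ // sBackendForNpu == INFERENCE_BACKEND_NPU_TRIV2 (hypothetical ini above) is
+ // rewritten to INFERENCE_BACKEND_NPU_TRIV2 before being handed to
+ // SetPrivateData() below.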
+
int ret = mBackendHandle->SetPrivateData(&backend_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
LOGE("Failed to set a tensor filter plugin type for MLAPI.");
mProfiler.Start(IE_PROFILER_MEMORY);
}
- std::string backendLibName =
- "libinference-engine-" + config->backend_name + ".so";
-
// If backend_type of config is -1 then update it according to backend_name.
if (config->backend_type == -1) {
std::map<std::string,int> BackendTable;
config->backend_type = BackendTable.find(config->backend_name)->second;
}
+ std::string backendLibName;
+
+ // For two backend types - MLAPI and ONE - MLAPI will be used as the API framework by default.
+ // For all NPU devices passed with the INFERENCE_TARGET_CUSTOM type, MLAPI will also be used as the API framework by default.
+ if (UseMLAPI(config->backend_type, config->target_devices))
+ backendLibName = "libinference-engine-mlapi.so";
+ else
+ backendLibName = "libinference-engine-" + config->backend_name + ".so";
+
int ret = InitBackendEngine(backendLibName, config->backend_type, config->target_devices);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return ret;
[INFERENCE_BACKEND_ONE] = "mlapi"
};
+ int api_fw_type;
+
+ // For two backend types - MLAPI and ONE - MLAPI will be used as the API framework by default.
+ // For all NPU devices passed with the INFERENCE_TARGET_CUSTOM type, MLAPI will also be used as the API framework by default.
+ if (UseMLAPI(backend_type, device_type))
+ api_fw_type = INFERENCE_BACKEND_MLAPI;
+ else
+ api_fw_type = backend_type;
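+ // e.g. backend_type == INFERENCE_BACKEND_TFLITE with device_type ==
+ // INFERENCE_TARGET_CUSTOM resolves api_fw_type to INFERENCE_BACKEND_MLAPI, so
+ // the "mlapi" library name is picked (assuming backendNameTable maps
+ // INFERENCE_BACKEND_MLAPI to "mlapi" like the INFERENCE_BACKEND_ONE entry above),
+ // while backend_type itself is still passed to InitBackendEngine() unchanged.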
+
std::string backendLibName =
- "libinference-engine-" + backendNameTable[backend_type] + ".so";
+ "libinference-engine-" + backendNameTable[api_fw_type] + ".so";
int ret = InitBackendEngine(backendLibName, backend_type, device_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {