#include <unistd.h>
#include <time.h>
#include <queue>
+#include <tuple>
+#include <stdexcept>
+#include <cerrno>
// TODO. Below is test code. DO NOT use ML internal function.
#define ENABLE_FAST
return INFERENCE_ENGINE_ERROR_NONE;
}
- int InferenceMLAPI::Load(std::vector<std::string> model_paths,
- inference_model_format_e model_format)
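+ // Resolves the ML Single API NNFW type and HW type for the current
+ // backend plugin and target device. Throws std::invalid_argument on
+ // an unsupported combination.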
+ std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> InferenceMLAPI::GetNNFWInfo()
{
- LOGI("ENTER");
+ switch (mPluginType) {
+ case INFERENCE_BACKEND_NPU_VIVANTE:
+ LOGI("Vivante tensor filter will be used.");
+ return std::make_tuple(ML_NNFW_TYPE_VIVANTE, ML_NNFW_HW_ANY);
- std::string model_str(model_paths[0]);
+ case INFERENCE_BACKEND_ONE:
+ LOGI("NNFW tensor filter will be used.");
- // TODO. Set NNFW backend type and HW type properly.
+ if (mTargetDevice == INFERENCE_TARGET_CPU) {
+ LOGI("Target device is NEON.");
+ return std::make_tuple(ML_NNFW_TYPE_NNFW, ML_NNFW_HW_CPU_NEON);
+ } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+ LOGI("Target device is GPU");
+ return std::make_tuple(ML_NNFW_TYPE_NNFW, ML_NNFW_HW_GPU);
+ }
- ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_NNFW;
- ml_nnfw_hw_e nnfw_hw = ML_NNFW_HW_ANY;
+ LOGE("Invalid inference target device type.");
+ throw std::invalid_argument("invalid tensor type.");
+
+ case INFERENCE_BACKEND_ARMNN:
+ LOGI("ARMNN tensor filter will be used.");
+ return std::make_tuple(ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_ANY);
+
+ case INFERENCE_BACKEND_TFLITE:
+ LOGI("TFLITE tensor filter will be used.");
+ return std::make_tuple(ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+
+ case INFERENCE_BACKEND_SNPE:
+ LOGI("SNPE tensor filter will be used.");
+ return std::make_tuple(ML_NNFW_TYPE_SNPE, ML_NNFW_HW_ANY);
+
+ case INFERENCE_BACKEND_NNTRAINER:
+ LOGI("NNTRAINER tensor filter will be used.");
+ return std::make_tuple(ML_NNFW_TYPE_NNTR_INF, ML_NNFW_HW_ANY);
+
+ default:
+ LOGE("Invalid plugin type.");
+ throw std::invalid_argument("invalid plugin type.");
+ }
+ }
+
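+ // Returns true if the file at the given path exists and is readable
+ // by the current process.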
+ bool InferenceMLAPI::IsFileReadable(const std::string& path)
+ {
+ if (access(path.c_str(), R_OK) == -1) {
+ LOGE("file [%s] is not readable, errno(%d)", path.c_str(), errno);
+ return false;
+ }
+
+ return true;
+ }
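+
+ // Builds the model path string expected by the ML Single API for the
+ // current backend plugin. Throws std::runtime_error when a required
+ // model file is not readable.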
+ std::string InferenceMLAPI::GetModelPath(const std::vector<std::string>& model_paths)
+ {
switch (mPluginType) {
case INFERENCE_BACKEND_NPU_VIVANTE:
- nnfw_type = ML_NNFW_TYPE_VIVANTE;
- nnfw_hw = ML_NNFW_HW_ANY;
- LOGI("Vivante tensor filter will be used.");
-
- if (access(model_str.c_str(), R_OK) ||
- access(model_paths[1].c_str(), R_OK)) {
- LOGE("model file path in [%s,%s]", model_str.c_str(),
- model_paths[1].c_str());
- return INFERENCE_ENGINE_ERROR_INVALID_PATH;
- }
+ if (!IsFileReadable(model_paths[0]) ||
+ !IsFileReadable(model_paths[1]))
+ throw std::runtime_error("invalid path");
- // ML Single API of MLAPI requires model_paths rule like below,
- // "so library file path,nb model file path" or vise versa.
+ // The ML Single API requires model_paths in the form below:
+ // "so library file path,nb model file path" or vice versa.
- model_str += "," + model_paths[1];
- break;
+ return model_paths[0] + "," + model_paths[1];
+
case INFERENCE_BACKEND_ONE:
+ /* fall through */
case INFERENCE_BACKEND_ARMNN:
+ /* fall through */
case INFERENCE_BACKEND_TFLITE:
+ /* fall through */
case INFERENCE_BACKEND_SNPE:
- if (mPluginType == INFERENCE_BACKEND_ONE) {
- nnfw_type = ML_NNFW_TYPE_NNFW;
-
- if (mTargetDevice == INFERENCE_TARGET_CPU) {
- nnfw_hw = ML_NNFW_HW_CPU_NEON;
- LOGI("Target device is NEON.");
- } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
- nnfw_hw = ML_NNFW_HW_GPU;
- LOGI("Target device is GPU");
- } else {
- LOGE("Invalid inference target device type.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
-
- LOGI("NNFW tensor filter will be used.");
- }
+ if (!IsFileReadable(model_paths[0]))
+ throw std::runtime_error("invalid path");
+ return model_paths[0];
- if (mPluginType == INFERENCE_BACKEND_ARMNN) {
- nnfw_type = ML_NNFW_TYPE_ARMNN;
- LOGI("ARMNN tensor filter will be used.");
- }
+ case INFERENCE_BACKEND_NNTRAINER:
+ return model_paths[0];
+
+ default:
+ throw std::runtime_error("shouldn't reach here");
+ }
+ }
- if (mPluginType == INFERENCE_BACKEND_TFLITE) {
- nnfw_type = ML_NNFW_TYPE_TENSORFLOW_LITE;
- LOGI("TFLITE tensor filter will be used.");
- }
+ int InferenceMLAPI::Load(std::vector<std::string> model_paths,
+ inference_model_format_e model_format)
+ {
+ LOGI("ENTER");
- if (mPluginType == INFERENCE_BACKEND_SNPE) {
- nnfw_type = ML_NNFW_TYPE_SNPE;
- nnfw_hw = ML_NNFW_HW_ANY;
- LOGI("SNPE tensor filter will be used.");
- }
+ std::string model_str;
- if (access(model_str.c_str(), R_OK)) {
- LOGE("model file path in [%s]", model_str.c_str());
- return INFERENCE_ENGINE_ERROR_INVALID_PATH;
- }
+ ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_ANY;
+ ml_nnfw_hw_e nnfw_hw = ML_NNFW_HW_ANY;
- break;
- case INFERENCE_BACKEND_NNTRAINER:
- nnfw_type = ML_NNFW_TYPE_NNTR_INF;
- nnfw_hw = ML_NNFW_HW_ANY;
- break;
- // TODO.
- default:
- LOGE("Invalid plugin type.");
+ try {
+ std::tie(nnfw_type, nnfw_hw) = GetNNFWInfo();
+ model_str = GetModelPath(model_paths);
+ } catch (const std::invalid_argument& ex) {
+ LOGE("Get NNFW info Error (%s)", ex.what());
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ } catch (const std::runtime_error& ex) {
+ LOGE("Get model path Error (%s)", ex.what());
+ return INFERENCE_ENGINE_ERROR_INVALID_PATH;
}
LOGI("Model name = %s", model_str.c_str());
ml_tensors_info_h in_info = NULL, out_info = NULL;
// In case of nntrainer tensor filter, input and output tensor
- // informaion is needed to load a given model.
+ // information is needed to load a given model.
if (mPluginType == INFERENCE_BACKEND_NNTRAINER) {
int ret = CreateMLAPITensorInfo(in_info, mInputProperty);
if (ret != INFERENCE_ENGINE_ERROR_NONE)