Refactoring InferenceMLAPI::Load() 67/267067/7
author     Seungbae Shin <seungbae.shin@samsung.com>
Thu, 25 Nov 2021 03:30:02 +0000 (12:30 +0900)
committer  Inki Dae <inki.dae@samsung.com>
Wed, 29 Dec 2021 02:46:53 +0000 (11:46 +0900)
[Version] : 0.3.1
[Issue type] : Refactoring

Change-Id: I370b08981fcdd79f916dfe9cc5ea4225ecf66764

packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp
src/inference_engine_mlapi_private.h

index a9bce91..230705d 100644 (file)
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.3.0
+Version:    0.3.1
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
index 9a5c2f8..b9aeaf4 100644 (file)
@@ -23,6 +23,7 @@
 #include <unistd.h>
 #include <time.h>
 #include <queue>
+#include <tuple>
 
 // TODO. Below is test code. DO NOT use ML internal function.
 #define ENABLE_FAST
@@ -199,86 +200,102 @@ namespace MLAPIImpl
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
-       int InferenceMLAPI::Load(std::vector<std::string> model_paths,
-                                                        inference_model_format_e model_format)
+       std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> InferenceMLAPI::GetNNFWInfo()
        {
-               LOGI("ENTER");
+               switch (mPluginType) {
+               case INFERENCE_BACKEND_NPU_VIVANTE:
+                       LOGI("Vivante tensor filter will be used.");
+                       return std::make_tuple(ML_NNFW_TYPE_VIVANTE, ML_NNFW_HW_ANY);
 
-               std::string model_str(model_paths[0]);
+               case INFERENCE_BACKEND_ONE:
+                       LOGI("NNFW tensor filter will be used.");
 
-               // TODO. Set NNFW backend type and HW type properly.
+                       if (mTargetDevice == INFERENCE_TARGET_CPU) {
+                               LOGI("Target device is NEON.");
+                               return std::make_tuple(ML_NNFW_TYPE_NNFW, ML_NNFW_HW_CPU_NEON);
+                       } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+                               LOGI("Target device is GPU");
+                               return std::make_tuple(ML_NNFW_TYPE_NNFW, ML_NNFW_HW_GPU);
+                       }
 
-               ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_NNFW;
-               ml_nnfw_hw_e nnfw_hw = ML_NNFW_HW_ANY;
+                       LOGE("Invalid inference target device type.");
+                       throw std::invalid_argument("invalid target device type.");
+
+               case INFERENCE_BACKEND_ARMNN:
+                       LOGI("ARMNN tensor filter will be used.");
+                       return std::make_tuple(ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_ANY);
+
+               case INFERENCE_BACKEND_TFLITE:
+                       LOGI("TFLITE tensor filter will be used.");
+                       return std::make_tuple(ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+
+               case INFERENCE_BACKEND_SNPE:
+                       LOGI("SNPE tensor filter will be used.");
+                       return std::make_tuple(ML_NNFW_TYPE_SNPE, ML_NNFW_HW_ANY);
+
+               default:
+                       LOGE("Invalid plugin type.");
+                       throw std::invalid_argument("invalid plugin type.");
+               }
+       }
+
+       bool InferenceMLAPI::IsFileReadable(const std::string& path)
+       {
+               if (access(path.c_str(), R_OK) == -1) {
+                       LOGE("file [%s] is not readable, errno(%d)", path.c_str(), errno);
+                       return false;
+               }
+
+               return true;
+       }
 
+       std::string InferenceMLAPI::GetModelPath(const std::vector<std::string>& model_paths)
+       {
                switch (mPluginType) {
                case INFERENCE_BACKEND_NPU_VIVANTE:
-                       nnfw_type = ML_NNFW_TYPE_VIVANTE;
-                       nnfw_hw = ML_NNFW_HW_ANY;
-                       LOGI("Vivante tensor filter will be used.");
-
-                       if (access(model_str.c_str(), R_OK) ||
-                                       access(model_paths[1].c_str(), R_OK)) {
-                               LOGE("model file path in [%s,%s]", model_str.c_str(),
-                                                                                                  model_paths[1].c_str());
-                               return INFERENCE_ENGINE_ERROR_INVALID_PATH;
-                       }
+                       if (!IsFileReadable(model_paths[0]) ||
+                               !IsFileReadable(model_paths[1]))
+                               throw std::runtime_error("invalid path");
 
                        // ML Single API of MLAPI requires model_paths rule like below,
                        // "so library file path,nb model file path" or vice versa.
-                       model_str += "," + model_paths[1];
-                       break;
+                       return model_paths[0] + "," + model_paths[1];
+
                case INFERENCE_BACKEND_ONE:
+                       /* fall through */
                case INFERENCE_BACKEND_ARMNN:
+                       /* fall through */
                case INFERENCE_BACKEND_TFLITE:
+                       /* fall through */
                case INFERENCE_BACKEND_SNPE:
-                       if (mPluginType == INFERENCE_BACKEND_ONE) {
-                               nnfw_type = ML_NNFW_TYPE_NNFW;
-
-                               if (mTargetDevice == INFERENCE_TARGET_CPU) {
-                                       nnfw_hw = ML_NNFW_HW_CPU_NEON;
-                                       LOGI("Target device is NEON.");
-                               } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
-                                       nnfw_hw = ML_NNFW_HW_GPU;
-                                       LOGI("Target device is GPU");
-                               } else {
-                                       LOGE("Invalid inference target device type.");
-                                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-                               }
-
-                               LOGI("NNFW tensor filter will be used.");
-                       }
+                       if (!IsFileReadable(model_paths[0]))
+                               throw std::runtime_error("invalid path");
+                       return model_paths[0];
 
-                       if (mPluginType == INFERENCE_BACKEND_ARMNN) {
-                               nnfw_type = ML_NNFW_TYPE_ARMNN;
-                               LOGI("ARMNN tensor filter will be used.");
-                       }
+               default:
+                       throw std::runtime_error("shouldn't reach here");
+               }
+       }
 
-                       if (mPluginType == INFERENCE_BACKEND_TFLITE) {
-                               nnfw_type = ML_NNFW_TYPE_TENSORFLOW_LITE;
-                               LOGI("TFLITE tensor filter will be used.");
-                       }
+       int InferenceMLAPI::Load(std::vector<std::string> model_paths,
+                                                        inference_model_format_e model_format)
+       {
+               LOGI("ENTER");
 
-                       if (mPluginType == INFERENCE_BACKEND_SNPE) {
-                               nnfw_type = ML_NNFW_TYPE_SNPE;
-                               nnfw_hw = ML_NNFW_HW_ANY;
-                               LOGI("SNPE tensor filter will be used.");
-                       }
+               std::string model_str;
 
-                       if (access(model_str.c_str(), R_OK)) {
-                               LOGE("model file path in [%s]", model_str.c_str());
-                               return INFERENCE_ENGINE_ERROR_INVALID_PATH;
-                       }
+               ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_ANY;
+               ml_nnfw_hw_e nnfw_hw = ML_NNFW_HW_ANY;
 
-                       break;
-               case INFERENCE_BACKEND_NNTRAINER:
-                       nnfw_type = ML_NNFW_TYPE_NNTR_INF;
-                       nnfw_hw = ML_NNFW_HW_ANY;
-                       break;
-               // TODO.
-               default:
-                       LOGE("Invalid plugin type.");
+               try {
+                       std::tie(nnfw_type, nnfw_hw) = GetNNFWInfo();
+                       model_str = GetModelPath(model_paths);
+               } catch (const std::invalid_argument& ex) {
+                       LOGE("Get NNFW info Error (%s)", ex.what());
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               } catch (const std::runtime_error& ex) {
+                       LOGE("Get model path Error (%s)", ex.what());
+                       return INFERENCE_ENGINE_ERROR_INVALID_PATH;
                }
 
                LOGI("Model name = %s", model_str.c_str());
@@ -286,7 +303,7 @@ namespace MLAPIImpl
                ml_tensors_info_h in_info = NULL, out_info = NULL;
 
                // In case of nntrainer tensor filter, input and output tensor
-               // informaion is needed to load a given model.
+               // information is needed to load a given model.
                if (mPluginType == INFERENCE_BACKEND_NNTRAINER) {
                        int ret = CreateMLAPITensorInfo(in_info, mInputProperty);
                        if (ret != INFERENCE_ENGINE_ERROR_NONE)
index d2eacb9..98ab53a 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <memory>
 #include <dlog.h>
+#include <tuple>
 
 #ifdef LOG_TAG
 #undef LOG_TAG
@@ -84,6 +85,10 @@ namespace MLAPIImpl
                int CreateMLAPITensorInfo(ml_tensors_info_h& tensor_info,
                                                                  inference_engine_layer_property& layer_property);
 
+               bool IsFileReadable(const std::string& path);
+               std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
+               std::string GetModelPath(const std::vector<std::string>& model_paths);
+
                int mPluginType;
                int mTargetDevice;
                ml_single_h mSingle;
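
For reference, a minimal standalone sketch of what the new IsFileReadable()/GetModelPath() helpers encode: access(2) with R_OK reports whether the process may read a file, and for the Vivante tensor filter the .so and .nb paths are joined with a comma as the ML Single API expects. JoinVivantePaths() and main() below are hypothetical illustration code, not part of the backend.

#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>

// Same kind of check the backend's IsFileReadable() performs: access(2) with R_OK.
static bool IsFileReadable(const std::string &path)
{
        if (access(path.c_str(), R_OK) == -1) {
                std::fprintf(stderr, "file [%s] is not readable, errno(%d)\n",
                             path.c_str(), errno);
                return false;
        }

        return true;
}

// Hypothetical helper: builds the "so path,nb path" string that the Vivante
// branch of GetModelPath() hands to the ML Single API.
static std::string JoinVivantePaths(const std::vector<std::string> &model_paths)
{
        if (model_paths.size() < 2 || !IsFileReadable(model_paths[0]) ||
            !IsFileReadable(model_paths[1]))
                throw std::runtime_error("invalid path");

        return model_paths[0] + "," + model_paths[1];
}

int main()
{
        try {
                std::printf("%s\n", JoinVivantePaths({ "model.so", "model.nb" }).c_str());
        } catch (const std::runtime_error &ex) {
                std::fprintf(stderr, "load failed: %s\n", ex.what());
                return 1;
        }

        return 0;
}

Keeping the access() check in one small helper lets the Vivante branch validate two files and every other backend validate one, without duplicating the errno logging.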