Pass model file format to backend engine
author Inki Dae <inki.dae@samsung.com>
Tue, 11 Feb 2020 06:25:53 +0000 (15:25 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
This patch passes the model file format coming from the Inference layer
down to the backend engine. To that end, it adds enumeration values
that indicate the various model file formats.

Change-Id: I11b141399498c73cda6a34350a6f6fd3eac74a17
Signed-off-by: Inki Dae <inki.dae@samsung.com>
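
For context, a minimal caller-side sketch of the new signature (a hypothetical caller; the paths are placeholders, and the label-file position follows the vision-layer change below):

    #include <string>
    #include <vector>

    #include "inference_engine_type.h"
    #include "inference_engine_vision_impl.h"

    // Hypothetical caller. A TFLite model needs no separate config file,
    // so model_paths carries only the model file and the label file.
    int LoadTfliteModel(InferenceEngineVision &engine)
    {
        std::vector<std::string> model_paths = {
            "/usr/share/model.tflite", // model file (placeholder path)
            "/usr/share/labels.txt"    // label file (placeholder path)
        };

        return engine.Load(model_paths, INFERENCE_MODEL_TFLITE);
    }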
common/inference_engine_common_impl.cpp
include/inference_engine_common.h
include/inference_engine_common_impl.h
include/inference_engine_type.h
include/inference_engine_vision_impl.h
vision/inference_engine_vision_impl.cpp

diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp
index 0501c6c47006967a45fa5c766c80ebf2d1ecf117..503ecc1ca98d424c9bcf712f69df848f651280ab 100755
@@ -162,16 +162,11 @@ int InferenceEngineCommon::SetTargetDevices(int types)
     return ret;
 }
 
-int InferenceEngineCommon::Load(std::vector<std::string> model_paths, unsigned int num_of_models)
+int InferenceEngineCommon::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
 {
     LOGI("ENTER");
 
-    // TODO. Consider multiple models.
-
-    std::string model = model_paths.front();
-    LOGI("Load %s model.", model.c_str());
-
-    int ret = engine->Load(model_paths, num_of_models);
+    int ret = engine->Load(model_paths, model_format);
     if (ret != INFERENCE_ENGINE_ERROR_NONE)
         LOGE("Fail to load InferenceEngineVision");
 
diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h
index 168acf1f75d7c07d5ce8d78c115e5c383a674bd0..886d4314e2f757486c0b876129628319fbf1bf02 100755
@@ -57,7 +57,7 @@ public:
      *
      * @since_tizen 6.0
      */
-    virtual int Load(std::vector<std::string> model_paths, unsigned int num_of_models) { return 0; }
+    virtual int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) { return 0; }
 
     /**
      * @brief Create a memory. Deprecated.
diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h
index cbd0fb0218a594cd6e441b4fc136c27461da94dc..a299205a5d9ab4e5180535f4e565616adddc7a7b 100755
@@ -86,7 +86,7 @@ public:
      *
      * @since_tizen 6.0
      */
-    int Load(std::vector<std::string> model_paths, unsigned int num_of_models);
+    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
 
     /**
      * @brief Get an input layer's type such as float32, float16, and so on. Deprecated.
diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h
index c2bd4b281f7fbf2e60f7924f7b5ececa45e5ff8d..925605d524bac8a6e90993b691ef2bd414254eef 100644
@@ -55,6 +55,18 @@ typedef enum {
     INFERENCE_TARGET_MAX    = 1 << 3,
 } inference_target_type_e;
 
+typedef enum {
+    INFERENCE_MODEL_NONE     = 0,
+    INFERENCE_MODEL_CAFFE,           /**< CAFFE. *.prototxt config file is needed. */
+    INFERENCE_MODEL_TF,              /**< Tensorflow. *.pbtxt config file is needed. */
+    INFERENCE_MODEL_TFLITE,          /**< Tensorflow-Lite. */
+    INFERENCE_MODEL_TORCH,           /**< Torch */
+    INFERENCE_MODEL_DARKNET,         /**< Darknet. *.cfg config file is needed. */
+    INFERENCE_MODEL_DLDT,            /**< DLDT. *.xml config file is needed. */
+    INFERENCE_MODEL_ONNX,            /**< ONNX */
+    INFERENCE_MODEL_MAX
+} inference_model_format_e;
+
 /**
  * @brief Enumeration for tensor shape type.
  *
diff --git a/include/inference_engine_vision_impl.h b/include/inference_engine_vision_impl.h
index f231fe4f0a6530389140d07cb013bd3d9c8ad443..ff327ae77360c6e86bbd15460829cc11ae24fc69 100755
@@ -151,7 +151,7 @@ public:
      *
      * @since_tizen 6.0
      */
-    int Load(std::vector<std::string> model_paths, unsigned int num_of_models);
+    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
 
     /**
      * @brief Get capacity from a given backend engine.
diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp
index 04b75fccb7a5272852bcf48a2a65d68a906ebbb9..b6100c68b18897ad05d94c64d2aef6307def38aa 100755
@@ -250,13 +250,17 @@ int InferenceEngineVision::SetOutputTensorParamNodes(std::vector<std::string> no
     return ret;
 }
 
-int InferenceEngineVision::Load(std::vector<std::string> model_paths, unsigned int num_of_models)
+int InferenceEngineVision::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
 {
     LOGI("ENTER");
 
-    // TODO. Consider 'num_of_models > 1' case.
-    // Load label data.
-    std::string label_file = model_paths[1];
+    std::string label_file;
+    if (model_format == INFERENCE_MODEL_TFLITE || model_format == INFERENCE_MODEL_TORCH) {
+        label_file = model_paths[1];
+    } else {
+        label_file = model_paths[2];
+    }
+
     size_t userFileLength = label_file.length();
     if (userFileLength > 0 && access(label_file.c_str(), F_OK)) {
         LOGE("Label file path in [%s] ", label_file.c_str());
@@ -270,7 +274,7 @@ int InferenceEngineVision::Load(std::vector<std::string> model_paths, unsigned i
     }
 
     // Load model files.
-    ret = mCommonEngine->Load(model_paths, num_of_models);
+    ret = mCommonEngine->Load(model_paths, model_format);
     if (ret != INFERENCE_ENGINE_ERROR_NONE) {
         LOGE("Fail to load InferenceEngineVision");
         return ret;
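
Note the model_paths layout this change implies: formats that need a separate config file (Caffe, TF, Darknet, DLDT, per the enum comments above) place the label file at index 2, after the model and config entries, while TFLite and Torch place it at index 1. A sketch of that convention (hypothetical paths):

    #include <string>
    #include <vector>

    // Path ordering inferred from the index-1/index-2 branch in
    // InferenceEngineVision::Load (all paths are placeholders):
    //   TFLite/Torch:          { model, label }
    //   Caffe/TF/Darknet/DLDT: { model, config, label }
    std::vector<std::string> BuildCaffePaths()
    {
        return {
            "/usr/share/model.caffemodel", // weights
            "/usr/share/deploy.prototxt",  // *.prototxt config (per enum docs)
            "/usr/share/labels.txt"        // label file -> model_paths[2]
        };
    }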