mv_inference: separate binding a backend engine from loading model files
author Inki Dae <inki.dae@samsung.com>
Fri, 7 Feb 2020 02:48:07 +0000 (11:48 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:40:31 +0000 (09:40 +0900)
This patch allows model files to be changed at runtime without reloading
the backend engine library.

With this patch, the behavior of the two C APIs, mv_inference_configure() and
mv_inference_prepare(), changes as follows (a usage sketch follows the list):
    mv_inference_configure()
    . Creates an inference-engine-common class object, passes the backend type
      to it, and then loads the corresponding backend library.
    mv_inference_prepare()
    . Passes the configuration values the inference engine backend needs to
      load the given model files, and then requests the backend to load them.
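
For illustration, below is a minimal caller-side sketch of the new flow. It is
a sketch only, not part of this patch: MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
MV_INFERENCE_BACKEND_TFLITE and the mv_create_engine_config() /
mv_destroy_engine_config() helpers are assumed from the public media vision
API, and error handling is omitted.

    mv_engine_config_h cfg = NULL;
    mv_inference_h infer = NULL;

    mv_create_engine_config(&cfg);
    mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_BACKEND_TYPE,
                                       MV_INFERENCE_BACKEND_TFLITE);
    mv_engine_config_set_string_attribute(cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
                                          "/usr/share/model_a.tflite");

    mv_inference_create(&infer);

    /* Binds a backend engine only; the backend library is loaded here. */
    mv_inference_configure(infer, cfg);

    /* Reads the model-related attributes and asks the bound backend to load the model. */
    mv_inference_prepare(infer);

    /* ... run inference ... */

    /* Changing the model no longer requires re-binding the backend library. */
    mv_engine_config_set_string_attribute(cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
                                          "/usr/share/model_b.tflite");
    mv_inference_prepare(infer);

    mv_inference_destroy(infer);
    mv_destroy_engine_config(cfg);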

Change-Id: Ibe2bde17c9aae71c8e0850b5901e963368fed00f
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_common/src/EngineConfig.cpp
mv_inference/inference/include/Inference.h [changed mode: 0644->0755]
mv_inference/inference/include/mv_inference_open.h [changed mode: 0644->0755]
mv_inference/inference/src/Inference.cpp
mv_inference/inference/src/mv_inference.c [changed mode: 0644->0755]
mv_inference/inference/src/mv_inference_open.cpp [changed mode: 0644->0755]
src/mv_inference.c

index 8fad23380c948004f749639154642c9465849646..566f07225e246bd20d5a94d058394492accfccc0 100644 (file)
@@ -190,6 +190,7 @@ int EngineConfig::getBooleanAttribute(const std::string& key, bool *value) const
 int EngineConfig::getStringAttribute(const std::string& key, std::string *value) const
 {
        DictStrConstIter dictIter = m_strDict.find(key);
+
        if (dictIter == m_strDict.end()) {
                LOGE("Attempt to access to the unsupported string attribute [%s] "
                                "of the engine config %p", key.c_str(), this);
old mode 100644 (file)
new mode 100755 (executable)
index 683ef7f..7451abd
@@ -150,12 +150,31 @@ public:
        void ConfigureOutputNodeNames(const std::vector<std::string> nodeNames);
 
        /**
-        * @brief   Prepares inference
-        * @details Use this function to create the instance based on
-        *          the configured backend, to set tensor information,
-        *          and load the models.
+        * @brief   Bind a backend engine
+        * @details Use this function to bind a backend engine for the inference.
+        *                      This creates an inference engine common class object and loads a backend
+        *                      library which interfaces with a neural network runtime such as TF Lite,
+        *                      OpenCV, ARMNN and so on.
         *
-        * @since_tizen 5.5
+        *                      Note: the created inference engine common object will be released and its
+        *                              corresponding backend library will be unbound when the destructor
+        *                              of the Inference class is called.
+        *
+        * @since_tizen 6.0
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        */
+       int Bind();
+
+       /**
+        * @brief   Set the default configuration for the inference
+        * @details Use this function to set the default configuration given in a JSON file by the user.
+        *
+        *                      Note: this function should be called after Bind().
+        *
+        * @since_tizen 6.0
         *
         * @return @c 0 on success, otherwise a negative error value
         * @retval #MEDIA_VISION_ERROR_NONE Successful
@@ -163,6 +182,20 @@ public:
         */
        int Prepare();
 
+       /**
+        * @brief   Load model files
+        * @details Use this function to load the given model files for the inference.
+        *
+        *                      Note: this function should be called after Prepare().
+        *
+        * @since_tizen 6.0
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        */
+       int Load();
+
        /**
         * @brief       Runs inference with the roi of a given image
         * @details Use this function to run forward pass with the given image.
@@ -217,10 +250,15 @@ public:
 
        int GetResults(std::vector<std::vector<int>>* dimInfo, std::vector<float*> *results);
 
+       mv_engine_config_h GetEngineConfig(void) { return engine_config; }
+
+       void SetEngineConfig(mv_engine_config_h config) { engine_config = config; }
+
 private:
        bool mCanRun; /**< The flag indicating ready to run Inference */
 
        InferenceConfig mConfig;
+       mv_engine_config_h engine_config;
 
        InferenceEngineVision * mBackend;
 
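
For reference, a minimal sketch of the intended call order on the Inference
class (assuming it were driven directly; in this patch Bind() is invoked from
mv_inference_configure_engine_open() and Prepare()/Load() from
mv_inference_prepare_open(), and the enum values below are illustrative
assumptions):

    // Sketch only; error handling omitted.
    mediavision::inference::Inference inference;

    // mv_inference_configure() path: choose and bind a backend.
    inference.ConfigureEngine(MV_INFERENCE_BACKEND_TFLITE, MV_INFERENCE_TARGET_CPU);
    inference.Bind();     // create the inference-engine-common object, load the backend library

    // mv_inference_prepare() path: configure and load the model files.
    // ... Configure*() calls for model files, tensor info and node names go here ...
    inference.Prepare();  // hand tensor/node configuration to the bound backend
    inference.Load();     // ask the backend to load the configured model files
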
old mode 100644 (file)
new mode 100755 (executable)
index 88229df..b4934f5
@@ -33,6 +33,9 @@ extern "C" {
 /*************/
 /* Inference */
 /*************/
+
+mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer);
+
 /**
  * @brief Create infernce handle.
  * @details Use this function to create an inference handle. After creation
index 743ed4b0fe98877cf9626898358399a66e3dfe89..e5667a6386e14237ff930b14701a65ace83c2beb 100755 (executable)
@@ -67,6 +67,7 @@ Inference::Inference() :
                auto iter = mSupportedInferenceBackend.find(i);
                LOGE("%d: %s: %s", i, (iter->second).first.c_str(), (iter->second).second ? "TRUE" : "FALSE");
        }
+
        LOGI("LEAVE");
 }
 
@@ -213,10 +214,10 @@ void Inference::ConfigureOutputNodeNames(const std::vector<std::string> nodeName
        mConfig.mOutputNodeNames = nodeNames;
 }
 
-int Inference::Prepare()
+int Inference::Bind(void)
 {
-       int ret = INFERENCE_ENGINE_ERROR_NONE;
-       LOGE("ENTER");
+       LOGI("ENTER");
+
        if (mConfig.mBackedType <= MV_INFERENCE_BACKEND_NONE ||
                mConfig.mBackedType >= MV_INFERENCE_BACKEND_MAX) {
                LOGE("NOT SUPPORTED BACKEND %d", mConfig.mBackedType);
@@ -225,27 +226,37 @@ int Inference::Prepare()
 
        auto iter = mSupportedInferenceBackend.find(mConfig.mBackedType);
        std::string backendName = (iter->second).first;
-       LOGE("backend string name: %s", backendName.c_str());
+       LOGI("backend string name: %s", backendName.c_str());
 
-    inference_engine_config config = {
-        .backend_name = backendName,
-        .target_devices = mConfig.mTargetType,
-    };
+       inference_engine_config config = {
+               .backend_name = backendName,
+               // By default, the target device is CPU. If the user defined a desired
+               // device type in the JSON file, it is applied later in Prepare().
+               .target_devices = mConfig.mTargetType,
+       };
 
-    // Create backend class object.
+       // Create a backend class object.
        mBackend = new InferenceEngineVision(&config);
-
        if (!mBackend) {
                LOGE("Fail to create backend");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-    // Bind backend library.
-    ret = mBackend->BindBackend(&config);
-    if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-        LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
-        return MEDIA_VISION_ERROR_INVALID_OPERATION;
-    }
+       // Bind a backend library.
+       int ret = mBackend->BindBackend(&config);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
+
+       LOGI("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int Inference::Prepare(void)
+{
+       LOGI("ENTER");
 
        // Input Tensor Param
        mBackend->SetInputTensorParamInput(mConfig.mTensorInfo.width,
@@ -264,6 +275,17 @@ int Inference::Prepare()
 
        mBackend->SetOutputTensorParamNodes(mConfig.mOutputNodeNames);
 
+       mBackend->SetTargetDevice(mConfig.mTargetType);
+
+       LOGI("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int Inference::Load(void)
+{
+       LOGI("ENTER");
+
        // Add model files to load.
     // TODO. model file and its corresponding label file should be added by
     // user request.
@@ -272,7 +294,7 @@ int Inference::Prepare()
     models.push_back(mConfig.mUserFilePath);
 
     // Request model loading to backend engine.
-    ret = mBackend->Load(models, 1);
+    int ret = mBackend->Load(models, 1);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                delete mBackend;
                LOGE("Fail to load model");
@@ -282,10 +304,8 @@ int Inference::Prepare()
 
        mCanRun = true;
 
-       // target type
-       // foreach supported??
-       mBackend->SetTargetDevice(config.target_devices);
-       LOGE("LEAVE");
+       LOGI("LEAVE");
+
        return MEDIA_VISION_ERROR_NONE;
 }
 
old mode 100644 (file)
new mode 100755 (executable)
index d38ae81..29029e4
@@ -96,21 +96,45 @@ int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_confi
 
 #else
 
-       ret = mv_inference_configure_model_open(infer, engine_config);
+    ret = mv_inference_configure_engine_open(infer, engine_config);
     if (ret != MEDIA_VISION_ERROR_NONE){
-        LOGE("Fail to configure model");
+        LOGE("Fail to configure engine and target");
         return ret;
     }
 
-    ret = mv_inference_configure_tensor_info_open(infer, engine_config);
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+       MEDIA_VISION_FUNCTION_LEAVE();
+       return ret;
+}
+
+
+int mv_inference_prepare(mv_inference_h infer)
+{
+       MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+       MEDIA_VISION_INSTANCE_CHECK(infer);
+
+       MEDIA_VISION_FUNCTION_ENTER();
+
+       int ret = MEDIA_VISION_ERROR_NONE;
+
+    mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+       //ret = mv_inference_prepare_lic(infer);
+
+#else
+
+       ret = mv_inference_configure_model_open(infer, engine_config);
     if (ret != MEDIA_VISION_ERROR_NONE){
-        LOGE("Fail to configure tensor information");
+        LOGE("Fail to configure model");
         return ret;
     }
 
-    ret = mv_inference_configure_engine_open(infer, engine_config);
+    ret = mv_inference_configure_tensor_info_open(infer, engine_config);
     if (ret != MEDIA_VISION_ERROR_NONE){
-        LOGE("Fail to configure engine and target");
+        LOGE("Fail to configure tensor information");
         return ret;
     }
 
@@ -138,28 +162,6 @@ int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_confi
                return ret;
        }
 
-#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
-
-       MEDIA_VISION_FUNCTION_LEAVE();
-       return ret;
-}
-
-
-int mv_inference_prepare(mv_inference_h infer)
-{
-       MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
-       MEDIA_VISION_INSTANCE_CHECK(infer);
-
-       MEDIA_VISION_FUNCTION_ENTER();
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
-
-       //ret = mv_inference_prepare_lic(infer);
-
-#else
-
        ret = mv_inference_prepare_open(infer);
 
 #endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
old mode 100644 (file)
new mode 100755 (executable)
index e044849..69ceaaa
 
 using namespace mediavision::inference;
 
+mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer)
+{
+       Inference *pInfer = static_cast<Inference *>(infer);
+       return pInfer->GetEngineConfig();
+}
+
 int mv_inference_create_open(mv_inference_h *infer)
 {
        if (infer == NULL ) {
@@ -61,6 +67,8 @@ int mv_inference_destroy_open(mv_inference_h infer)
 
 int mv_inference_configure_model_open(mv_inference_h infer, mv_engine_config_h engine_config)
 {
+       LOGI("ENTER");
+
     Inference *pInfer = static_cast<Inference *>(infer);
 
     int ret = MEDIA_VISION_ERROR_NONE;
@@ -71,6 +79,7 @@ int mv_inference_configure_model_open(mv_inference_h infer, mv_engine_config_h e
        double modelMeanValue = 0.0;
        int backendType= 0;
        size_t userFileLength = 0;
+
     ret = mv_engine_config_get_string_attribute(engine_config,
                                                                                        MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
                                                                                        &modelConfigFilePath);
@@ -147,6 +156,8 @@ _ERROR_:
        if (modelUserFilePath)
                free(modelUserFilePath);
 
+       LOGI("LEAVE");
+
     return ret;
 }
 
@@ -211,11 +222,12 @@ _ERROR_ :
 int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config)
 {
     Inference *pInfer = static_cast<Inference *>(infer);
-
        int backendType = 0;
        int targetType = 0;
     int ret = MEDIA_VISION_ERROR_NONE;
 
+       pInfer->SetEngineConfig(engine_config);
+
        ret = mv_engine_config_get_int_attribute(engine_config,
                                                                                        MV_INFERENCE_BACKEND_TYPE,
                                                                                        &backendType);
@@ -234,6 +246,16 @@ int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h
 
        ret = pInfer->ConfigureEngine((mv_inference_backend_type_e)backendType,
                                        (mv_inference_target_type_e)targetType);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to configure a backend engine.");
+               goto _ERROR_;
+    }
+
+       // Create an inference-engine-common class object and load its corresponding library.
+       ret = pInfer->Bind();
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to bind a backend engine.");
+       }
 
 _ERROR_:
        return ret;
@@ -351,9 +373,17 @@ int mv_inference_prepare_open(mv_inference_h infer)
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
+    // Pass the parameters needed by the backend engine to load the model files.
        ret = pInfer->Prepare();
-       if (ret != MEDIA_VISION_ERROR_NONE)
+       if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to prepare inference");
+        return ret;
+    }
+
+    // Request the backend engine to load the model files.
+    ret = pInfer->Load();
+    if (ret != MEDIA_VISION_ERROR_NONE)
+        LOGE("Fail to load model files.");
 
        return ret;
 }