mv_inference: use new Load callback and drop Init callback
author Inki Dae <inki.dae@samsung.com>
Wed, 5 Feb 2020 07:06:28 +0000 (16:06 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:40:31 +0000 (09:40 +0900)
This patch replaces the Load callback with a new version and drops the
Init callback. All the Init callback did was bind a backend engine
library with a given backend name, so it is replaced with a BindBackend
callback to make that purpose clear.
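For reference, the call-site change looks roughly like this (a minimal
sketch based on the diff below; the exact signatures live in the
inference-engine-interface headers):

    // Before: Init() bound the backend library and took the model
    // file paths in one call.
    ret = mBackend->Init(mConfig.mConfigFilePath,
                         mConfig.mWeightFilePath,
                         mConfig.mUserFilePath);

    // After: BindBackend() only binds the library named in the config;
    // the model files go to the new Load() callback separately.
    ret = mBackend->BindBackend(&config);

    std::vector<std::string> models;
    models.push_back(mConfig.mWeightFilePath);
    models.push_back(mConfig.mUserFilePath);
    ret = mBackend->Load(models, 1);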

Change-Id: I31bc6a0d8f753ba52babe721f3e38181487b191b
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_inference/inference/src/Inference.cpp

index 618a5034991d5dc730826a01d3bdf6aa43b9a15f..6010a97aba4516e1da8b181d9a805828f360d46a 100644 (file)
@@ -222,6 +222,8 @@ int Inference::Prepare()
         .backend_name = backendName,
         .target_devices = INFERENCE_TARGET_CPU,
     };
+
+       // Create a backend engine object.
        mBackend = new InferenceEngineVision(&config);
 
        if (!mBackend) {
@@ -229,13 +231,12 @@ int Inference::Prepare()
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       ret = mBackend->Init(mConfig.mConfigFilePath,
-                                       mConfig.mWeightFilePath,
-                                       mConfig.mUserFilePath);
-
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               return ConvertEngineErrorToVisionError(ret);
-       }
+       // Bind backend library.
+       ret = mBackend->BindBackend(&config);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
 
        // Input Tensor Param
        mBackend->SetInputTensorParamInput(mConfig.mTensorInfo.width,
@@ -254,8 +255,15 @@ int Inference::Prepare()
 
        mBackend->SetOutputTensorParamNodes(mConfig.mOutputNodeNames);
 
-       // load model
-       ret = mBackend->Load();
+       // Add model files to load.
+       // TODO: Model files and their corresponding label files should
+       // be added per user request.
+       std::vector<std::string> models;
+       models.push_back(mConfig.mWeightFilePath);
+       models.push_back(mConfig.mUserFilePath);
+
+       // Request model loading from the backend engine.
+       ret = mBackend->Load(models, 1);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                delete mBackend;
                LOGE("Fail to load model");
@@ -267,7 +275,7 @@ int Inference::Prepare()
 
        // target type
        // foreach supported??
-       mBackend->SetTargetDevice(mConfig.mTargetType);
+       mBackend->SetTargetDevice(config.target_devices);
        LOGE("LEAVE");
        return MEDIA_VISION_ERROR_NONE;
 }
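
Taken together, the backend-facing API this patch relies on looks
roughly as follows. This is a sketch reconstructed from the call sites
above, not the actual inference-engine-interface declaration; the
config type name and the meaning of Load()'s second argument (passed
as 1 above) are assumptions:

    class InferenceEngineVision {
    public:
        // The config carries backend_name and target_devices, as built
        // in Prepare() above; the struct's type name is assumed here.
        explicit InferenceEngineVision(inference_engine_config *config);

        // Replaces Init(): binds the backend engine library selected
        // by config->backend_name, without touching any model files.
        int BindBackend(inference_engine_config *config);

        // New Load(): takes the model files to load (the weight file
        // and the user/label file above) plus an integer option whose
        // meaning is not visible in this diff.
        int Load(std::vector<std::string> model_paths, int option);
    };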