Set model path according to MLAPI backend 63/235963/3
author    Inki Dae <inki.dae@samsung.com>
Thu, 11 Jun 2020 07:29:14 +0000 (16:29 +0900)
committer Inki Dae <inki.dae@samsung.com>
Thu, 11 Jun 2020 11:22:41 +0000 (20:22 +0900)
NNFW - an in-house NN runtime - needs an NNPackage type of package,
which is a directory containing a model file and its metadata file.
For more details, you can refer to
https://github.com/Samsung/ONE/tree/master/nnpackage/examples/one_op_in_tflite
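
For reference, a minimal NNPackage layout looks roughly like the following
(names are taken from the linked one_op_in_tflite example; the exact manifest
contents are defined by the NNPackage specification, so treat this as an
illustrative sketch only):

    one_op_in_tflite/
        metadata/
            MANIFEST      <- JSON metadata describing the packaged model
        add.tflite        <- the model file itself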

The ML Single API framework of NNStreamer receives the full path of a given model
file from the user - in our case, the inference-engine-mlapi backend - and finds
the metadata in the directory where the given model file is located.

So the inference-engine-mlapi backend should pass the full path of
the given model file to the ML Single API framework.
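
As a minimal sketch of what this change implies for the NNFW case (the helper
function below is illustrative, not the actual code in inference_engine_mlapi.cpp),
the backend now hands ml_single_open only the full path of the model file and lets
the framework locate the NNPackage metadata next to it:

    #include <nnstreamer-single.h>
    #include <string>

    static int open_nnfw_model(const std::string &model_path, ml_single_h *single)
    {
            // For NNFW, pass only the full path of the given model file; the
            // ML Single API finds the metadata in the directory where this
            // model file is located.
            int ret = ml_single_open(single, model_path.c_str(), NULL, NULL,
                                     ML_NNFW_TYPE_NNFW, ML_NNFW_HW_ANY);
            if (ret != ML_ERROR_NONE)
                    return ret;

            return ML_ERROR_NONE;
    }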

Change-Id: I6bdd871d5b683dbd6e60fce0f6dbd052985cd514
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_mlapi.cpp

index d6d4706408883f27eba9bfeaa782ed12fb388cc8..ddb650707dd1622500cdbc2aff067932c940d6b3 100644 (file)
@@ -103,11 +103,7 @@ namespace MLAPIImpl
        {
                LOGI("ENTER");
 
-               // ML Single API of MLAPI requires model_paths rule like below,
-               // "so library file path,nb model file path" or vise versa.
-               std::string model_str(model_paths[0] + "," + model_paths[1]);
-
-               LOGI("Model name = %s", model_str.c_str());
+               std::string model_str("");
 
                // TODO. Set NNFW backend type and HW type properly.
 
@@ -128,9 +124,14 @@ namespace MLAPIImpl
                                LOGE("Invalid target device type.");
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                        }
+
+                       // ML Single API of MLAPI requires model_paths rule like below,
+                       // "so library file path,nb model file path" or vice versa.
+                       model_str += model_paths[0] + "," + model_paths[1];
                        break;
                case INFERENCE_BACKEND_NNFW:
                        nnfw_type = ML_NNFW_TYPE_NNFW;
+
                        if (mTargetDevice == INFERENCE_TARGET_CPU) {
                                nnfw_hw = ML_NNFW_HW_CPU_NEON;
                                LOGI("Target device is NEON.");
@@ -141,6 +142,8 @@ namespace MLAPIImpl
                                LOGE("Invalid inference target device type.");
                                return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                        }
+
+                       model_str += model_paths[0];
                        LOGI("NNFW tensor filter will be used.");
                        break;
                // TODO.
@@ -149,6 +152,11 @@ namespace MLAPIImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                }
 
+               LOGI("Model name = %s", model_str.c_str());
+
+               // TODO. create ml_tensor_info for input and output tensor and pass
+               //               them as parameters of ml_single_open function.
+
                int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL,
                                                                 nnfw_type, nnfw_hw);
                if (ret != ML_ERROR_NONE) {