From: Inki Dae
Date: Thu, 11 Jun 2020 07:29:14 +0000 (+0900)
Subject: Set model path according to MLAPI backend
X-Git-Tag: submit/tizen/20200626.050805~3
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c08e5496c150d2c667cb4caf9bce0f0d02dcff6e;p=platform%2Fcore%2Fmultimedia%2Finference-engine-mlapi.git

Set model path according to MLAPI backend

NNFW - the in-house NN runtime - needs an NNPackage type of package,
which is a directory containing a model file and its metadata file.
For more details, refer to
https://github.com/Samsung/ONE/tree/master/nnpackage/examples/one_op_in_tflite

The ML Single API framework of NNStreamer receives the full path of a
given model file from its user - in our case, the inference-engine-mlapi
backend - and finds the metadata in the directory where the given model
file is located. So the inference-engine-mlapi backend should pass the
full path of the given model file to the ML Single API framework.

Change-Id: I6bdd871d5b683dbd6e60fce0f6dbd052985cd514
Signed-off-by: Inki Dae
---

diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index d6d4706..ddb6507 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -103,11 +103,7 @@ namespace MLAPIImpl
 	{
 		LOGI("ENTER");
 
-		// ML Single API of MLAPI requires model_paths rule like below,
-		// "so library file path,nb model file path" or vise versa.
-		std::string model_str(model_paths[0] + "," + model_paths[1]);
-
-		LOGI("Model name = %s", model_str.c_str());
+		std::string model_str("");
 
 		// TODO. Set NNFW backend type and HW type properly.
 
@@ -128,9 +124,14 @@ namespace MLAPIImpl
 				LOGE("Invalid target device type.");
 				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
 			}
+
+			// ML Single API of MLAPI requires model_paths rule like below,
+			// "so library file path,nb model file path" or vice versa.
+			model_str += model_paths[0] + "," + model_paths[1];
 			break;
 		case INFERENCE_BACKEND_NNFW:
 			nnfw_type = ML_NNFW_TYPE_NNFW;
+
 			if (mTargetDevice == INFERENCE_TARGET_CPU) {
 				nnfw_hw = ML_NNFW_HW_CPU_NEON;
 				LOGI("Target device is NEON.");
@@ -141,6 +142,8 @@ namespace MLAPIImpl
 				LOGE("Invalid inference target device type.");
 				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
 			}
+
+			model_str += model_paths[0];
 			LOGI("NNFW tensor filter will be used.");
 			break;
 		// TODO.
@@ -149,6 +152,11 @@ namespace MLAPIImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
 		}
 
+		LOGI("Model name = %s", model_str.c_str());
+
+		// TODO. create ml_tensor_info for input and output tensor and pass
+		// them as parameters of ml_single_open function.
+
 		int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL,
 					 nnfw_type, nnfw_hw);
 		if (ret != ML_ERROR_NONE) {
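
For context, an NNPackage as consumed by NNFW is a small directory tree.
A rough sketch, based on the one_op_in_tflite example linked above (file
names are illustrative):

    one_op_in_tflite/
      add.tflite
      metadata/
        MANIFEST

The MANIFEST describes the package (model file name, model type, and so
on), which is why the ML Single API only needs the model file's full
path: the metadata directory is resolved relative to it.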
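
To make the call flow concrete, below is a minimal sketch of opening a
model through the ML Single API the way the NNFW branch above does: a
single full model path rather than a comma-separated pair. It assumes
the Tizen NNStreamer single-shot header is available, and the model
path is hypothetical; only ml_single_open/ml_single_close are shown.

    /* Minimal sketch, not part of this patch: open a model via the
     * ML Single API for the NNFW runtime. The model path below is
     * hypothetical; for NNFW, the NNPackage metadata is expected to
     * sit in the same directory as the model file.
     */
    #include <stdio.h>
    #include <nnstreamer-single.h>	/* header location may vary by Tizen packaging */

    int main(void)
    {
    	ml_single_h single = NULL;
    	const char *model_path = "/opt/usr/models/add.tflite"; /* hypothetical */

    	/* Pass the full path of the model file itself, as the patched
    	 * backend now does for INFERENCE_BACKEND_NNFW. */
    	int ret = ml_single_open(&single, model_path, NULL, NULL,
    				 ML_NNFW_TYPE_NNFW, ML_NNFW_HW_CPU_NEON);
    	if (ret != ML_ERROR_NONE) {
    		fprintf(stderr, "ml_single_open failed: %d\n", ret);
    		return 1;
    	}

    	/* ... prepare input tensors and call ml_single_invoke() here ... */

    	ml_single_close(single);
    	return 0;
    }

Passing NULL for the input/output tensor information mirrors the TODO
in the patch: ml_single_open() can derive it from the model, and
explicit ml_tensors_info handles can be supplied later instead.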