From: Tae-Young Chung
Date: Fri, 18 Mar 2022 05:41:26 +0000 (+0900)
Subject: Update GetCustomProp() to return OutputLayer as well as Runtime
X-Git-Tag: submit/tizen_6.5/20220404.012322~2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=26c75a374dcacbb81baca620cb8c3662fdfef0f2;p=platform%2Fcore%2Fmultimedia%2Finference-engine-mlapi.git

Update GetCustomProp() to return OutputLayer as well as Runtime

[Version] 0.0.3-0
[Issue type] update

Change-Id: Ia8e9a80392e8dbaf1e0d7373743a179635ea546e
Signed-off-by: Tae-Young Chung
---

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index 3411032..346bd26 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name:        inference-engine-mlapi
 Summary:     ML Single API backend of NNStreamer for MediaVision
-Version:     0.0.2
+Version:     0.0.3
 Release:     0
 Group:       Multimedia/Libraries
 License:     Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index 51c9b43..e0079a9 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -290,13 +290,33 @@ namespace MLAPIImpl
 		}
 	}
 
-	const char* InferenceMLAPI::GetCustomProp()
+	std::string InferenceMLAPI::GetCustomProp()
 	{
 		if (mPluginType != INFERENCE_BACKEND_NPU_SNPE)
 			return "";
 
-		return mTargetDevice == INFERENCE_TARGET_CPU ? "RUNTIME:CPU" :
-			   mTargetDevice == INFERENCE_TARGET_GPU ? "RUNTIME:GPU" : "RUNTIME:DSP";
+		std::string custom;
+
+		if (mTargetDevice == INFERENCE_TARGET_CPU)
+			custom = "Runtime:CPU";
+		else if (mTargetDevice == INFERENCE_TARGET_GPU)
+			custom = "Runtime:GPU";
+		else
+			custom = "Runtime:DSP";
+
+		if (!mOutputProperty.layers.empty()) {
+			size_t layerSize = mOutputProperty.layers.size();
+
+			custom += ",OutputLayer:";
+			for (auto& layer : mOutputProperty.layers) {
+				LOGI("output layer name = %s", layer.first.c_str());
+				custom += layer.first;
+				if (--layerSize > 0) {
+					custom += ";";
+				}
+			}
+		}
+
+		return custom;
 	}
 
 	int InferenceMLAPI::Load(std::vector<std::string> model_paths,
@@ -324,8 +344,11 @@ namespace MLAPIImpl
 
 		ml_tensors_info_h in_info = NULL, out_info = NULL;
 
+		auto customOp = GetCustomProp();
+		LOGI("customOp: %s", customOp.c_str());
+
 		int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
-									  nnfw_type, nnfw_hw, GetCustomProp());
+									  nnfw_type, nnfw_hw, customOp.c_str());
 		if (err != ML_ERROR_NONE) {
 			LOGE("Failed to request ml_single_open_full(%d).", err);
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
diff --git a/src/inference_engine_mlapi_private.h b/src/inference_engine_mlapi_private.h
index df5b317..1a3df04 100644
--- a/src/inference_engine_mlapi_private.h
+++ b/src/inference_engine_mlapi_private.h
@@ -89,7 +89,7 @@ namespace MLAPIImpl
 		bool IsFileReadable(const std::string& path);
 		std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
 		std::string GetModelPath(const std::vector<std::string>& model_paths);
-		const char* GetCustomProp();
+		std::string GetCustomProp();
 #else
 		int ConvertTensorType(int tensor_type);
 #endif
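
For reference, the custom option string that the patched GetCustomProp() hands to
ml_single_open_full() has the shape "Runtime:DSP,OutputLayer:name1;name2". Below is a
minimal standalone sketch of that formatting logic only; buildCustomProp() and the layer
names are hypothetical stand-ins for the mTargetDevice and mOutputProperty.layers state
used in the real backend, not part of the patch:

	#include <iostream>
	#include <map>
	#include <string>

	// Hypothetical helper mirroring the patched GetCustomProp() logic;
	// not an actual API of inference-engine-mlapi.
	static std::string buildCustomProp(const std::string& runtime,
	                                   const std::map<std::string, int>& layers)
	{
		std::string custom = "Runtime:" + runtime;

		if (!layers.empty()) {
			size_t layerSize = layers.size();

			// Layer names are joined with ';' under a single "OutputLayer" key.
			custom += ",OutputLayer:";
			for (const auto& layer : layers) {
				custom += layer.first;
				if (--layerSize > 0)
					custom += ";";
			}
		}

		return custom;
	}

	int main()
	{
		// Example layer names (hypothetical); the real ones come from
		// mOutputProperty.layers as set by the caller.
		std::map<std::string, int> layers = { { "conv2d_out", 0 }, { "softmax_out", 1 } };

		// Prints: Runtime:DSP,OutputLayer:conv2d_out;softmax_out
		std::cout << buildCustomProp("DSP", layers) << std::endl;

		return 0;
	}

Since the option string is now composed at runtime rather than chosen from string
literals, the return type change from const char* to std::string is presumably what
makes the patch safe: a const char* into a local buffer would dangle once
GetCustomProp() returned.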