From 1181e97cf26aae40cf357bb3d84b325a77da4e2b Mon Sep 17 00:00:00 2001
From: Kwanghoon Son
Date: Tue, 16 Aug 2022 00:24:41 -0400
Subject: [PATCH] Bring GetCustomProp patch

This patch is from
https://review.tizen.org/gerrit/c/platform/core/multimedia/inference-engine-mlapi/+/272510
https://review.tizen.org/gerrit/c/platform/core/multimedia/inference-engine-mlapi/+/273028
https://review.tizen.org/gerrit/c/platform/core/multimedia/inference-engine-mlapi/+/274047

Change-Id: If7399a2be855b12c9bd7cdb1ef69fb55a3f6b0b3
Signed-off-by: Kwanghoon Son
---
 src/inference_engine_mlapi.cpp       | 34 ++++++++++++++++++++++++++++++----
 src/inference_engine_mlapi_private.h |  2 +-
 2 files changed, 31 insertions(+), 5 deletions(-)

diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index b23ffe3..46fa41b 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -293,13 +293,37 @@ namespace MLAPIImpl
 		}
 	}
 
-	const char *InferenceMLAPI::GetCustomProp()
+	std::string InferenceMLAPI::GetCustomProp()
 	{
 		if (mPluginType != INFERENCE_BACKEND_SNPE)
 			return "";
 
-		return mTargetDevice == INFERENCE_TARGET_CPU ? "RUNTIME:CPU" :
-			   mTargetDevice == INFERENCE_TARGET_GPU ? "RUNTIME:GPU" : "RUNTIME:DSP";
+		std::string custom;
+		if (mTargetDevice == INFERENCE_TARGET_CPU)
+			custom = "Runtime:CPU";
+		else if (mTargetDevice == INFERENCE_TARGET_GPU)
+			custom = "Runtime:GPU";
+		else
+			custom = "Runtime:DSP";
+
+		if (!mOutputProperty.layers.empty())
+		{
+			size_t layerSize = mOutputProperty.layers.size();
+			custom += ",OutputTensor:";
+
+			for (auto &layer : mOutputProperty.layers)
+			{
+				LOGI("output layer name = %s", layer.first.c_str());
+				custom += layer.first;
+				if (--layerSize > 0)
+				{
+					custom += ";";
+				}
+			}
+			custom += ",UserBuffer:true";
+		}
+
+		return custom;
 	}
 
 	int InferenceMLAPI::Load(std::vector<std::string> model_paths,
@@ -340,9 +364,11 @@
 			return ret;
 		}
 	}
+	auto customOp = GetCustomProp();
+	LOGI("customOp: %s", customOp.c_str());
 
 	int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
-			nnfw_type, nnfw_hw, GetCustomProp());
+			nnfw_type, nnfw_hw, customOp.c_str());
 	if (err != ML_ERROR_NONE) {
 		LOGE("Failed to request ml_single_open_full(%d).", err);
 		ml_tensors_info_destroy(in_info);
diff --git a/src/inference_engine_mlapi_private.h b/src/inference_engine_mlapi_private.h
index 60d2570..07d57ef 100644
--- a/src/inference_engine_mlapi_private.h
+++ b/src/inference_engine_mlapi_private.h
@@ -90,7 +90,7 @@
 		bool IsFileReadable(const std::string& path);
 		std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
 		std::string GetModelPath(const std::vector<std::string>& model_paths);
-		const char *GetCustomProp();
+		std::string GetCustomProp();
 		int GetTensorInfo(std::map<std::string, int>& designated_layers,
 				std::map<std::string, inference_engine_tensor_buffer> &buffers,
 				ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle);
-- 
2.7.4
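
Illustrative note (not part of the patch): for an SNPE model targeting the
GPU whose output property lists two layers named, say, "prob" and "argmax"
(hypothetical names), GetCustomProp() now builds a string such as

    Runtime:GPU,OutputTensor:prob;argmax,UserBuffer:true

which is passed as the final custom-option argument to ml_single_open_full().
For non-SNPE backends the function returns an empty string, and when no
output layers are registered only the Runtime:<device> token is emitted.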