Bring GetCustomProp patch  [78/279678/1]
author    Kwanghoon Son <k.son@samsung.com>
Tue, 16 Aug 2022 04:24:41 +0000 (00:24 -0400)
committer Kwanghoon Son <k.son@samsung.com>
Tue, 16 Aug 2022 04:24:41 +0000 (00:24 -0400)
Change GetCustomProp() to return std::string and, for the SNPE backend,
append the designated output layer names (OutputTensor) and UserBuffer:true
to the custom property string. This patch is taken from:
https://review.tizen.org/gerrit/c/platform/core/multimedia/inference-engine-mlapi/+/272510
https://review.tizen.org/gerrit/c/platform/core/multimedia/inference-engine-mlapi/+/273028
https://review.tizen.org/gerrit/c/platform/core/multimedia/inference-engine-mlapi/+/274047

Change-Id: If7399a2be855b12c9bd7cdb1ef69fb55a3f6b0b3
Signed-off-by: Kwanghoon Son <k.son@samsung.com>
src/inference_engine_mlapi.cpp
src/inference_engine_mlapi_private.h

diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index b23ffe3..46fa41b 100644
@@ -293,13 +293,37 @@ namespace MLAPIImpl
                }
        }
 
-       const char *InferenceMLAPI::GetCustomProp()
+       std::string InferenceMLAPI::GetCustomProp()
        {
                if (mPluginType != INFERENCE_BACKEND_SNPE)
                        return "";
 
-               return mTargetDevice == INFERENCE_TARGET_CPU ? "RUNTIME:CPU" :
-                          mTargetDevice == INFERENCE_TARGET_GPU ? "RUNTIME:GPU" : "RUNTIME:DSP";
+               std::string custom;
+               if (mTargetDevice == INFERENCE_TARGET_CPU)
+                       custom = "Runtime:CPU";
+               else if (mTargetDevice == INFERENCE_TARGET_GPU)
+                       custom = "Runtime:GPU";
+               else
+                       custom = "Runtime:DSP";
+
+               if (!mOutputProperty.layers.empty())
+               {
+                       size_t layerSize = mOutputProperty.layers.size();
+                       custom += ",OutputTensor:";
+
+                       for (auto &layer : mOutputProperty.layers)
+                       {
+                               LOGI("output layer name = %s", layer.first.c_str());
+                               custom += layer.first;
+                               if (--layerSize > 0)
+                               {
+                                       custom += ";";
+                               }
+                       }
+                       custom += ",UserBuffer:true";
+               }
+
+               return custom;
        }
 
        int InferenceMLAPI::Load(std::vector<std::string> model_paths,
@@ -340,9 +364,11 @@ namespace MLAPIImpl
                                return ret;
                        }
                }
+               auto customOp = GetCustomProp();
+               LOGI("customOp: %s", customOp.c_str());
 
                int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
-                                                                nnfw_type, nnfw_hw, GetCustomProp());
+                                                                nnfw_type, nnfw_hw, customOp.c_str());
                if (err != ML_ERROR_NONE) {
                        LOGE("Failed to request ml_single_open_full(%d).", err);
                        ml_tensors_info_destroy(in_info);
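
For reference, the option string produced by the new GetCustomProp() can be
reproduced with a short standalone sketch. The TargetDevice enum, the
buildSnpeCustomProp() helper, the std::map<std::string, int> stand-in for
mOutputProperty.layers, and the sample layer names are illustrative, not
identifiers from the patched sources:

#include <cstdio>
#include <map>
#include <string>

enum class TargetDevice { CPU, GPU, DSP };

/* Mirrors the patched GetCustomProp(): pick the SNPE runtime, then append
   the designated output layer names and the UserBuffer option. */
static std::string buildSnpeCustomProp(TargetDevice dev,
				       const std::map<std::string, int> &outputLayers)
{
	std::string custom = dev == TargetDevice::CPU ? "Runtime:CPU" :
			     dev == TargetDevice::GPU ? "Runtime:GPU" : "Runtime:DSP";

	if (!outputLayers.empty()) {
		size_t layerSize = outputLayers.size();
		custom += ",OutputTensor:";

		for (auto &layer : outputLayers) {
			custom += layer.first;
			if (--layerSize > 0)
				custom += ";"; /* ';' separates layers, ',' separates options */
		}
		custom += ",UserBuffer:true";
	}
	return custom;
}

int main()
{
	/* prints "Runtime:GPU,OutputTensor:bbox_out;score_out,UserBuffer:true" */
	std::printf("%s\n", buildSnpeCustomProp(TargetDevice::GPU,
						{ { "bbox_out", 0 }, { "score_out", 1 } }).c_str());
	return 0;
}
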
diff --git a/src/inference_engine_mlapi_private.h b/src/inference_engine_mlapi_private.h
index 60d2570..07d57ef 100644
@@ -90,7 +90,7 @@ namespace MLAPIImpl
                bool IsFileReadable(const std::string& path);
                std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
                std::string GetModelPath(const std::vector<std::string>& model_paths);
-               const char *GetCustomProp();
+               std::string GetCustomProp();
                int GetTensorInfo(std::map<std::string, int>& designated_layers,
                                                  std::map<std::string, inference_engine_tensor_buffer> &buffers,
                                                  ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle);
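
One note on why the signature changed from const char * to std::string: the
old implementation only ever returned string literals, which have static
storage, but the new property string is built at runtime, so returning a raw
pointer into it would dangle. Load() therefore keeps the result in a named
local (customOp) and only hands .c_str() to the C API. A minimal sketch of
that pattern, with open_model() as a hypothetical stand-in for
ml_single_open_full():

#include <cstdio>
#include <string>

/* Stand-in for the patched GetCustomProp(): the string is built at runtime. */
static std::string GetCustomProp()
{
	return std::string("Runtime:DSP") + ",UserBuffer:true";
}

/* Hypothetical stand-in for ml_single_open_full(), which takes const char *. */
static int open_model(const char *custom_option)
{
	std::printf("custom_option = %s\n", custom_option);
	return 0;
}

int main()
{
	/* const char *p = GetCustomProp().c_str();   // WRONG: the temporary
	   std::string dies at the end of that statement, leaving p dangling. */
	auto customOp = GetCustomProp();		 /* named local owns the buffer */
	std::printf("customOp: %s\n", customOp.c_str()); /* safe to log first... */
	return open_model(customOp.c_str());		 /* ...then pass to the C API */
}
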