Update GetCustomProp() to return OutputLayer as well as Runtime 10/272510/6
author Tae-Young Chung <ty83.chung@samsung.com>
Fri, 18 Mar 2022 05:41:26 +0000 (14:41 +0900)
committer Tae-Young Chung <ty83.chung@samsung.com>
Mon, 21 Mar 2022 07:01:42 +0000 (07:01 +0000)
[Version] 0.0.3-0
[Issue type] update
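
For the SNPE NPU backend, GetCustomProp() now returns a std::string
instead of a const char*: it combines the Runtime target with the
semicolon-separated output layer names registered in mOutputProperty,
and the result is passed to ml_single_open_full() as the custom
property. With hypothetical layer names, the returned string would
look like:

    Runtime:GPU,OutputLayer:conv_out;score_out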

Change-Id: Ia8e9a80392e8dbaf1e0d7373743a179635ea546e
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp
src/inference_engine_mlapi_private.h

index 3411032ee0ee9d7951f534e338579cd47dcbddf6..346bd26c927f52e01ed5d0068915b0764626bbff 100644 (file)
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.0.2
+Version:    0.0.3
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
index 51c9b43f474e1539617cc91642554e6f0932a442..e0079a96dce40387f76737bdbd7c39d23a500dfa 100644 (file)
@@ -290,13 +290,33 @@ namespace MLAPIImpl
                }
        }
 
-       const char* InferenceMLAPI::GetCustomProp()
+       std::string InferenceMLAPI::GetCustomProp()
        {
                if (mPluginType != INFERENCE_BACKEND_NPU_SNPE)
                        return "";
 
-               return mTargetDevice == INFERENCE_TARGET_CPU ? "RUNTIME:CPU" :
-                                  mTargetDevice == INFERENCE_TARGET_GPU ? "RUNTIME:GPU" : "RUNTIME:DSP";
+               std::string custom;
+               if (mTargetDevice == INFERENCE_TARGET_CPU)
+                       custom = "Runtime:CPU";
+               else if (mTargetDevice == INFERENCE_TARGET_GPU)
+                       custom = "Runtime:GPU";
+               else
+                       custom = "Runtime:DSP";
+
+               if (!mOutputProperty.layers.empty()) {
+                       size_t layerSize = mOutputProperty.layers.size();
+                       custom += ",OutputLayer:";
+
+                       for (auto& layer : mOutputProperty.layers) {
+                               LOGI("output layer name = %s", layer.first.c_str());
+                               custom += layer.first;
+                               if (--layerSize > 0) {
+                                       custom += ";";
+                               }
+                       }
+               }
+
+               return custom;
        }
 
        int InferenceMLAPI::Load(std::vector<std::string> model_paths,
@@ -324,8 +344,11 @@ namespace MLAPIImpl
 
                ml_tensors_info_h in_info = NULL, out_info = NULL;
 
+               auto customOp = GetCustomProp();
+               LOGI("customOp: %s", customOp.c_str());
+
                int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
-                                                                nnfw_type, nnfw_hw, GetCustomProp());
+                                                                nnfw_type, nnfw_hw, customOp.c_str());
                if (err != ML_ERROR_NONE) {
                        LOGE("Failed to request ml_single_open_full(%d).", err);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
index df5b3174f799b1af0d07b6687eb61bb0a01f344f..1a3df048c09f3a4919115bd6ecdc466770fe52dd 100644 (file)
@@ -89,7 +89,7 @@ namespace MLAPIImpl
                bool IsFileReadable(const std::string& path);
                std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
                std::string GetModelPath(const std::vector<std::string>& model_paths);
-               const char* GetCustomProp();
+               std::string GetCustomProp();
 #else
                int ConvertTensorType(int tensor_type);
 #endif
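
For reference, below is a minimal standalone sketch of the string-building
technique this patch uses: join the keys of a name-keyed map with ';' and
prefix the Runtime token. The build_custom_prop() helper, the int-valued
map standing in for mOutputProperty.layers, and the layer names are
illustrative assumptions, not part of the patch.

// Standalone sketch of the custom-property construction in this patch.
// The map below stands in for mOutputProperty.layers (name -> tensor info);
// the layer names are hypothetical.
#include <iostream>
#include <map>
#include <string>

static std::string build_custom_prop(const std::string &runtime,
                                     const std::map<std::string, int> &layers)
{
        std::string custom = "Runtime:" + runtime;

        if (!layers.empty()) {
                size_t remaining = layers.size();
                custom += ",OutputLayer:";

                // Join layer names with ';', omitting the trailing separator.
                for (const auto &layer : layers) {
                        custom += layer.first;
                        if (--remaining > 0)
                                custom += ";";
                }
        }

        return custom;
}

int main()
{
        std::map<std::string, int> layers = { { "conv_out", 0 }, { "score_out", 1 } };

        // Prints: Runtime:GPU,OutputLayer:conv_out;score_out
        std::cout << build_custom_prop("GPU", layers) << std::endl;
        return 0;
}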