src: use ml_single_open_full api 79/268679/2
author    Inki Dae <inki.dae@samsung.com>
Wed, 29 Dec 2021 09:23:42 +0000 (18:23 +0900)
committer Inki Dae <inki.dae@samsung.com>
Thu, 30 Dec 2021 05:46:21 +0000 (14:46 +0900)
[Version] : 0.3.2-0
[Issue type] : bug fix

Replace ml_single_open with the ml_single_open_full API
to support various target devices with the SNPE engine.

With the SNPE tensor filter of NNStreamer, the target device
is decided by a user-given custom property, so use the
ml_single_open_full API, which accepts such a custom property.

Change-Id: I2a6f1ab2b619c59164e4043fcfb03dd0cea97ad6
Signed-off-by: Inki Dae <inki.dae@samsung.com>
packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp
src/inference_engine_mlapi_private.h

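For context, a minimal sketch of how ml_single_open_full() is called with an
SNPE custom property. The model path and the NULL tensor-info handles are
hypothetical placeholders (the real values are built in InferenceMLAPI::Load()),
and the header names are assumed from NNStreamer's C API:

#include <nnstreamer.h>
#include <nnstreamer-single.h>

/* Hypothetical illustration: open an SNPE model on the GPU by passing
 * the "RUNTIME:GPU" custom property to ml_single_open_full(). */
ml_single_h single = nullptr;
int err = ml_single_open_full(&single, "/path/to/model.dlc",
                              nullptr /* in_info */, nullptr /* out_info */,
                              ML_NNFW_TYPE_SNPE, ML_NNFW_HW_ANY,
                              "RUNTIME:GPU");
if (err != ML_ERROR_NONE) {
        /* Open failed; err holds an ml_error_e code. */
}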
index 230705de28b79205e9671395356118eac1694f10..97e668cc03031f6195b007d4f1ed4a14403b3d51 100644 (file)
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.3.1
+Version:    0.3.2
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
index b9aeaf4bbe2570c44c6c20004bb8d440fac5cf34..e119e57a65bbbdeba1bd8f3ee81c0e2c93cf47d0 100644 (file)
@@ -277,6 +277,15 @@ namespace MLAPIImpl
                }
        }
 
+       const char *InferenceMLAPI::GetCustomProp()
+       {
+               if (mPluginType != INFERENCE_BACKEND_SNPE)
+                       return "";
+
+               return mTargetDevice == INFERENCE_TARGET_CPU ? "RUNTIME:CPU" :
+                          mTargetDevice == INFERENCE_TARGET_GPU ? "RUNTIME:GPU" : "RUNTIME:DSP";
+       }
+
        int InferenceMLAPI::Load(std::vector<std::string> model_paths,
                                                         inference_model_format_e model_format)
        {
@@ -314,8 +323,8 @@ namespace MLAPIImpl
                                return ret;
                }
 
-               int err = ml_single_open(&mSingle, model_str.c_str(), in_info, out_info,
-                                                                nnfw_type, nnfw_hw);
+               int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
+                                                                nnfw_type, nnfw_hw, GetCustomProp());
                if (err != ML_ERROR_NONE) {
                        LOGE("Failed to request ml_single_open(%d).", err);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
index 98ab53a029d84e020bbeb424c2fe70c385b24d21..52dfaff86b86b536f2b483892970af8ff92766c3 100644 (file)
@@ -88,6 +88,7 @@ namespace MLAPIImpl
                bool IsFileReadable(const std::string& path);
                std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
                std::string GetModelPath(const std::vector<std::string>& model_paths);
+               const char *GetCustomProp();
 
                int mPluginType;
                int mTargetDevice;
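Note that GetCustomProp() treats DSP as the fallback: any target other than
CPU or GPU maps to "RUNTIME:DSP". A hypothetical stand-alone mirror of that
mapping (enum values assumed to follow inference-engine-interface's
inference_target_type_e; not the committed code) makes the fallback explicit:

#include <cassert>
#include <cstring>

/* Assumed to match inference_engine_type.h; hypothetical here. */
enum {
        INFERENCE_TARGET_CPU = 1 << 0,
        INFERENCE_TARGET_GPU = 1 << 1,
        INFERENCE_TARGET_CUSTOM = 1 << 2,
};

static const char *CustomPropFor(int target)
{
        return target == INFERENCE_TARGET_CPU ? "RUNTIME:CPU" :
                   target == INFERENCE_TARGET_GPU ? "RUNTIME:GPU" : "RUNTIME:DSP";
}

int main()
{
        assert(strcmp(CustomPropFor(INFERENCE_TARGET_CPU), "RUNTIME:CPU") == 0);
        assert(strcmp(CustomPropFor(INFERENCE_TARGET_GPU), "RUNTIME:GPU") == 0);
        /* Anything else, including CUSTOM, falls through to DSP. */
        assert(strcmp(CustomPropFor(INFERENCE_TARGET_CUSTOM), "RUNTIME:DSP") == 0);
        return 0;
}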