Add ML Single API support of NNStreamer 41/234941/1
authorInki Dae <inki.dae@samsung.com>
Mon, 1 Jun 2020 02:16:53 +0000 (11:16 +0900)
committerInki Dae <inki.dae@samsung.com>
Mon, 1 Jun 2020 02:33:31 +0000 (11:33 +0900)
This patch adds support for the ML Single API of NNStreamer.

In MediaVision, NNFW (an in-house NN runtime) and the Vivante NPU
will be controlled by the inference-engine-mlapi backend
through the ML Single API of NNStreamer.

As more than one backend should be controlled by the
inference-engine-mlapi backend, this patch introduces a new function,
SetPluginType, which lets the ML Single API know which tensor filter -
NNFW or Vivante NPU - should be used, and adds two new backend types -
Vivante and NNFW.
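
As a caller-side illustration (not part of this patch; the namespace
and the helper name below are assumptions, while the config fields and
the BindBackend() call come from the headers and tests changed here),
binding the NNFW tensor filter through the new path looks roughly like
this:

    #include "inference_engine_common_impl.h"
    #include "inference_engine_type.h"

    using namespace InferenceEngineInterface::Common; /* assumed namespace */

    /* Hypothetical helper: bind the NNStreamer-based backend and route
     * it to the NNFW tensor filter of the ML Single API. */
    int BindNnfwThroughMlApi(InferenceEngineCommon *engine)
    {
            /* "nnstreamer" resolves to libinference-engine-nnstreamer.so;
             * backend_type tells that backend which tensor filter should
             * actually run the model. */
            inference_engine_config config = {
                    .backend_name = "nnstreamer",
                    .backend_type = INFERENCE_BACKEND_NNFW,
                    .target_devices = 0 /* or a value from inference_target_type_e */
            };

            return engine->BindBackend(&config);
    }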

Change-Id: Iccf9fe97f5ec50c5c1bfd7bab48927e0f343f182
Signed-off-by: Inki Dae <inki.dae@samsung.com>
include/inference_engine_common.h
include/inference_engine_common_impl.h
include/inference_engine_type.h
src/inference_engine_common_impl.cpp
test/src/inference_engine_profiler.cpp
test/src/inference_engine_tc.cpp

index 7501744..be6b037 100755 (executable)
@@ -31,6 +31,16 @@ public:
     virtual ~IInferenceEngineCommon() {};
 
     /**
+     * @brief Set a tensor filter plugin type.
+     * @details See #inference_backend_type_e
+     *          This callback passes a given tensor filter plugin type - NNFW or VIVANTE - to the tensor filter plugin for NNStreamer.
+     *
+     * @since_tizen 6.0 (Optional)
+     * @param[in] type This should be one of the plugin types enumerated in inference_backend_type_e.
+     */
+       virtual int SetPluginType(const int type = 0) { return type; }
+
+    /**
      * @brief Set target devices.
      * @details See #inference_target_type_e
      *          This callback passes given device types - CPU, GPU, CUSTOM or a combined one if a backend engine supports hybrid inference - to a backend engine.
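
For reference, a backend driven through the ML Single API could
override the new hook roughly as sketched below. This is illustrative
only - the class name and members are assumptions rather than the
actual inference-engine-mlapi code, and the remaining
IInferenceEngineCommon methods are omitted:

    #include "inference_engine_common.h"
    #include "inference_engine_type.h"

    using namespace InferenceEngineInterface::Common; /* assumed namespace */

    class MlApiBackendSketch : public IInferenceEngineCommon {
    public:
            /* Remember which tensor filter - NNFW or Vivante NPU - the
             * ML Single API should use when a model is loaded later.
             * INFERENCE_ENGINE_ERROR_* come from the project's error header. */
            int SetPluginType(const int type) override
            {
                    if (type != INFERENCE_BACKEND_NNFW &&
                        type != INFERENCE_BACKEND_VIVANTE)
                            return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;

                    mPluginType = type;

                    return INFERENCE_ENGINE_ERROR_NONE;
            }

            /* ... the other IInferenceEngineCommon methods go here. */

    private:
            int mPluginType = INFERENCE_BACKEND_NNFW;
    };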
index 08ccf52..7934236 100755 (executable)
@@ -229,7 +229,7 @@ public:
        int DumpProfileToFile(const std::string filename = "dump.txt");
 
 private:
-       int InitBackendEngine(const std::string &backend_path);
+       int InitBackendEngine(const std::string &backend_path, int backend_type);
        int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
        int CheckLayerProperty(inference_engine_layer_property &property);
 
index ef7e74f..33c656e 100644 (file)
@@ -38,6 +38,8 @@ typedef enum {
     INFERENCE_BACKEND_OPENCV,    /**< OpenCV */
     INFERENCE_BACKEND_TFLITE,    /**< TensorFlow-Lite */
     INFERENCE_BACKEND_ARMNN,     /**< ARMNN */
+    INFERENCE_BACKEND_VIVANTE,   /**< Vivante */
+    INFERENCE_BACKEND_NNFW,      /**< NNFW */
     INFERENCE_BACKEND_MAX        /**< Backend MAX */
 } inference_backend_type_e;
 
@@ -70,6 +72,7 @@ typedef enum {
     INFERENCE_MODEL_DARKNET,         /**< Darknet. *.cfg config file is needed. */
     INFERENCE_MODEL_DLDT,            /**< DLDT. *.xml config file is needed. */
     INFERENCE_MODEL_ONNX,            /**< ONNX */
+    INFERENCE_MODEL_VIVANTE,         /**< Vivante. A model-specific .so library and a .nb model file are needed. */
     INFERENCE_MODEL_MAX
 } inference_model_format_e;
 
@@ -132,7 +135,8 @@ typedef struct _tensor_t {
  * @since_tizen 6.0
  */
 typedef struct _inference_engine_config {
-    std::string backend_name; /**< a backend name which could be one among supported backends(tflite, opencv, armnn, dldt) */
+    std::string backend_name; /**< a backend name which could be one among supported backends(tflite, opencv, armnn, dldt, nnstreamer) */
+    int backend_type; /**< a tensor filter plugin type for NNStreamer in case the backend is NNStreamer. */
     int target_devices; /**< which device or devices to be targeted for inference. (Please, refer to inference_target_type_e) */
     // TODO.
 } inference_engine_config;
index f8b902d..ada716f 100755 (executable)
@@ -167,7 +167,7 @@ int InferenceEngineCommon::DumpProfileToFile(const std::string filename)
        return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceEngineCommon::InitBackendEngine(const std::string &backend_path)
+int InferenceEngineCommon::InitBackendEngine(const std::string &backend_path, int backend_type)
 {
        LOGI("lib: %s", backend_path.c_str());
        mBackendModule = dlopen(backend_path.c_str(), RTLD_NOW);
@@ -196,6 +196,15 @@ int InferenceEngineCommon::InitBackendEngine(const std::string &backend_path)
                return INFERENCE_ENGINE_ERROR_INTERNAL;
        }
 
+       // If the backend is NNStreamer, then set a tensor filter plugin type.
+       if (backend_type == INFERENCE_BACKEND_NNFW || backend_type == INFERENCE_BACKEND_VIVANTE) {
+               int ret = mBackendHandle->SetPluginType(backend_type);
+               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("Failed to set a tensor filter plugin.");
+                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+               }
+       }
+
        return INFERENCE_ENGINE_ERROR_NONE;
 }
 
@@ -220,7 +229,7 @@ int InferenceEngineCommon::BindBackend(inference_engine_config *config)
 
     std::string backendLibName = "libinference-engine-" + config->backend_name + ".so";
 
-       int ret = InitBackendEngine(backendLibName);
+       int ret = InitBackendEngine(backendLibName, config->backend_type);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                return ret;
        }
@@ -256,12 +265,14 @@ int InferenceEngineCommon::BindBackend(int backend_type)
        std::string backendNameTable[INFERENCE_BACKEND_MAX] = {
                [INFERENCE_BACKEND_OPENCV] = "opencv",
                [INFERENCE_BACKEND_TFLITE] = "tflite",
-               [INFERENCE_BACKEND_ARMNN] = "armnn"
+               [INFERENCE_BACKEND_ARMNN] = "armnn",
+               [INFERENCE_BACKEND_VIVANTE] = "nnstreamer",
+               [INFERENCE_BACKEND_NNFW] = "nnstreamer"
        };
 
     std::string backendLibName = "libinference-engine-" + backendNameTable[backend_type] + ".so";
 
-       int ret = InitBackendEngine(backendLibName);
+       int ret = InitBackendEngine(backendLibName, backend_type);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                return ret;
        }
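
With the name table above, both new enum values map to the same
libinference-engine-nnstreamer.so, so the integer overload alone is
enough to pick the tensor filter. For example (engine being an
InferenceEngineCommon instance, as in the tests below):

    /* Loads libinference-engine-nnstreamer.so and forwards the backend
     * type to SetPluginType(); INFERENCE_BACKEND_NNFW would select the
     * NNFW tensor filter in the same way. */
    int ret = engine->BindBackend(INFERENCE_BACKEND_VIVANTE);
    if (ret != INFERENCE_ENGINE_ERROR_NONE) {
            /* handle the error */
    }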
index d46ea1b..949d729 100644 (file)
@@ -78,6 +78,7 @@ TEST_P(InferenceEngineTfliteTest, Inference)
        std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
        inference_engine_config config = {
                .backend_name = backend_name,
+               .backend_type = 0,
                .target_devices = target_devices
        };
 
@@ -253,6 +254,7 @@ TEST_P(InferenceEngineCaffeTest, Inference)
        std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
        inference_engine_config config = {
                .backend_name = backend_name,
+               .backend_type = 0,
                .target_devices = target_devices
        };
 
@@ -430,6 +432,7 @@ TEST_P(InferenceEngineDldtTest, Inference)
        std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
        inference_engine_config config = {
                .backend_name = backend_name,
+               .backend_type = 0,
                .target_devices = target_devices
        };
 
index 2738c5c..d7a37af 100644 (file)
@@ -52,13 +52,13 @@ class InferenceEngineTestCase_G7 : public testing::TestWithParam<ParamType_One_I
 class InferenceEngineTestCase_G8 : public testing::TestWithParam<ParamType_One_Int> { };
 
 static auto InferenceEngineInit_One_Param = [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
-       inference_engine_config config = { backend_name, 0 };
+       inference_engine_config config = { backend_name, 0, 0 };
 
        return engine->BindBackend(&config);
 };
 
 static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine, std::string &backend_name, int &target_devices) -> int {
-       inference_engine_config config = { backend_name, target_devices };
+       inference_engine_config config = { backend_name, 0, target_devices };
 
        int ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE)