[Ext/Filter/OpenVino] Put the class definition in a new .hh file
author Wook Song <wook16.song@samsung.com>
Thu, 30 Jan 2020 05:20:51 +0000 (14:20 +0900)
committer MyungJoo Ham <myungjoo.ham@samsung.com>
Wed, 5 Feb 2020 09:35:56 +0000 (01:35 -0800)
This patch moves the class definition of TensorFilterOpenvino from the
existing .cc file into a newly created header file so that test cases
for the OpenVino filter can use the TensorFilterOpenvino class
directly.

Signed-off-by: Wook Song <wook16.song@samsung.com>
ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc
ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh [new file with mode: 0644]

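For context, moving the class definition into its own header is what makes tests like the following possible. A minimal sketch (hypothetical; the actual test cases are not part of this commit, and gtest is assumed as the test framework):

 #include <gtest/gtest.h>
 #include "tensor_filter_openvino.hh"

 /* Sketch only: construct the filter from IR file paths and check the
  * path accessors now exposed through the header. */
 TEST (tensor_filter_openvino, construct_and_get_paths)
 {
   std::string xml = "model" + TensorFilterOpenvino::extXml;
   std::string bin = "model" + TensorFilterOpenvino::extBin;
   TensorFilterOpenvino tfOv (xml, bin);

   EXPECT_EQ (xml, tfOv.getPathModelXml ());
   EXPECT_EQ (bin, tfOv.getPathModelBin ());
 }
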
index f405fab..09e09e1 100644
 #include <iostream>
 #include <string>
 #include <vector>
+#include "tensor_filter_openvino.hh"
 
-const gchar *openvino_accl_support[] = {
-  ACCL_CPU_STR,
-  ACCL_NPU_MOVIDIUS_STR,
-  NULL
-};
 
 void init_filter_openvino (void) __attribute__ ((constructor));
 void fini_filter_openvino (void) __attribute__ ((destructor));
 
-class TensorFilterOpenvino
-{
-public:
-  enum RetVal
-  {
-    RetSuccess = 0,
-    RetEBusy = -EBUSY,
-    RetEInval = -EINVAL,
-    RetENoDev = -ENODEV,
-    RetEOverFlow = -EOVERFLOW,
-  };
-
-  static tensor_type convertFromIETypeStr (std::string type);
-  static InferenceEngine::Blob::Ptr convertGstTensorMemoryToBlobPtr (
-      const InferenceEngine::TensorDesc tensorDesc,
-      const GstTensorMemory * gstTensor);
-  static bool isAcclDevSupported (std::vector<std::string> &devsVector,
-      accl_hw hw);
-
-  TensorFilterOpenvino (std::string path_model_xml, std::string path_model_bin);
-  ~TensorFilterOpenvino ();
-
-  // TODO: Need to support other acceleration devices
-  int loadModel (accl_hw hw);
-  bool isModelLoaded () {
-    return _isLoaded;
-  }
-
-  int getInputTensorDim (GstTensorsInfo * info);
-  int getOutputTensorDim (GstTensorsInfo * info);
-  int invoke (const GstTensorFilterProperties * prop,
-      const GstTensorMemory * input, GstTensorMemory * output);
-  std::string getPathModelXml ();
-  std::string getPathModelBin ();
-
-  static const std::string extBin;
-  static const std::string extXml;
-
-private:
-  TensorFilterOpenvino ();
-
-  InferenceEngine::Core _ieCore;
-  InferenceEngine::CNNNetReader _networkReaderCNN;
-  InferenceEngine::CNNNetwork _networkCNN;
-  InferenceEngine::InputsDataMap _inputsDataMap;
-  InferenceEngine::TensorDesc _inputTensorDescs[NNS_TENSOR_SIZE_LIMIT];
-  InferenceEngine::OutputsDataMap _outputsDataMap;
-  InferenceEngine::TensorDesc _outputTensorDescs[NNS_TENSOR_SIZE_LIMIT];
-  InferenceEngine::ExecutableNetwork _executableNet;
-  InferenceEngine::InferRequest _inferRequest;
-  static std::map<accl_hw, std::string> _nnsAcclHwToOVDevMap;
-
-  std::string _pathModelXml;
-  std::string _pathModelBin;
-  bool _isLoaded;
-  accl_hw _hw;
-};
-
 std::map<accl_hw, std::string> TensorFilterOpenvino::_nnsAcclHwToOVDevMap = {
     {ACCL_CPU, "CPU"},
     {ACCL_NPU_MOVIDIUS, "MYRIAD"},
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh
new file mode 100644
index 0000000..e39e038
--- /dev/null
@@ -0,0 +1,116 @@
+/**
+ * GStreamer Tensor_Filter, OpenVino (DLDT) Module
+ * Copyright (C) 2019 Wook Song <wook16.song@samsung.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ */
+/**
+ * @file    tensor_filter_openvino.hh
+ * @date    23 Dec 2019
+ * @brief   Tensor_filter subplugin for OpenVino (DLDT).
+ * @see     http://github.com/nnsuite/nnstreamer
+ * @author  Wook Song <wook16.song@samsung.com>
+ * @bug     No known bugs except for NYI items
+ *
+ * This is the per-NN-framework plugin (OpenVino) for tensor_filter.
+ *
+ * @note This header file is only for internal use.
+ *
+ * To Packagers:
+ *
+ * This should not be exposed to the application developers with the development packages.
+ */
+
+#ifndef __TENSOR_FILTER_OPENVINO_H__
+#define __TENSOR_FILTER_OPENVINO_H__
+
+#include <glib.h>
+#include <nnstreamer_plugin_api_filter.h>
+#include <tensor_common.h>
+#ifdef __OPENVINO_CPU_EXT__
+#include <ext_list.hpp>
+#endif /* __OPENVINO_CPU_EXT__ */
+#include <inference_engine.hpp>
+#include <iostream>
+#include <string>
+#include <vector>
+
+
+const gchar *openvino_accl_support[] = {
+  ACCL_CPU_STR,
+  ACCL_NPU_MOVIDIUS_STR,
+  NULL
+};
+
+class TensorFilterOpenvino
+{
+public:
+  enum RetVal
+  {
+    RetSuccess = 0,
+    RetEBusy = -EBUSY,
+    RetEInval = -EINVAL,
+    RetENoDev = -ENODEV,
+    RetEOverFlow = -EOVERFLOW,
+  };
+
+  static tensor_type convertFromIETypeStr (std::string type);
+  static InferenceEngine::Blob::Ptr convertGstTensorMemoryToBlobPtr (
+      const InferenceEngine::TensorDesc tensorDesc,
+      const GstTensorMemory * gstTensor);
+  static bool isAcclDevSupported (std::vector<std::string> &devsVector,
+      accl_hw hw);
+
+  TensorFilterOpenvino (std::string path_model_xml, std::string path_model_bin);
+  ~TensorFilterOpenvino ();
+
+  // TODO: Need to support other acceleration devices
+  int loadModel (accl_hw hw);
+  bool isModelLoaded () {
+    return _isLoaded;
+  }
+
+  int getInputTensorDim (GstTensorsInfo * info);
+  int getOutputTensorDim (GstTensorsInfo * info);
+  int invoke (const GstTensorFilterProperties * prop,
+      const GstTensorMemory * input, GstTensorMemory * output);
+  std::string getPathModelXml ();
+  void setPathModelXml (std::string pathXml);
+  std::string getPathModelBin ();
+  void setPathModelBin (std::string pathBin);
+
+  static const std::string extBin;
+  static const std::string extXml;
+
+protected:
+  InferenceEngine::InputsDataMap _inputsDataMap;
+  InferenceEngine::OutputsDataMap _outputsDataMap;
+
+private:
+  TensorFilterOpenvino ();
+
+  InferenceEngine::Core _ieCore;
+  InferenceEngine::CNNNetReader _networkReaderCNN;
+  InferenceEngine::CNNNetwork _networkCNN;
+  InferenceEngine::TensorDesc _inputTensorDescs[NNS_TENSOR_SIZE_LIMIT];
+  InferenceEngine::TensorDesc _outputTensorDescs[NNS_TENSOR_SIZE_LIMIT];
+  InferenceEngine::ExecutableNetwork _executableNet;
+  InferenceEngine::InferRequest _inferRequest;
+  static std::map<accl_hw, std::string> _nnsAcclHwToOVDevMap;
+
+  std::string _pathModelXml;
+  std::string _pathModelBin;
+  bool _isLoaded;
+  accl_hw _hw;
+};
+
+#endif /* __TENSOR_FILTER_OPENVINO_H__ */
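
For reference, the API declared in the new header implies a call sequence along these lines (a rough sketch, not part of this patch; the model paths and the buffer setup behind prop, in, and out are assumed to be provided by the caller):

 #include "tensor_filter_openvino.hh"

 /* Sketch only: load an OpenVino IR model, query its tensor
  * dimensions, and run one inference. Error handling is reduced
  * to early returns. */
 static int
 run_once (const GstTensorFilterProperties *prop,
     const GstTensorMemory *in, GstTensorMemory *out)
 {
   TensorFilterOpenvino tfOv ("model.xml", "model.bin");
   GstTensorsInfo in_info, out_info;

   if (tfOv.loadModel (ACCL_CPU) != TensorFilterOpenvino::RetSuccess)
     return -1;
   g_assert (tfOv.isModelLoaded ());

   /* Dimensions are derived from the IR files at load time */
   tfOv.getInputTensorDim (&in_info);
   tfOv.getOutputTensorDim (&out_info);

   return tfOv.invoke (prop, in, out);
 }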