From 0a74119e8a45bee632fb564c5bc1d9fcc34c4b66 Mon Sep 17 00:00:00 2001
From: Wook Song
Date: Thu, 30 Jan 2020 14:20:51 +0900
Subject: [PATCH] [Ext/Filter/OpenVino] Put the class definition in a new .hh
 file

This patch moves the class definition of TensorFilterOpenvino from the
existing .cc file into a newly created header file so that test cases
for the OpenVino filter can use the TensorFilterOpenvino class.

Signed-off-by: Wook Song
---
 .../tensor_filter/tensor_filter_openvino.cc |  64 +------------
 .../tensor_filter/tensor_filter_openvino.hh | 116 ++++++++++++++++++++
 2 files changed, 117 insertions(+), 63 deletions(-)
 create mode 100644 ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh

diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc
index f405fab..09e09e1 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc
@@ -34,74 +34,12 @@
 #include <iostream>
 #include <string>
 #include <vector>
+#include "tensor_filter_openvino.hh"
 
-const gchar *openvino_accl_support[] = {
-  ACCL_CPU_STR,
-  ACCL_NPU_MOVIDIUS_STR,
-  NULL
-};
 
 void init_filter_openvino (void) __attribute__ ((constructor));
 void fini_filter_openvino (void) __attribute__ ((destructor));
 
-class TensorFilterOpenvino
-{
-public:
-  enum RetVal
-  {
-    RetSuccess = 0,
-    RetEBusy = -EBUSY,
-    RetEInval = -EINVAL,
-    RetENoDev = -ENODEV,
-    RetEOverFlow = -EOVERFLOW,
-  };
-
-  static tensor_type convertFromIETypeStr (std::string type);
-  static InferenceEngine::Blob::Ptr convertGstTensorMemoryToBlobPtr (
-      const InferenceEngine::TensorDesc tensorDesc,
-      const GstTensorMemory * gstTensor);
-  static bool isAcclDevSupported (std::vector<std::string> &devsVector,
-      accl_hw hw);
-
-  TensorFilterOpenvino (std::string path_model_xml, std::string path_model_bin);
-  ~TensorFilterOpenvino ();
-
-  // TODO: Need to support other acceleration devices
-  int loadModel (accl_hw hw);
-  bool isModelLoaded () {
-    return _isLoaded;
-  }
-
-  int getInputTensorDim (GstTensorsInfo * info);
-  int getOutputTensorDim (GstTensorsInfo * info);
-  int invoke (const GstTensorFilterProperties * prop,
-      const GstTensorMemory * input, GstTensorMemory * output);
-  std::string getPathModelXml ();
-  std::string getPathModelBin ();
-
-  static const std::string extBin;
-  static const std::string extXml;
-
-private:
-  TensorFilterOpenvino ();
-
-  InferenceEngine::Core _ieCore;
-  InferenceEngine::CNNNetReader _networkReaderCNN;
-  InferenceEngine::CNNNetwork _networkCNN;
-  InferenceEngine::InputsDataMap _inputsDataMap;
-  InferenceEngine::TensorDesc _inputTensorDescs[NNS_TENSOR_SIZE_LIMIT];
-  InferenceEngine::OutputsDataMap _outputsDataMap;
-  InferenceEngine::TensorDesc _outputTensorDescs[NNS_TENSOR_SIZE_LIMIT];
-  InferenceEngine::ExecutableNetwork _executableNet;
-  InferenceEngine::InferRequest _inferRequest;
-  static std::map<accl_hw, std::string> _nnsAcclHwToOVDevMap;
-
-  std::string _pathModelXml;
-  std::string _pathModelBin;
-  bool _isLoaded;
-  accl_hw _hw;
-};
-
 std::map<accl_hw, std::string> TensorFilterOpenvino::_nnsAcclHwToOVDevMap = {
   {ACCL_CPU, "CPU"},
   {ACCL_NPU_MOVIDIUS, "MYRIAD"},
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh
new file mode 100644
index 0000000..e39e038
--- /dev/null
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh
@@ -0,0 +1,116 @@
+/**
+ * GStreamer Tensor_Filter, OpenVino (DLDT) Module
+ * Copyright (C) 2019 Wook Song
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ */
+/**
+ * @file tensor_filter_openvino.hh
+ * @date 23 Dec 2019
+ * @brief Tensor_filter subplugin for OpenVino (DLDT).
+ * @see http://github.com/nnsuite/nnstreamer
+ * @author Wook Song
+ * @bug No known bugs except for NYI items
+ *
+ * This is the per-NN-framework plugin (OpenVino) for tensor_filter.
+ *
+ * @note This header file is only for internal use.
+ *
+ * To Packagers:
+ *
+ * This should not be exposed with the development packages to the application developers.
+ */
+
+#ifndef __TENSOR_FILTER_OPENVINO_H__
+#define __TENSOR_FILTER_OPENVINO_H__
+
+#include <glib.h>
+#include <nnstreamer_plugin_api_filter.h>
+#include <tensor_common.h>
+#ifdef __OPENVINO_CPU_EXT__
+#include <ext_list.hpp>
+#endif /* __OPENVINO_CPU_EXT__ */
+#include <inference_engine.hpp>
+#include <iostream>
+#include <string>
+#include <vector>
+
+
+const gchar *openvino_accl_support[] = {
+  ACCL_CPU_STR,
+  ACCL_NPU_MOVIDIUS_STR,
+  NULL
+};
+
+class TensorFilterOpenvino
+{
+public:
+  enum RetVal
+  {
+    RetSuccess = 0,
+    RetEBusy = -EBUSY,
+    RetEInval = -EINVAL,
+    RetENoDev = -ENODEV,
+    RetEOverFlow = -EOVERFLOW,
+  };
+
+  static tensor_type convertFromIETypeStr (std::string type);
+  static InferenceEngine::Blob::Ptr convertGstTensorMemoryToBlobPtr (
+      const InferenceEngine::TensorDesc tensorDesc,
+      const GstTensorMemory * gstTensor);
+  static bool isAcclDevSupported (std::vector<std::string> &devsVector,
+      accl_hw hw);
+
+  TensorFilterOpenvino (std::string path_model_xml, std::string path_model_bin);
+  ~TensorFilterOpenvino ();
+
+  // TODO: Need to support other acceleration devices
+  int loadModel (accl_hw hw);
+  bool isModelLoaded () {
+    return _isLoaded;
+  }
+
+  int getInputTensorDim (GstTensorsInfo * info);
+  int getOutputTensorDim (GstTensorsInfo * info);
+  int invoke (const GstTensorFilterProperties * prop,
+      const GstTensorMemory * input, GstTensorMemory * output);
+  std::string getPathModelXml ();
+  void setPathModelXml (std::string pathXml);
+  std::string getPathModelBin ();
+  void setPathModelBin (std::string pathBin);
+
+  static const std::string extBin;
+  static const std::string extXml;
+
+protected:
+  InferenceEngine::InputsDataMap _inputsDataMap;
+  InferenceEngine::OutputsDataMap _outputsDataMap;
+
+private:
+  TensorFilterOpenvino ();
+
+  InferenceEngine::Core _ieCore;
+  InferenceEngine::CNNNetReader _networkReaderCNN;
+  InferenceEngine::CNNNetwork _networkCNN;
+  InferenceEngine::TensorDesc _inputTensorDescs[NNS_TENSOR_SIZE_LIMIT];
+  InferenceEngine::TensorDesc _outputTensorDescs[NNS_TENSOR_SIZE_LIMIT];
+  InferenceEngine::ExecutableNetwork _executableNet;
+  InferenceEngine::InferRequest _inferRequest;
+  static std::map<accl_hw, std::string> _nnsAcclHwToOVDevMap;
+
+  std::string _pathModelXml;
+  std::string _pathModelBin;
+  bool _isLoaded;
+  accl_hw _hw;
+};
+
+#endif // __TENSOR_FILTER_OPENVINO_H__
-- 
2.7.4
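
Note: with the class definition exported through tensor_filter_openvino.hh, a test
case can now construct and exercise TensorFilterOpenvino directly, which is the
stated motivation of this patch. The following is a minimal sketch of such a test;
the GTest harness and the model file paths (model.xml, model.bin) are illustrative
assumptions, not part of this patch.

/* Hypothetical test sketch: exercises TensorFilterOpenvino through the new
 * header. The gtest harness and model paths are assumptions for illustration. */
#include <gtest/gtest.h>
#include "tensor_filter_openvino.hh"

TEST (tensorFilterOpenvino, constructAndLoad)
{
  std::string xml ("model.xml"); /* hypothetical OpenVino IR topology file */
  std::string bin ("model.bin"); /* hypothetical OpenVino IR weights file */
  TensorFilterOpenvino tfOv (xml, bin);

  /* The paths passed to the constructor should be retrievable as-is. */
  EXPECT_EQ (tfOv.getPathModelXml (), xml);
  EXPECT_EQ (tfOv.getPathModelBin (), bin);
  EXPECT_FALSE (tfOv.isModelLoaded ());

  /* loadModel () returns RetSuccess (0) when the target device is usable. */
  EXPECT_EQ (TensorFilterOpenvino::RetSuccess, tfOv.loadModel (ACCL_CPU));
  EXPECT_TRUE (tfOv.isModelLoaded ());
}

Since loadModel () depends on an actual OpenVino device plugin being present, a
real test would first check the device with isAcclDevSupported () or skip the
load step when no device is available.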