// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#endif  // HAVE_INF_ENGINE
namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE
class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    virtual void Release() noexcept CV_OVERRIDE;

    void setPrecision(InferenceEngine::Precision p) noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept;

    virtual InferenceEngine::Precision getPrecision() const noexcept;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) const noexcept;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual void getName(char *pName, size_t len) const noexcept;

    virtual const std::string& getName() const noexcept;

    virtual size_t layerCount() noexcept;

    virtual size_t layerCount() const noexcept;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) const noexcept;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;

    virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;

    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;

    void init(int targetId);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);
private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;
    InferenceEngine::Precision precision;
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;

    void initPlugin(InferenceEngine::ICNNNetwork& net);
};
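
// Illustrative sketch (not from the original header; variable names are assumptions):
// how the DNN backend typically drives an InfEngineBackendNet. Layers are appended
// with addLayer(), requested outputs are registered with addOutput(), init() selects
// the precision/target and loads the plugin, and addBlobs() binds the wrappers that
// hold input and output memory.
//
//     InfEngineBackendNet net;
//     net.addLayer(ieLayer);            // ieLayer: InferenceEngine::CNNLayerPtr built by a layer
//     net.addOutput(ieLayer->name);     // expose this layer's output as a network output
//     net.init(DNN_TARGET_CPU);         // choose device/precision and initialize the plugin
//     net.addBlobs(inputWrappers);      // register backend wrappers of the inputs
//     net.addBlobs(outputWrappers);     // ... and of the outputs
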
class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object that allows obtaining the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};
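
// Illustrative sketch (an assumption, not part of the original header): a backend node
// wraps a single Inference Engine layer; connect() wires the node to the data described
// by its input and output wrappers.
//
//     Ptr<InfEngineBackendNode> node(new InfEngineBackendNode(ieLayer));
//     node->connect(inputWrappers, outputWrappers);
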
class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
};
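
// Illustrative sketch (an assumption): wrapping a cv::Mat for an Inference Engine target.
// The wrapper keeps a DataPtr describing the tensor and a Blob referring to its data;
// copyToHost()/setHostDirty() are the generic BackendWrapper synchronization hooks.
//
//     Mat m({1, 3, 224, 224}, CV_32F);
//     Ptr<BackendWrapper> wrapper(new InfEngineBackendWrapper(DNN_TARGET_CPU, m));
//     wrapper->copyToHost();   // make sure the Mat reflects the latest backend data
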
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
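
// Illustrative sketch (an assumption): round-tripping data through these helpers.
//
//     Mat m({1, 3, 224, 224}, CV_32F);
//     InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(m, InferenceEngine::Layout::NCHW);
//     Mat view = infEngineBlobToMat(blob);                            // Mat interpretation of the blob data
//     InferenceEngine::TBlob<int16_t>::Ptr half = convertFp16(blob);  // newly allocated FP16 blob
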
// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate the responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
                         std::vector<Mat> &internals) CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::DataPtr output;
};
#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);
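
// Illustrative sketch (an assumption): these two functions are declared unconditionally,
// so callers can guard backend-specific paths even in builds without the Inference Engine.
//
//     if (haveInfEngine())
//         forwardInfEngine(node);   // node: Ptr<BackendNode> produced by the IE backend
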
}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__