dnn, IE backend: updated to match new interface
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"

#ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    virtual void Release() noexcept CV_OVERRIDE;

    void setPrecision(InferenceEngine::Precision p) noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept;

    virtual InferenceEngine::Precision getPrecision() const noexcept;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) const noexcept;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual void getName(char *pName, size_t len) const noexcept;

    virtual const std::string& getName() const noexcept;

    virtual size_t layerCount() noexcept;

    virtual size_t layerCount() const noexcept;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) const noexcept;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;

    virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;

    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;

    void init(int targetId);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);

    void forward();

    bool isInitialized();

private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;
    InferenceEngine::Precision precision;
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;

    std::string name;

    void initPlugin(InferenceEngine::ICNNNetwork& net);
};

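// A minimal usage sketch of the class above, assuming the backend drives it roughly as the
// declared interface suggests (build the network, init a plugin for a target, bind blobs,
// run inference); `outputWrappers` below is an illustrative variable name only.
//
//     Ptr<InfEngineBackendNet> ieNet(new InfEngineBackendNet());
//     // ... populate via addLayer()/addOutput() while converting OpenCV layers ...
//     ieNet->init(DNN_TARGET_CPU);       // picks a plugin and compiles the network
//     ieNet->addBlobs(outputWrappers);   // std::vector<Ptr<BackendWrapper> > backed by cv::Mat data
//     if (ieNet->isInitialized())
//         ieNet->forward();              // runs one inference request
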
class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object used to obtain the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
};

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

// Converts an Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);

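// A minimal sketch of how the helpers above fit together, assuming an FP32 cv::Mat on the
// host; variable names are illustrative only.
//
//     int sz[] = {1, 3, 224, 224};
//     Mat m(4, sz, CV_32F);                                            // hypothetical NCHW input
//     InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(m);        // expose the Mat's data as an IE blob
//     InferenceEngine::TBlob<int16_t>::Ptr fp16 = convertFp16(blob);   // new FP16 blob (memory is allocated)
//     Mat view = infEngineBlobToMat(blob);                             // map the blob's contents back to a Mat
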
// This is a fake class used to run networks from the Model Optimizer. Objects of
// this class simulate responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
                         std::vector<Mat> &internals) CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::DataPtr output;
};

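// A minimal sketch, assuming the fake layer is queried only for shapes and backend support
// (as the comment above describes); `ieOutput` stands for some InferenceEngine::DataPtr and
// is illustrative only.
//
//     Ptr<Layer> stub(new InfEngineBackendLayer(ieOutput));
//     std::vector<MatShape> in, out, internals;
//     stub->getMemoryShapes(in, 1, out, internals);                   // shape inference, no forward pass
//     bool ok = stub->supportBackend(DNN_BACKEND_INFERENCE_ENGINE);
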
#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__