// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    virtual void Release() noexcept CV_OVERRIDE;

    void setPrecision(InferenceEngine::Precision p) noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept CV_OVERRIDE;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept CV_OVERRIDE;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual void getName(char *pName, size_t len) const noexcept;

    virtual size_t layerCount() noexcept CV_OVERRIDE;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept CV_OVERRIDE;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;

    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;

    void init(int targetId);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);

    void forward();

    bool isInitialized();

private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;
    InferenceEngine::Precision precision;
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;

    void initPlugin(InferenceEngine::ICNNNetwork& net);
};

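// A rough usage sketch of the class above (illustrative only; `ieNet` is a
// hypothetical, already-parsed InferenceEngine::CNNNetwork and `wrappers` a
// hypothetical vector of wrappers for the network's input/output blobs):
//
//   InfEngineBackendNet net(ieNet);
//   net.addBlobs(wrappers);        // register input/output blobs
//   if (!net.isInitialized())
//       net.init(DNN_TARGET_CPU);  // pick a plugin for the target and prepare an infer request
//   net.forward();                 // one synchronous inference pass
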
class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object that allows obtaining the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};

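// Sketch of how a node is typically wired up (illustrative; `ieLayer`,
// `inputWrappers` and `outputWrappers` are hypothetical):
//
//   Ptr<InfEngineBackendNode> node(new InfEngineBackendNode(ieLayer));
//   node->connect(inputWrappers, outputWrappers);  // attach data nodes to the layer's ports
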
class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
};

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

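// Sketch of wrapping a Mat into an Inference Engine blob and back
// (illustrative; wrapToInfEngineBlob is expected to reference the Mat's data
// rather than copy it, so `m` must outlive the blob):
//
//   int sz[] = {1, 3, 224, 224};                   // NCHW-shaped tensor
//   Mat m(4, sz, CV_32F);
//   InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(m, InferenceEngine::Layout::NCHW);
//   Mat view = infEngineBlobToMat(blob);           // a Mat header over the blob's data
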
// Converts an Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);

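// Sketch of the FP32 -> FP16 conversion above (illustrative; unlike
// wrapToInfEngineBlob, the returned blob owns freshly allocated memory):
//
//   InferenceEngine::Blob::Ptr fp32blob = wrapToInfEngineBlob(m);
//   InferenceEngine::TBlob<int16_t>::Ptr fp16blob = convertFp16(fp32blob);
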
// This is a fake class used to run networks imported from Model Optimizer. Objects of this
// class simulate the responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
                         std::vector<Mat> &internals) CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::DataPtr output;
};

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);

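// Typical call site (a sketch; `node` would be a Ptr<BackendNode> produced by
// a layer's Inference Engine backend initialization):
//
//   if (haveInfEngine())
//       forwardInfEngine(node);
//   else
//       CV_Error(Error::StsNotImplemented, "Built without Inference Engine support");
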
}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__