// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__
#ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif

#include <inference_engine.hpp>

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#endif  // HAVE_INF_ENGINE
namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE
class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    virtual void Release() noexcept CV_OVERRIDE;

    void setPrecision(InferenceEngine::Precision p) noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept CV_OVERRIDE;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept CV_OVERRIDE;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual void getName(char *pName, size_t len) const noexcept;

    virtual size_t layerCount() noexcept CV_OVERRIDE;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept CV_OVERRIDE;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;

    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;

    void init(int targetId);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);
private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;
    InferenceEngine::Precision precision;
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;

    void initPlugin(InferenceEngine::ICNNNetwork& net);
};
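
// Usage sketch (illustrative only, not part of this header): a backend net is
// typically built from an existing CNNNetwork, given access to the wrapped
// input/output blobs, and then initialized for a target. `ieNet` and
// `wrappers` are hypothetical names.
//
//   InfEngineBackendNet net(ieNet);
//   net.setPrecision(InferenceEngine::Precision::FP32);
//   net.addBlobs(wrappers);    // register input/output memory
//   net.init(DNN_TARGET_CPU);  // load a plugin and create an infer request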
class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object that lets us obtain the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};
class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
};
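
// Usage sketch (illustrative; `m` is a hypothetical 4D cv::Mat): the wrapper
// exposes a Mat to Inference Engine without copying its data.
//
//   Mat m(std::vector<int>{1, 3, 224, 224}, CV_32F, Scalar(0));
//   Ptr<BackendWrapper> w(new InfEngineBackendWrapper(DNN_TARGET_CPU, m));
//   w->setHostDirty();  // host data changed since the last forward pass
//   w->copyToHost();    // bring backend results back into the Mat if needed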
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
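
// Round-trip sketch (illustrative): wrapToInfEngineBlob shares the Mat's
// buffer with Inference Engine, and infEngineBlobToMat produces a Mat header
// over the blob's data, so no deep copies are involved.
//
//   Mat m(std::vector<int>{1, 3, 10, 10}, CV_32F, Scalar(1));
//   InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(m, InferenceEngine::Layout::NCHW);
//   Mat view = infEngineBlobToMat(blob);  // views the same memory as `m`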
// Convert an Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
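
// Sketch (illustrative): the conversion is typically applied to FP32 weight
// blobs before running a network at FP16 precision. `weights` is a
// hypothetical cv::Mat of CV_32F values.
//
//   InferenceEngine::Blob::Ptr fp32 = wrapToInfEngineBlob(weights);
//   InferenceEngine::TBlob<int16_t>::Ptr fp16 = convertFp16(fp32);  // new buffer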
// This is a fake class used to run networks from the Model Optimizer. Objects
// of this class simulate responses of layers that are imported by OpenCV and
// supported by Inference Engine. The main difference is that they do not
// perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
                         std::vector<Mat> &internals) CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::DataPtr output;
};
#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);
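
// Usage sketch (illustrative; `node` is a hypothetical backend node built
// during network initialization):
//
//   CV_Assert(haveInfEngine());
//   forwardInfEngine(node);  // run inference for the node's network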
}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__