// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "precomp.hpp"

#ifdef HAVE_INF_ENGINE
#include <inference_engine.hpp>
#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    virtual void Release() noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual size_t layerCount() noexcept;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept;

    virtual size_t getBatchSize() const noexcept;

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);

private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::InferenceEnginePluginPtr engine;
};
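
// Illustrative sketch of how the DNN backend is expected to drive this class;
// `cnnLayer` and `wrappers` are hypothetical placeholders, not names from this
// header, and the real backend's call order may differ:
//
//   InfEngineBackendNet net;
//   net.addLayer(cnnLayer);                                   // append a converted layer
//   net.addOutput(cnnLayer->name);                            // expose it as a network output
//   net.setTargetDevice(InferenceEngine::TargetDevice::eCPU);
//   net.setBatchSize(1);
//   net.addBlobs(wrappers);                                   // bind OpenCV-side blobs for I/O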

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object used to obtain the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};
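
// A minimal sketch of node construction during graph building
// (`cnnLayer`, `inputWrappers`, `outputWrappers` are hypothetical names):
//
//   Ptr<InfEngineBackendNode> node(new InfEngineBackendNode(cnnLayer));
//   node->connect(inputWrappers, outputWrappers);  // attach I/O data nodes to the layer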

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    ~InfEngineBackendWrapper();

    virtual void copyToHost();

    virtual void setHostDirty();

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::TBlob<float>::Ptr blob;
};
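
// A sketch of the intended host/backend synchronization (semantics assumed
// from the generic BackendWrapper contract; `m` is a hypothetical Mat):
//
//   Ptr<InfEngineBackendWrapper> wrapper(new InfEngineBackendWrapper(DNN_TARGET_CPU, m));
//   wrapper->copyToHost();    // pull backend results back into the wrapped Mat
//   wrapper->setHostDirty();  // mark host-side edits to `m` as pending for the backend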

InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m);

InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
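
// Sketch of wrapping an OpenCV Mat as an Inference Engine blob; the Mat here
// is a hypothetical NCHW float tensor, and sharing (rather than copying) the
// underlying data is an assumption about the implementation:
//
//   Mat m({1, 3, 224, 224}, CV_32F, Scalar(0));
//   InferenceEngine::TBlob<float>::Ptr blob = wrapToInfEngineBlob(m);
//   // Or with an explicit shape when the Mat's own dims should be overridden:
//   blob = wrapToInfEngineBlob(m, {1, 3, 224, 224});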

// Fuses convolution weights and biases with channel-wise scales and shifts.
void fuseConvWeights(const std::shared_ptr<InferenceEngine::ConvolutionLayer>& conv,
                     const Mat& w, const Mat& b = Mat());
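
// The underlying algebra is standard scale/shift folding (a sketch, not quoted
// from the implementation): a per-channel affine y[c] = conv(x)[c] * w[c] + b[c]
// is absorbed into the convolution itself via
//
//   fused_weights[c] = weights[c] * w[c]
//   fused_bias[c]    = bias[c]    * w[c] + b[c]
//
// which is how a trailing BatchNorm/Scale layer collapses into the convolution.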

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);
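
// Expected dispatch pattern at a call site (illustrative; `node` is assumed
// to hold an InfEngineBackendNode when the backend is enabled):
//
//   if (haveInfEngine())          // false when built without HAVE_INF_ENGINE
//       forwardInfEngine(node);   // runs the node's network via the IE plugin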

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__